Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
- llava_next/lib/python3.10/site-packages/torch/_logging/__init__.py +15 -0
- llava_next/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_logging/_internal.py +761 -0
- llava_next/lib/python3.10/site-packages/torch/_logging/_registrations.py +80 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/__init__.py +28 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py +879 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py +165 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/_ndarray.py +564 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py +332 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/_util.py +251 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/random.py +160 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__init__.py +17 -0
- llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/ATen/ATenConfig.cmake +9 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake +130 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets-release.cmake +59 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets.cmake +180 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake +1073 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUSPARSELT.cmake +62 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDA.cmake +11 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDNN.cmake +78 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake +40 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA.cmake +1979 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake +106 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake +109 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake +303 -0
- llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake +273 -0
llava_next/lib/python3.10/site-packages/torch/_logging/__init__.py
ADDED
@@ -0,0 +1,15 @@
+# Top level logging module for torch logging
+# Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
+# Simple setup for onboarding (see above doc for more detail):
+# 1. register any top-level log qualified name for your module in torch._logging._registrations (see there for examples)
+# 2. register any artifacts (<artifact_name> below) in torch._logging._registrations
+#   a. call getArtifactLogger(__name__, <artifact_name>) at your logging site instead of the standard logger to log your artifact
+import torch._logging._registrations
+from ._internal import (
+    _init_logs,
+    DEFAULT_LOGGING,
+    getArtifactLogger,
+    LazyString,
+    set_logs,
+    warning_once,
+)
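The header comments above describe the intended onboarding flow: register a log or artifact name in torch._logging._registrations, then fetch an artifact logger at the logging site. A minimal sketch of that flow (illustrative only, not part of this diff; the "my_artifact" name is hypothetical):

    from torch._logging._internal import getArtifactLogger, register_artifact

    # Step 2 above: register the artifact (normally done in torch/_logging/_registrations.py).
    # "my_artifact" is a hypothetical name used only for this illustration.
    register_artifact("my_artifact", "Example artifact for illustration", visible=True)

    # Step 2a: at the logging site, use the artifact logger instead of the standard logger.
    artifact_log = getArtifactLogger(__name__, "my_artifact")
    artifact_log.debug("this message is gated by the 'my_artifact' setting")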
llava_next/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (384 Bytes).

llava_next/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc
ADDED
Binary file (22.9 kB).

llava_next/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc
ADDED
Binary file (2.79 kB).
llava_next/lib/python3.10/site-packages/torch/_logging/_internal.py
ADDED
@@ -0,0 +1,761 @@
+import functools
+import itertools
+import logging
+import os
+import re
+from dataclasses import dataclass, field
+from importlib import __import__
+from typing import Dict, Optional, Set, Union
+from weakref import WeakSet
+
+log = logging.getLogger(__name__)
+
+DEFAULT_LOG_LEVEL = logging.WARN
+LOG_ENV_VAR = "TORCH_LOGS"
+
+
+@dataclass
+class LogRegistry:
+    # shorthand name to log qualified name
+    # Note: this only contains loggers registered
+    # from register_log
+    # e.g. "dynamo" -> "torch._dynamo"
+    log_alias_to_log_qname: Dict[str, str] = field(default_factory=dict)
+
+    # artifact logger qualified names,
+    # this is populated lazily, as calls to getArtifactLogger
+    # currently formatted as <module>.__<artifact_name>
+    # e.g. "torch._dynamo.convert_frame.__guards"
+    artifact_log_qnames: Set[str] = field(default_factory=set)
+
+    # child logs of registered logs if specified via open
+    # registration by the user (ie placing "torch._dynamo.output_graph" in the env var)
+    # these need to be tracked so their levels can be reset properly
+    # e.g. "torch._dynamo.output_graph"
+    child_log_qnames: Set[str] = field(default_factory=set)
+
+    # artifact names, populated by register_artifact
+    # e.g. "guards"
+    artifact_names: Set[str] = field(default_factory=set)
+
+    # Artifacts that should be visible by default in the error message
+    visible_artifacts: Set[str] = field(default_factory=set)
+
+    # A short description of each artifact
+    artifact_descriptions: Dict[str, str] = field(default_factory=dict)
+
+    # artifacts which are not displayed unless explicitly named in the
+    # settings. Ex. output_code is NOT displayed even if the inductor
+    # log level is set to DEBUG. It must be explicitly named in the settings
+    off_by_default_artifact_names: Set[str] = field(default_factory=set)
+
+    # logging format string for artifacts
+    artifact_log_formatters: Dict[str, logging.Formatter] = field(default_factory=dict)
+
+    def is_artifact(self, name):
+        return name in self.artifact_names
+
+    def is_log(self, alias):
+        return alias in self.log_alias_to_log_qname
+
+    # register a log with an alias
+    def register_log(self, alias, log_qname):
+        self.log_alias_to_log_qname[alias] = log_qname
+
+    # register an artifact name
+    def register_artifact_name(
+        self, name, description, visible, off_by_default, log_format
+    ):
+        self.artifact_names.add(name)
+        if visible:
+            self.visible_artifacts.add(name)
+        self.artifact_descriptions[name] = description
+
+        # if off by default, don't enable it
+        # when log_name's log_level is set to DEBUG
+        if off_by_default:
+            self.off_by_default_artifact_names.add(name)
+
+        if log_format is not None:
+            self.artifact_log_formatters[name] = logging.Formatter(log_format)
+
+    # register the qualified name of an artifact log
+    # this is needed to know which logs need to be reset
+    # whenever the log_state is changed
+    def register_artifact_log(self, artifact_log_qname):
+        self.artifact_log_qnames.add(artifact_log_qname)
+
+    def register_child_log(self, log_qname):
+        self.child_log_qnames.add(log_qname)
+
+    def get_log_qnames(self):
+        return set(self.log_alias_to_log_qname.values())
+
+    def get_artifact_log_qnames(self):
+        return set(self.artifact_log_qnames)
+
+    def get_child_log_qnames(self):
+        return set(self.child_log_qnames)
+
+    def is_off_by_default(self, artifact_qname):
+        return artifact_qname in self.off_by_default_artifact_names
+
+
+@dataclass
+class LogState:
+    # qualified log names -> currently set log level
+    log_qname_to_level: Dict[str, str] = field(default_factory=dict)
+
+    # the set of currently enabled artifacts
+    artifact_names: Set[str] = field(default_factory=set)
+
+    def enable_artifact(self, artifact_name):
+        self.artifact_names.add(artifact_name)
+
+    def is_artifact_enabled(self, name):
+        return name in self.artifact_names
+
+    def enable_log(self, log_qname, log_level):
+        self.log_qname_to_level[log_qname] = log_level
+
+    def get_log_level_pairs(self):
+        return self.log_qname_to_level.items()
+
+    def clear(self):
+        self.log_qname_to_level.clear()
+        self.artifact_names.clear()
+
+
+log_registry = LogRegistry()
+log_state = LogState()
+
+# sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING)
+DEFAULT_LOGGING = {
+    "graph_breaks": True,
+    "recompiles": True,
+    "dynamic": logging.INFO,
+    "guards": True,
+    "trace_source": True,
+}
+
+
+def set_logs(
+    *,
+    all: Optional[int] = None,
+    dynamo: Optional[int] = None,
+    aot: Optional[int] = None,
+    dynamic: Optional[int] = None,
+    inductor: Optional[int] = None,
+    distributed: Optional[int] = None,
+    onnx: Optional[int] = None,
+    bytecode: bool = False,
+    aot_graphs: bool = False,
+    aot_joint_graph: bool = False,
+    ddp_graphs: bool = False,
+    graph: bool = False,
+    graph_code: bool = False,
+    graph_breaks: bool = False,
+    graph_sizes: bool = False,
+    guards: bool = False,
+    recompiles: bool = False,
+    trace_source: bool = False,
+    trace_call: bool = False,
+    output_code: bool = False,
+    schedule: bool = False,
+    perf_hints: bool = False,
+    onnx_diagnostics: bool = False,
+    modules: Optional[Dict[str, Union[int, bool]]] = None,
+):
+    """
+    Sets the log level for individual components and toggles individual log
+    artifact types.
+
+    .. warning:: This feature is a prototype and may have compatibility
+        breaking changes in the future.
+
+    .. note:: The ``TORCH_LOGS`` environment variable has complete precedence
+        over this function, so if it was set, this function does nothing.
+
+    A component is a set of related features in PyTorch. All of the log
+    messages emitted from a given component have their own log levels. If the
+    log level of a particular message has priority greater than or equal to its
+    component's log level setting, it is emitted. Otherwise, it is suppressed.
+    This allows you to, for instance, silence large groups of log messages that
+    are not relevant to you and increase verbosity of logs for components that
+    are relevant. The expected log level values, ordered from highest to lowest
+    priority, are:
+
+        * ``logging.CRITICAL``
+        * ``logging.ERROR``
+        * ``logging.WARNING``
+        * ``logging.INFO``
+        * ``logging.DEBUG``
+        * ``logging.NOTSET``
+
+    See documentation for the Python ``logging`` module for more information on
+    log levels: `<https://docs.python.org/3/library/logging.html#logging-levels>`_
+
+    An artifact is a particular type of log message. Each artifact is assigned
+    to a parent component. A component can emit many different kinds of
+    artifacts. In general, an artifact is emitted if either its corresponding
+    setting in the argument list below is turned on or if its parent component
+    is set to a log level less than or equal to the log level of the artifact.
+
+    Keyword args:
+        all (:class:`Optional[int]`):
+            The default log level for all components. Default: ``logging.WARN``
+
+        dynamo (:class:`Optional[int]`):
+            The log level for the TorchDynamo component. Default: ``logging.WARN``
+
+        aot (:class:`Optional[int]`):
+            The log level for the AOTAutograd component. Default: ``logging.WARN``
+
+        inductor (:class:`Optional[int]`):
+            The log level for the TorchInductor component. Default: ``logging.WARN``
+
+        dynamic (:class:`Optional[int]`):
+            The log level for dynamic shapes. Default: ``logging.WARN``
+
+        distributed (:class:`Optional[int]`):
+            Whether to log communication operations and other debug info from pytorch distributed components.
+            Default: ``logging.WARN``
+
+        onnx (:class:`Optional[int]`):
+            The log level for the ONNX exporter component. Default: ``logging.WARN``
+
+        bytecode (:class:`bool`):
+            Whether to emit the original and generated bytecode from TorchDynamo.
+            Default: ``False``
+
+        aot_graphs (:class:`bool`):
+            Whether to emit the graphs generated by AOTAutograd. Default: ``False``
+
+        aot_joint_graph (:class:`bool`):
+            Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False``
+
+        ddp_graphs (:class:`bool`):
+            Whether to emit graphs generated by DDPOptimizer. Default: ``False``
+
+        graph (:class:`bool`):
+            Whether to emit the graph captured by TorchDynamo in tabular format.
+            Default: ``False``
+
+        graph_code (:class:`bool`):
+            Whether to emit the python source of the graph captured by TorchDynamo.
+            Default: ``False``
+
+        graph_breaks (:class:`bool`):
+            Whether to emit the graph breaks encountered by TorchDynamo.
+            Default: ``False``
+
+        graph_sizes (:class:`bool`):
+            Whether to emit tensor sizes of the graph captured by TorchDynamo.
+            Default: ``False``
+
+        guards (:class:`bool`):
+            Whether to emit the guards generated by TorchDynamo for each compiled
+            function. Default: ``False``
+
+        recompiles (:class:`bool`):
+            Whether to emit a guard failure reason and message every time
+            TorchDynamo recompiles a function. Default: ``False``
+
+        trace_source (:class:`bool`):
+            Whether to emit when TorchDynamo begins tracing a new line. Default: ``False``
+
+        trace_call (:class:`bool`):
+            Whether to emit detailed line location when TorchDynamo creates an FX node
+            corresponding to a function call. Python 3.11+ only. Default: ``False``
+
+        output_code (:class:`bool`):
+            Whether to emit the TorchInductor output code. Default: ``False``
+
+        schedule (:class:`bool`):
+            Whether to emit the TorchInductor schedule. Default: ``False``
+
+        perf_hints (:class:`bool`):
+            Whether to emit the TorchInductor perf hints. Default: ``False``
+
+        onnx_diagnostics (:class:`bool`):
+            Whether to emit the ONNX exporter diagnostics in logging. Default: ``False``
+
+        modules (dict):
+            This argument provides an alternate way to specify the above log
+            component and artifact settings, in the format of a keyword args
+            dictionary given as a single argument. There are two cases
+            where this is useful: (1) if a new log component or artifact has
+            been registered but a keyword argument for it has not been added
+            to this function and (2) if the log level for an unregistered module
+            needs to be set. This can be done by providing the fully-qualified module
+            name as the key, with the log level as the value. Default: ``None``
+
+
+    Example::
+
+        >>> # xdoctest: +SKIP
+        >>> import logging
+
+        # The following changes the "dynamo" component to emit DEBUG-level
+        # logs, and to emit "graph_code" artifacts.
+
+        >>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True)
+
+        # The following enables the logs for a different module
+
+        >>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG})
+    """
+    # ignore if env var is set
+    if LOG_ENV_VAR in os.environ:
+        log.warning(
+            "Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs"
+        )
+        return
+
+    log_state.clear()
+
+    modules = modules or {}
+
+    def _set_logs(**kwargs):
+        default_level = kwargs.pop("all", None)
+        if default_level:
+            if default_level not in logging._levelToName:
+                raise ValueError(
+                    f"Unrecognized log level for kwarg all: {default_level}, valid level values "
+                    f"are: {','.join([str(k) for k in logging._levelToName.keys()])}"
+                )
+
+            # add any missing aliases to kwargs
+            for alias in log_registry.log_alias_to_log_qname.keys():
+                if alias not in kwargs:
+                    kwargs[alias] = default_level
+        else:
+            default_level = DEFAULT_LOG_LEVEL
+
+        for alias, val in itertools.chain(kwargs.items(), modules.items()):  # type: ignore[union-attr]
+            if val is None:
+                val = default_level
+
+            if log_registry.is_artifact(alias):
+                if not isinstance(val, bool):
+                    raise ValueError(
+                        f"Expected bool to enable artifact {alias}, received {val}"
+                    )
+
+                if val:
+                    log_state.enable_artifact(alias)
+            elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames:
+                if val not in logging._levelToName:
+                    raise ValueError(
+                        f"Unrecognized log level for log {alias}: {val}, valid level values "
+                        f"are: {','.join([str(k) for k in logging._levelToName.keys()])}"
+                    )
+
+                log_state.enable_log(
+                    log_registry.log_alias_to_log_qname.get(alias, alias), val
+                )
+            elif alias == "all":
+                continue
+            else:
+                raise ValueError(
+                    f"Unrecognized log or artifact name passed to set_logs: {alias}"
+                )
+
+        _init_logs()
+
+    _set_logs(
+        all=all,
+        dynamo=dynamo,
+        aot=aot,
+        inductor=inductor,
+        dynamic=dynamic,
+        bytecode=bytecode,
+        aot_graphs=aot_graphs,
+        aot_joint_graph=aot_joint_graph,
+        ddp_graphs=ddp_graphs,
+        distributed=distributed,
+        graph=graph,
+        graph_code=graph_code,
+        graph_breaks=graph_breaks,
+        graph_sizes=graph_sizes,
+        guards=guards,
+        recompiles=recompiles,
+        trace_source=trace_source,
+        trace_call=trace_call,
+        output_code=output_code,
+        schedule=schedule,
+        perf_hints=perf_hints,
+        onnx=onnx,
+        onnx_diagnostics=onnx_diagnostics,
+    )
+
+
+def get_loggers():
+    """
+    Returns: a list of all registered loggers
+    """
+    return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()]
+
+
+def register_log(setting_name, log_name):
+    """
+    Enables a log to be controlled by the env var and user API with the setting_name
+    Args:
+        setting_name: the shorthand name used in the env var and user API
+        log_name: the log name that the setting_name is associated with
+    """
+    log_registry.register_log(setting_name, log_name)
+
+
+def register_artifact(
+    setting_name, description, visible=False, off_by_default=False, log_format=None
+):
+    """
+    Enables an artifact to be controlled by the env var and user API with the setting_name
+    Args:
+        setting_name: the shorthand name used in the env var and user API
+        description: A description of what this outputs
+        visible: Whether it gets suggested to users by default
+        off_by_default: whether this artifact should be logged when the ancestor loggers
+            are enabled at level DEBUG
+    """
+    log_registry.register_artifact_name(
+        setting_name, description, visible, off_by_default, log_format
+    )
+
+
+def getArtifactLogger(module_qname, artifact_name):
+    if artifact_name not in log_registry.artifact_names:
+        raise ValueError(
+            f"Artifact name: {repr(artifact_name)} not registered, "
+            f"please call register_artifact({repr(artifact_name)}) in torch._logging._registrations."
+        )
+    qname = module_qname + f".__{artifact_name}"
+    log = logging.getLogger(qname)
+    log.artifact_name = artifact_name  # type: ignore[attr-defined]
+    log_registry.register_artifact_log(qname)
+    configure_artifact_log(log)
+    return log
+
+
+INCR_VERBOSITY_CHAR = "+"
+DECR_VERBOSITY_CHAR = "-"
+VERBOSITY_REGEX = (
+    "("
+    + "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)])
+    + "?)"
+)
+
+
+def configure_artifact_log(log):
+    # If the artifact is off by default, then it should only be logged when explicitly
+    # enabled; set propagate to False so that this artifact is not propagated
+    # to its ancestor logger
+    if log_registry.is_off_by_default(log.artifact_name):
+        log.propagate = False
+
+    # enable artifact logging when explicitly enabled
+    if log_state.is_artifact_enabled(log.artifact_name):
+        log.setLevel(logging.DEBUG)
+        log.propagate = True
+
+
+# match a comma separated list of loggable names (whitespace allowed after commas)
+def _gen_settings_regex():
+    return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?")
+
+
+def _validate_settings(settings):
+    return re.fullmatch(_gen_settings_regex(), settings) is not None
+
+
+def help_message(verbose=False):
+    def pad_to(s, length=30):
+        assert len(s) <= length
+        return s + " " * (length - len(s))
+
+    if verbose:
+        printed_artifacts = log_registry.artifact_names
+    else:
+        printed_artifacts = log_registry.visible_artifacts
+
+    if verbose:
+        heading = "All registered names"
+    else:
+        heading = "Visible registered names (use TORCH_LOGS='+help' for full list)"
+    lines = (
+        ["all"]
+        + list(log_registry.log_alias_to_log_qname.keys())
+        + [
+            f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}"
+            for name in printed_artifacts
+        ]
+    )
+    setting_info = "  " + "\n  ".join(lines)
+    examples = """
+Examples:
+  TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to
+  logging.DEBUG and AOT to logging.INFO
+
+  TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to
+  logging.ERROR and TorchInductor to logging.DEBUG
+
+  TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact
+
+  TORCH_LOGS="+dynamo,schedule" will set the log level of TorchDynamo
+  to logging.DEBUG and enable the schedule artifact
+
+  TORCH_LOGS="+some.random.module,schedule" will set the log level of
+  some.random.module to logging.DEBUG and enable the schedule artifact
+"""  # flake8: noqa: B950
+    msg = f"""
+TORCH_LOGS Info
+{examples}
+
+{heading}
+{setting_info}
+"""
+    return msg
+
+
+def _invalid_settings_err_msg(settings, verbose=False):
+    valid_settings = ", ".join(
+        ["all"]
+        + list(log_registry.log_alias_to_log_qname.keys())
+        + list(log_registry.artifact_names)
+    )
+    msg = f"""
+Invalid log settings: {settings}, must be a comma separated list of fully
+qualified module names, registered log names or registered artifact names.
+For more info on various settings, try TORCH_LOGS="help"
+Valid settings:
+{valid_settings}
+"""
+    return msg
+
+
+@functools.lru_cache
+def _parse_log_settings(settings):
+    if settings == "":
+        return dict()
+
+    if settings == "help":
+        raise ValueError(help_message(verbose=False))
+    elif settings == "+help":
+        raise ValueError(help_message(verbose=True))
+    if not _validate_settings(settings):
+        raise ValueError(_invalid_settings_err_msg(settings))
+
+    settings = re.sub(r"\s+", "", settings)
+    log_names = settings.split(",")
+
+    def get_name_level_pair(name):
+        clean_name = name.replace(INCR_VERBOSITY_CHAR, "")
+        clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "")
+
+        if name[0] == INCR_VERBOSITY_CHAR:
+            level = logging.DEBUG
+        elif name[0] == DECR_VERBOSITY_CHAR:
+            level = logging.ERROR
+        else:
+            level = logging.INFO
+
+        return clean_name, level
+
+    log_state = LogState()
+
+    for name in log_names:
+        name, level = get_name_level_pair(name)
+        if name == "all":
+            for log_qname in log_registry.get_log_qnames():
+                log_state.enable_log(log_qname, level)
+
+    for name in log_names:
+        name, level = get_name_level_pair(name)
+
+        if log_registry.is_log(name):
+            assert level is not None
+            log_qname = log_registry.log_alias_to_log_qname[name]
+            log_state.enable_log(log_qname, level)
+        elif log_registry.is_artifact(name):
+            log_state.enable_artifact(name)
+        elif name == "all":
+            continue
+        elif _is_valid_module(name):
+            if not _has_registered_parent(name):
+                log_registry.register_log(name, name)
+            else:
+                log_registry.register_child_log(name)
+            log_state.enable_log(name, level)
+        else:
+            raise ValueError(_invalid_settings_err_msg(settings))
+
+    return log_state
+
+
+def _is_valid_module(qname):
+    try:
+        __import__(qname)
+        return True
+    except ImportError:
+        return False
+
+
+def _update_log_state_from_env():
+    global log_state
+    log_setting = os.environ.get(LOG_ENV_VAR, None)
+    if log_setting is not None:
+        log_state = _parse_log_settings(log_setting)
+
+
+def _has_registered_parent(log_qname):
+    cur_log = logging.getLogger(log_qname)
+
+    registered_log_qnames = log_registry.get_log_qnames()
+
+    while cur_log.parent:
+        if cur_log.name in registered_log_qnames:
+            return True
+        cur_log = cur_log.parent
+
+    return False
+
+
+# apply custom formats to artifacts when necessary
+class TorchLogsFormatter(logging.Formatter):
+    def format(self, record):
+        artifact_name = getattr(logging.getLogger(record.name), "artifact_name", None)
+        if artifact_name is not None:
+            artifact_formatter = log_registry.artifact_log_formatters.get(
+                artifact_name, None
+            )
+            if artifact_formatter is not None:
+                return artifact_formatter.format(record)
+
+        record.message = record.getMessage()
+        record.asctime = self.formatTime(record, self.datefmt)
+
+        lines = record.message.split("\n")
+        record.rankprefix = ""
+        if dist.is_available() and dist.is_initialized():
+            record.rankprefix = f"[rank{dist.get_rank()}]:"
+
+        record.compileid = ""
+        if (
+            compile_id := torch._guards.CompileContext.current_compile_id()
+        ) is not None:
+            record.compileid = f" [{compile_id}]"
+
+        prefix = f"{record.rankprefix}[{record.asctime}]{record.compileid} {record.name}: [{record.levelname}]"
+        return "\n".join(f"{prefix} {l}" for l in lines)
+
+
+DEFAULT_FORMATTER = TorchLogsFormatter()
+
+
+def _setup_handlers(create_handler_fn, log):
+    debug_handler = _track_handler(create_handler_fn())
+    debug_handler.setFormatter(DEFAULT_FORMATTER)
+    debug_handler.setLevel(logging.DEBUG)
+    log.addHandler(debug_handler)
+
+
+handlers = WeakSet()  # type: ignore[var-annotated]
+
+
+# mark handlers that we've created
+# so we don't modify user handlers
+def _track_handler(handler):
+    handlers.add(handler)
+    return handler
+
+
+def _is_torch_handler(handler):
+    return handler in handlers
+
+
+# clears all torch handlers on specified loggers
+def _clear_handlers(log):
+    to_remove = [handler for handler in log.handlers if _is_torch_handler(handler)]
+    for handler in to_remove:
+        log.removeHandler(handler)
+
+
+def _reset_logs():
+    # reset all registered logs
+    for log_qname in log_registry.get_log_qnames():
+        log = logging.getLogger(log_qname)
+        log.setLevel(logging.WARNING)
+        log.propagate = False
+        _clear_handlers(log)
+
+    # reset all artifact and child logs
+    for artifact_log_qname in itertools.chain(
+        log_registry.get_artifact_log_qnames(), log_registry.get_child_log_qnames()
+    ):
+        log = logging.getLogger(artifact_log_qname)
+        log.setLevel(logging.NOTSET)
+        log.propagate = True
+
+
+def _get_log_state():
+    return log_state
+
+
+def _set_log_state(state):
+    global log_state
+    log_state = state
+
+
+def _init_logs(log_file_name=None):
+    _reset_logs()
+    _update_log_state_from_env()
+
+    for log_qname, level in log_state.get_log_level_pairs():
+        log = logging.getLogger(log_qname)
+        log.setLevel(level)
+
+    # setup handlers for all registered loggers
+    for log_qname in log_registry.get_log_qnames():
+        log = logging.getLogger(log_qname)
+        _setup_handlers(
+            logging.StreamHandler,
+            log,
+        )
+
+        if log_file_name is not None:
+            _setup_handlers(
+                lambda: logging.FileHandler(log_file_name),
+                log,
+            )
+
+    # configure artifact loggers, note: this must happen last
+    # since the levels of ancestor loggers are taken into account
+    for artifact_log_qname in log_registry.get_artifact_log_qnames():
+        log = logging.getLogger(artifact_log_qname)
+        configure_artifact_log(log)
+
+
+@functools.lru_cache(None)
+def warning_once(logger_obj, *args, **kwargs):
+    """
+    This function is similar to `logger.warning()`, but will emit the warning with the same message only once
+    Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache.
+    The assumption here is that all warning messages are unique across the code. If they aren't, then we need to switch to
+    another type of cache that includes the caller frame information in the hashing function.
+    """
+    logger_obj.warning(*args, **kwargs)
+
+
+class LazyString:
+    def __init__(self, func, *args, **kwargs):
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+
+    def __str__(self):
+        return self.func(*self.args, **self.kwargs)
+
+
+import torch._guards
+import torch.distributed as dist
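Two small utilities close out the file: warning_once deduplicates warnings via an lru_cache over its arguments, and LazyString defers string construction until a record is actually emitted. A minimal usage sketch (illustrative only, not part of this diff):

    import logging
    from torch._logging import LazyString, warning_once

    log = logging.getLogger("torch._dynamo")

    # Emits a single warning even though the call site runs three times,
    # because the (logger, message) pair hits the lru_cache after the first call.
    for _ in range(3):
        warning_once(log, "this warning appears only once")

    # The lambda only runs if the DEBUG record is actually emitted, so an
    # expensive repr is skipped when the level filters the message out.
    log.debug("state: %s", LazyString(lambda: "expensive_repr()"))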
llava_next/lib/python3.10/site-packages/torch/_logging/_registrations.py
ADDED
@@ -0,0 +1,80 @@
+# flake8: noqa: B950
+from ._internal import register_artifact, register_log
+
+register_log("dynamo", "torch._dynamo")
+register_log("aot", "torch._functorch.aot_autograd")
+register_log("inductor", "torch._inductor")
+register_log("dynamic", "torch.fx.experimental.symbolic_shapes")
+register_log("torch", "torch")
+register_log("distributed", "torch.distributed")
+register_log("onnx", "torch.onnx")
+
+register_artifact(
+    "guards",
+    "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.",
+    visible=True,
+)
+register_artifact("verbose_guards", "", off_by_default=True)
+register_artifact(
+    "bytecode",
+    "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.",
+    off_by_default=True,
+)
+register_artifact(
+    "graph",
+    "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead.",
+)
+register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.")
+register_artifact(
+    "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph."
+)
+register_artifact(
+    "trace_source",
+    "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`",
+)
+register_artifact(
+    "trace_call",
+    "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.",
+)
+register_artifact(
+    "aot_graphs",
+    "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor",
+    visible=True,
+)
+register_artifact(
+    "aot_joint_graph",
+    "Prints the FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning",
+)
+register_artifact(
+    "ddp_graphs",
+    "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.",
+)
+register_artifact(
+    "recompiles",
+    "Prints the reason why we recompiled a graph. Very, very useful.",
+    visible=True,
+)
+register_artifact(
+    "graph_breaks",
+    "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). Useful for debugging why torch.compile has poor performance",
+    visible=True,
+)
+register_artifact(
+    "not_implemented",
+    "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to",
+)
+register_artifact(
+    "output_code",
+    "Prints the code that Inductor generates (either Triton or C++)",
+    off_by_default=True,
+    visible=True,
+)
+register_artifact(
+    "schedule",
+    "Inductor scheduler information. Useful if working on Inductor fusion algo",
+    off_by_default=True,
+)
+register_artifact("perf_hints", "", off_by_default=True)
+register_artifact("onnx_diagnostics", "", off_by_default=True)
+
+register_artifact("custom_format_test_artifact", "Testing only", log_format="")
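The shorthand names registered here are exactly what the TORCH_LOGS environment variable and torch._logging.set_logs accept. As a sketch, the following call (roughly equivalent to setting TORCH_LOGS="+dynamo,guards,recompiles" in the environment) uses the "dynamo" log and the "guards"/"recompiles" artifacts registered above:

    import logging
    import torch._logging

    torch._logging.set_logs(
        dynamo=logging.DEBUG,  # component level, registered via register_log
        guards=True,           # artifact toggle, registered via register_artifact
        recompiles=True,
    )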
llava_next/lib/python3.10/site-packages/torch/_numpy/__init__.py
ADDED
@@ -0,0 +1,28 @@
+from . import fft, linalg, random
+from ._dtypes import *  # noqa: F403
+from ._funcs import *  # noqa: F403
+from ._getlimits import finfo, iinfo
+from ._ndarray import (
+    array,
+    asarray,
+    ascontiguousarray,
+    can_cast,
+    from_dlpack,
+    ndarray,
+    newaxis,
+    result_type,
+)
+from ._ufuncs import *  # noqa: F403
+from ._util import AxisError, UFuncTypeError
+
+# from . import testing
+
+alltrue = all
+sometrue = any
+
+inf = float("inf")
+nan = float("nan")
+from math import pi, e  # isort: skip
+
+False_ = False
+True_ = True
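This module is meant as a torch-backed stand-in for the numpy namespace. A quick sketch of that intent (illustrative; the exact supported surface is whatever ._funcs/._ufuncs export):

    import torch._numpy as tnp

    a = tnp.asarray([1.0, 2.0, 3.0])
    b = a + 1  # ufuncs and operators come from the star imports above
    print(tnp.result_type(a, b))  # dtype promotion, re-exported from ._ndarray
    print(tnp.pi, tnp.inf)        # module-level constants defined above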
llava_next/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py
ADDED
@@ -0,0 +1,879 @@
+import torch
+
+# These two dicts are autogenerated with autogen/gen_dtypes.py,
+# using numpy version 1.23.5.
+
+_can_cast_dict = {
+    "no": {
+        torch.float16: {
+            torch.float16: True,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.float32: {
+            torch.float16: False,
+            torch.float32: True,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.float64: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: True,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.complex64: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: True,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.complex128: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: True,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.uint8: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: True,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.int8: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: True,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.int16: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: True,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.int32: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: True,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.int64: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: True,
+            torch.bool: False,
+        },
+        torch.bool: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: True,
+        },
+    },
+    "equiv": {
+        torch.float16: {
+            torch.float16: True,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.float32: {
+            torch.float16: False,
+            torch.float32: True,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.float64: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: True,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.complex64: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: True,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.complex128: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: True,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.uint8: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: True,
+            torch.int8: False,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.int8: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: True,
+            torch.int16: False,
+            torch.int32: False,
+            torch.int64: False,
+            torch.bool: False,
+        },
+        torch.int16: {
+            torch.float16: False,
+            torch.float32: False,
+            torch.float64: False,
+            torch.complex64: False,
+            torch.complex128: False,
+            torch.uint8: False,
+            torch.int8: False,
+            torch.int16: True,
|
| 253 |
+
torch.int32: False,
|
| 254 |
+
torch.int64: False,
|
| 255 |
+
torch.bool: False,
|
| 256 |
+
},
|
| 257 |
+
torch.int32: {
|
| 258 |
+
torch.float16: False,
|
| 259 |
+
torch.float32: False,
|
| 260 |
+
torch.float64: False,
|
| 261 |
+
torch.complex64: False,
|
| 262 |
+
torch.complex128: False,
|
| 263 |
+
torch.uint8: False,
|
| 264 |
+
torch.int8: False,
|
| 265 |
+
torch.int16: False,
|
| 266 |
+
torch.int32: True,
|
| 267 |
+
torch.int64: False,
|
| 268 |
+
torch.bool: False,
|
| 269 |
+
},
|
| 270 |
+
torch.int64: {
|
| 271 |
+
torch.float16: False,
|
| 272 |
+
torch.float32: False,
|
| 273 |
+
torch.float64: False,
|
| 274 |
+
torch.complex64: False,
|
| 275 |
+
torch.complex128: False,
|
| 276 |
+
torch.uint8: False,
|
| 277 |
+
torch.int8: False,
|
| 278 |
+
torch.int16: False,
|
| 279 |
+
torch.int32: False,
|
| 280 |
+
torch.int64: True,
|
| 281 |
+
torch.bool: False,
|
| 282 |
+
},
|
| 283 |
+
torch.bool: {
|
| 284 |
+
torch.float16: False,
|
| 285 |
+
torch.float32: False,
|
| 286 |
+
torch.float64: False,
|
| 287 |
+
torch.complex64: False,
|
| 288 |
+
torch.complex128: False,
|
| 289 |
+
torch.uint8: False,
|
| 290 |
+
torch.int8: False,
|
| 291 |
+
torch.int16: False,
|
| 292 |
+
torch.int32: False,
|
| 293 |
+
torch.int64: False,
|
| 294 |
+
torch.bool: True,
|
| 295 |
+
},
|
| 296 |
+
},
|
| 297 |
+
"safe": {
|
| 298 |
+
torch.float16: {
|
| 299 |
+
torch.float16: True,
|
| 300 |
+
torch.float32: True,
|
| 301 |
+
torch.float64: True,
|
| 302 |
+
torch.complex64: True,
|
| 303 |
+
torch.complex128: True,
|
| 304 |
+
torch.uint8: False,
|
| 305 |
+
torch.int8: False,
|
| 306 |
+
torch.int16: False,
|
| 307 |
+
torch.int32: False,
|
| 308 |
+
torch.int64: False,
|
| 309 |
+
torch.bool: False,
|
| 310 |
+
},
|
| 311 |
+
torch.float32: {
|
| 312 |
+
torch.float16: False,
|
| 313 |
+
torch.float32: True,
|
| 314 |
+
torch.float64: True,
|
| 315 |
+
torch.complex64: True,
|
| 316 |
+
torch.complex128: True,
|
| 317 |
+
torch.uint8: False,
|
| 318 |
+
torch.int8: False,
|
| 319 |
+
torch.int16: False,
|
| 320 |
+
torch.int32: False,
|
| 321 |
+
torch.int64: False,
|
| 322 |
+
torch.bool: False,
|
| 323 |
+
},
|
| 324 |
+
torch.float64: {
|
| 325 |
+
torch.float16: False,
|
| 326 |
+
torch.float32: False,
|
| 327 |
+
torch.float64: True,
|
| 328 |
+
torch.complex64: False,
|
| 329 |
+
torch.complex128: True,
|
| 330 |
+
torch.uint8: False,
|
| 331 |
+
torch.int8: False,
|
| 332 |
+
torch.int16: False,
|
| 333 |
+
torch.int32: False,
|
| 334 |
+
torch.int64: False,
|
| 335 |
+
torch.bool: False,
|
| 336 |
+
},
|
| 337 |
+
torch.complex64: {
|
| 338 |
+
torch.float16: False,
|
| 339 |
+
torch.float32: False,
|
| 340 |
+
torch.float64: False,
|
| 341 |
+
torch.complex64: True,
|
| 342 |
+
torch.complex128: True,
|
| 343 |
+
torch.uint8: False,
|
| 344 |
+
torch.int8: False,
|
| 345 |
+
torch.int16: False,
|
| 346 |
+
torch.int32: False,
|
| 347 |
+
torch.int64: False,
|
| 348 |
+
torch.bool: False,
|
| 349 |
+
},
|
| 350 |
+
torch.complex128: {
|
| 351 |
+
torch.float16: False,
|
| 352 |
+
torch.float32: False,
|
| 353 |
+
torch.float64: False,
|
| 354 |
+
torch.complex64: False,
|
| 355 |
+
torch.complex128: True,
|
| 356 |
+
torch.uint8: False,
|
| 357 |
+
torch.int8: False,
|
| 358 |
+
torch.int16: False,
|
| 359 |
+
torch.int32: False,
|
| 360 |
+
torch.int64: False,
|
| 361 |
+
torch.bool: False,
|
| 362 |
+
},
|
| 363 |
+
torch.uint8: {
|
| 364 |
+
torch.float16: True,
|
| 365 |
+
torch.float32: True,
|
| 366 |
+
torch.float64: True,
|
| 367 |
+
torch.complex64: True,
|
| 368 |
+
torch.complex128: True,
|
| 369 |
+
torch.uint8: True,
|
| 370 |
+
torch.int8: False,
|
| 371 |
+
torch.int16: True,
|
| 372 |
+
torch.int32: True,
|
| 373 |
+
torch.int64: True,
|
| 374 |
+
torch.bool: False,
|
| 375 |
+
},
|
| 376 |
+
torch.int8: {
|
| 377 |
+
torch.float16: True,
|
| 378 |
+
torch.float32: True,
|
| 379 |
+
torch.float64: True,
|
| 380 |
+
torch.complex64: True,
|
| 381 |
+
torch.complex128: True,
|
| 382 |
+
torch.uint8: False,
|
| 383 |
+
torch.int8: True,
|
| 384 |
+
torch.int16: True,
|
| 385 |
+
torch.int32: True,
|
| 386 |
+
torch.int64: True,
|
| 387 |
+
torch.bool: False,
|
| 388 |
+
},
|
| 389 |
+
torch.int16: {
|
| 390 |
+
torch.float16: False,
|
| 391 |
+
torch.float32: True,
|
| 392 |
+
torch.float64: True,
|
| 393 |
+
torch.complex64: True,
|
| 394 |
+
torch.complex128: True,
|
| 395 |
+
torch.uint8: False,
|
| 396 |
+
torch.int8: False,
|
| 397 |
+
torch.int16: True,
|
| 398 |
+
torch.int32: True,
|
| 399 |
+
torch.int64: True,
|
| 400 |
+
torch.bool: False,
|
| 401 |
+
},
|
| 402 |
+
torch.int32: {
|
| 403 |
+
torch.float16: False,
|
| 404 |
+
torch.float32: False,
|
| 405 |
+
torch.float64: True,
|
| 406 |
+
torch.complex64: False,
|
| 407 |
+
torch.complex128: True,
|
| 408 |
+
torch.uint8: False,
|
| 409 |
+
torch.int8: False,
|
| 410 |
+
torch.int16: False,
|
| 411 |
+
torch.int32: True,
|
| 412 |
+
torch.int64: True,
|
| 413 |
+
torch.bool: False,
|
| 414 |
+
},
|
| 415 |
+
torch.int64: {
|
| 416 |
+
torch.float16: False,
|
| 417 |
+
torch.float32: False,
|
| 418 |
+
torch.float64: True,
|
| 419 |
+
torch.complex64: False,
|
| 420 |
+
torch.complex128: True,
|
| 421 |
+
torch.uint8: False,
|
| 422 |
+
torch.int8: False,
|
| 423 |
+
torch.int16: False,
|
| 424 |
+
torch.int32: False,
|
| 425 |
+
torch.int64: True,
|
| 426 |
+
torch.bool: False,
|
| 427 |
+
},
|
| 428 |
+
torch.bool: {
|
| 429 |
+
torch.float16: True,
|
| 430 |
+
torch.float32: True,
|
| 431 |
+
torch.float64: True,
|
| 432 |
+
torch.complex64: True,
|
| 433 |
+
torch.complex128: True,
|
| 434 |
+
torch.uint8: True,
|
| 435 |
+
torch.int8: True,
|
| 436 |
+
torch.int16: True,
|
| 437 |
+
torch.int32: True,
|
| 438 |
+
torch.int64: True,
|
| 439 |
+
torch.bool: True,
|
| 440 |
+
},
|
| 441 |
+
},
|
| 442 |
+
"same_kind": {
|
| 443 |
+
torch.float16: {
|
| 444 |
+
torch.float16: True,
|
| 445 |
+
torch.float32: True,
|
| 446 |
+
torch.float64: True,
|
| 447 |
+
torch.complex64: True,
|
| 448 |
+
torch.complex128: True,
|
| 449 |
+
torch.uint8: False,
|
| 450 |
+
torch.int8: False,
|
| 451 |
+
torch.int16: False,
|
| 452 |
+
torch.int32: False,
|
| 453 |
+
torch.int64: False,
|
| 454 |
+
torch.bool: False,
|
| 455 |
+
},
|
| 456 |
+
torch.float32: {
|
| 457 |
+
torch.float16: True,
|
| 458 |
+
torch.float32: True,
|
| 459 |
+
torch.float64: True,
|
| 460 |
+
torch.complex64: True,
|
| 461 |
+
torch.complex128: True,
|
| 462 |
+
torch.uint8: False,
|
| 463 |
+
torch.int8: False,
|
| 464 |
+
torch.int16: False,
|
| 465 |
+
torch.int32: False,
|
| 466 |
+
torch.int64: False,
|
| 467 |
+
torch.bool: False,
|
| 468 |
+
},
|
| 469 |
+
torch.float64: {
|
| 470 |
+
torch.float16: True,
|
| 471 |
+
torch.float32: True,
|
| 472 |
+
torch.float64: True,
|
| 473 |
+
torch.complex64: True,
|
| 474 |
+
torch.complex128: True,
|
| 475 |
+
torch.uint8: False,
|
| 476 |
+
torch.int8: False,
|
| 477 |
+
torch.int16: False,
|
| 478 |
+
torch.int32: False,
|
| 479 |
+
torch.int64: False,
|
| 480 |
+
torch.bool: False,
|
| 481 |
+
},
|
| 482 |
+
torch.complex64: {
|
| 483 |
+
torch.float16: False,
|
| 484 |
+
torch.float32: False,
|
| 485 |
+
torch.float64: False,
|
| 486 |
+
torch.complex64: True,
|
| 487 |
+
torch.complex128: True,
|
| 488 |
+
torch.uint8: False,
|
| 489 |
+
torch.int8: False,
|
| 490 |
+
torch.int16: False,
|
| 491 |
+
torch.int32: False,
|
| 492 |
+
torch.int64: False,
|
| 493 |
+
torch.bool: False,
|
| 494 |
+
},
|
| 495 |
+
torch.complex128: {
|
| 496 |
+
torch.float16: False,
|
| 497 |
+
torch.float32: False,
|
| 498 |
+
torch.float64: False,
|
| 499 |
+
torch.complex64: True,
|
| 500 |
+
torch.complex128: True,
|
| 501 |
+
torch.uint8: False,
|
| 502 |
+
torch.int8: False,
|
| 503 |
+
torch.int16: False,
|
| 504 |
+
torch.int32: False,
|
| 505 |
+
torch.int64: False,
|
| 506 |
+
torch.bool: False,
|
| 507 |
+
},
|
| 508 |
+
torch.uint8: {
|
| 509 |
+
torch.float16: True,
|
| 510 |
+
torch.float32: True,
|
| 511 |
+
torch.float64: True,
|
| 512 |
+
torch.complex64: True,
|
| 513 |
+
torch.complex128: True,
|
| 514 |
+
torch.uint8: True,
|
| 515 |
+
torch.int8: True,
|
| 516 |
+
torch.int16: True,
|
| 517 |
+
torch.int32: True,
|
| 518 |
+
torch.int64: True,
|
| 519 |
+
torch.bool: False,
|
| 520 |
+
},
|
| 521 |
+
torch.int8: {
|
| 522 |
+
torch.float16: True,
|
| 523 |
+
torch.float32: True,
|
| 524 |
+
torch.float64: True,
|
| 525 |
+
torch.complex64: True,
|
| 526 |
+
torch.complex128: True,
|
| 527 |
+
torch.uint8: False,
|
| 528 |
+
torch.int8: True,
|
| 529 |
+
torch.int16: True,
|
| 530 |
+
torch.int32: True,
|
| 531 |
+
torch.int64: True,
|
| 532 |
+
torch.bool: False,
|
| 533 |
+
},
|
| 534 |
+
torch.int16: {
|
| 535 |
+
torch.float16: True,
|
| 536 |
+
torch.float32: True,
|
| 537 |
+
torch.float64: True,
|
| 538 |
+
torch.complex64: True,
|
| 539 |
+
torch.complex128: True,
|
| 540 |
+
torch.uint8: False,
|
| 541 |
+
torch.int8: True,
|
| 542 |
+
torch.int16: True,
|
| 543 |
+
torch.int32: True,
|
| 544 |
+
torch.int64: True,
|
| 545 |
+
torch.bool: False,
|
| 546 |
+
},
|
| 547 |
+
torch.int32: {
|
| 548 |
+
torch.float16: True,
|
| 549 |
+
torch.float32: True,
|
| 550 |
+
torch.float64: True,
|
| 551 |
+
torch.complex64: True,
|
| 552 |
+
torch.complex128: True,
|
| 553 |
+
torch.uint8: False,
|
| 554 |
+
torch.int8: True,
|
| 555 |
+
torch.int16: True,
|
| 556 |
+
torch.int32: True,
|
| 557 |
+
torch.int64: True,
|
| 558 |
+
torch.bool: False,
|
| 559 |
+
},
|
| 560 |
+
torch.int64: {
|
| 561 |
+
torch.float16: True,
|
| 562 |
+
torch.float32: True,
|
| 563 |
+
torch.float64: True,
|
| 564 |
+
torch.complex64: True,
|
| 565 |
+
torch.complex128: True,
|
| 566 |
+
torch.uint8: False,
|
| 567 |
+
torch.int8: True,
|
| 568 |
+
torch.int16: True,
|
| 569 |
+
torch.int32: True,
|
| 570 |
+
torch.int64: True,
|
| 571 |
+
torch.bool: False,
|
| 572 |
+
},
|
| 573 |
+
torch.bool: {
|
| 574 |
+
torch.float16: True,
|
| 575 |
+
torch.float32: True,
|
| 576 |
+
torch.float64: True,
|
| 577 |
+
torch.complex64: True,
|
| 578 |
+
torch.complex128: True,
|
| 579 |
+
torch.uint8: True,
|
| 580 |
+
torch.int8: True,
|
| 581 |
+
torch.int16: True,
|
| 582 |
+
torch.int32: True,
|
| 583 |
+
torch.int64: True,
|
| 584 |
+
torch.bool: True,
|
| 585 |
+
},
|
| 586 |
+
},
|
| 587 |
+
"unsafe": {
|
| 588 |
+
torch.float16: {
|
| 589 |
+
torch.float16: True,
|
| 590 |
+
torch.float32: True,
|
| 591 |
+
torch.float64: True,
|
| 592 |
+
torch.complex64: True,
|
| 593 |
+
torch.complex128: True,
|
| 594 |
+
torch.uint8: True,
|
| 595 |
+
torch.int8: True,
|
| 596 |
+
torch.int16: True,
|
| 597 |
+
torch.int32: True,
|
| 598 |
+
torch.int64: True,
|
| 599 |
+
torch.bool: True,
|
| 600 |
+
},
|
| 601 |
+
torch.float32: {
|
| 602 |
+
torch.float16: True,
|
| 603 |
+
torch.float32: True,
|
| 604 |
+
torch.float64: True,
|
| 605 |
+
torch.complex64: True,
|
| 606 |
+
torch.complex128: True,
|
| 607 |
+
torch.uint8: True,
|
| 608 |
+
torch.int8: True,
|
| 609 |
+
torch.int16: True,
|
| 610 |
+
torch.int32: True,
|
| 611 |
+
torch.int64: True,
|
| 612 |
+
torch.bool: True,
|
| 613 |
+
},
|
| 614 |
+
torch.float64: {
|
| 615 |
+
torch.float16: True,
|
| 616 |
+
torch.float32: True,
|
| 617 |
+
torch.float64: True,
|
| 618 |
+
torch.complex64: True,
|
| 619 |
+
torch.complex128: True,
|
| 620 |
+
torch.uint8: True,
|
| 621 |
+
torch.int8: True,
|
| 622 |
+
torch.int16: True,
|
| 623 |
+
torch.int32: True,
|
| 624 |
+
torch.int64: True,
|
| 625 |
+
torch.bool: True,
|
| 626 |
+
},
|
| 627 |
+
torch.complex64: {
|
| 628 |
+
torch.float16: True,
|
| 629 |
+
torch.float32: True,
|
| 630 |
+
torch.float64: True,
|
| 631 |
+
torch.complex64: True,
|
| 632 |
+
torch.complex128: True,
|
| 633 |
+
torch.uint8: True,
|
| 634 |
+
torch.int8: True,
|
| 635 |
+
torch.int16: True,
|
| 636 |
+
torch.int32: True,
|
| 637 |
+
torch.int64: True,
|
| 638 |
+
torch.bool: True,
|
| 639 |
+
},
|
| 640 |
+
torch.complex128: {
|
| 641 |
+
torch.float16: True,
|
| 642 |
+
torch.float32: True,
|
| 643 |
+
torch.float64: True,
|
| 644 |
+
torch.complex64: True,
|
| 645 |
+
torch.complex128: True,
|
| 646 |
+
torch.uint8: True,
|
| 647 |
+
torch.int8: True,
|
| 648 |
+
torch.int16: True,
|
| 649 |
+
torch.int32: True,
|
| 650 |
+
torch.int64: True,
|
| 651 |
+
torch.bool: True,
|
| 652 |
+
},
|
| 653 |
+
torch.uint8: {
|
| 654 |
+
torch.float16: True,
|
| 655 |
+
torch.float32: True,
|
| 656 |
+
torch.float64: True,
|
| 657 |
+
torch.complex64: True,
|
| 658 |
+
torch.complex128: True,
|
| 659 |
+
torch.uint8: True,
|
| 660 |
+
torch.int8: True,
|
| 661 |
+
torch.int16: True,
|
| 662 |
+
torch.int32: True,
|
| 663 |
+
torch.int64: True,
|
| 664 |
+
torch.bool: True,
|
| 665 |
+
},
|
| 666 |
+
torch.int8: {
|
| 667 |
+
torch.float16: True,
|
| 668 |
+
torch.float32: True,
|
| 669 |
+
torch.float64: True,
|
| 670 |
+
torch.complex64: True,
|
| 671 |
+
torch.complex128: True,
|
| 672 |
+
torch.uint8: True,
|
| 673 |
+
torch.int8: True,
|
| 674 |
+
torch.int16: True,
|
| 675 |
+
torch.int32: True,
|
| 676 |
+
torch.int64: True,
|
| 677 |
+
torch.bool: True,
|
| 678 |
+
},
|
| 679 |
+
torch.int16: {
|
| 680 |
+
torch.float16: True,
|
| 681 |
+
torch.float32: True,
|
| 682 |
+
torch.float64: True,
|
| 683 |
+
torch.complex64: True,
|
| 684 |
+
torch.complex128: True,
|
| 685 |
+
torch.uint8: True,
|
| 686 |
+
torch.int8: True,
|
| 687 |
+
torch.int16: True,
|
| 688 |
+
torch.int32: True,
|
| 689 |
+
torch.int64: True,
|
| 690 |
+
torch.bool: True,
|
| 691 |
+
},
|
| 692 |
+
torch.int32: {
|
| 693 |
+
torch.float16: True,
|
| 694 |
+
torch.float32: True,
|
| 695 |
+
torch.float64: True,
|
| 696 |
+
torch.complex64: True,
|
| 697 |
+
torch.complex128: True,
|
| 698 |
+
torch.uint8: True,
|
| 699 |
+
torch.int8: True,
|
| 700 |
+
torch.int16: True,
|
| 701 |
+
torch.int32: True,
|
| 702 |
+
torch.int64: True,
|
| 703 |
+
torch.bool: True,
|
| 704 |
+
},
|
| 705 |
+
torch.int64: {
|
| 706 |
+
torch.float16: True,
|
| 707 |
+
torch.float32: True,
|
| 708 |
+
torch.float64: True,
|
| 709 |
+
torch.complex64: True,
|
| 710 |
+
torch.complex128: True,
|
| 711 |
+
torch.uint8: True,
|
| 712 |
+
torch.int8: True,
|
| 713 |
+
torch.int16: True,
|
| 714 |
+
torch.int32: True,
|
| 715 |
+
torch.int64: True,
|
| 716 |
+
torch.bool: True,
|
| 717 |
+
},
|
| 718 |
+
torch.bool: {
|
| 719 |
+
torch.float16: True,
|
| 720 |
+
torch.float32: True,
|
| 721 |
+
torch.float64: True,
|
| 722 |
+
torch.complex64: True,
|
| 723 |
+
torch.complex128: True,
|
| 724 |
+
torch.uint8: True,
|
| 725 |
+
torch.int8: True,
|
| 726 |
+
torch.int16: True,
|
| 727 |
+
torch.int32: True,
|
| 728 |
+
torch.int64: True,
|
| 729 |
+
torch.bool: True,
|
| 730 |
+
},
|
| 731 |
+
},
|
| 732 |
+
}
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
_result_type_dict = {
|
| 736 |
+
torch.float16: {
|
| 737 |
+
torch.float16: torch.float16,
|
| 738 |
+
torch.float32: torch.float32,
|
| 739 |
+
torch.float64: torch.float64,
|
| 740 |
+
torch.complex64: torch.complex64,
|
| 741 |
+
torch.complex128: torch.complex128,
|
| 742 |
+
torch.uint8: torch.float16,
|
| 743 |
+
torch.int8: torch.float16,
|
| 744 |
+
torch.int16: torch.float32,
|
| 745 |
+
torch.int32: torch.float64,
|
| 746 |
+
torch.int64: torch.float64,
|
| 747 |
+
torch.bool: torch.float16,
|
| 748 |
+
},
|
| 749 |
+
torch.float32: {
|
| 750 |
+
torch.float16: torch.float32,
|
| 751 |
+
torch.float32: torch.float32,
|
| 752 |
+
torch.float64: torch.float64,
|
| 753 |
+
torch.complex64: torch.complex64,
|
| 754 |
+
torch.complex128: torch.complex128,
|
| 755 |
+
torch.uint8: torch.float32,
|
| 756 |
+
torch.int8: torch.float32,
|
| 757 |
+
torch.int16: torch.float32,
|
| 758 |
+
torch.int32: torch.float64,
|
| 759 |
+
torch.int64: torch.float64,
|
| 760 |
+
torch.bool: torch.float32,
|
| 761 |
+
},
|
| 762 |
+
torch.float64: {
|
| 763 |
+
torch.float16: torch.float64,
|
| 764 |
+
torch.float32: torch.float64,
|
| 765 |
+
torch.float64: torch.float64,
|
| 766 |
+
torch.complex64: torch.complex128,
|
| 767 |
+
torch.complex128: torch.complex128,
|
| 768 |
+
torch.uint8: torch.float64,
|
| 769 |
+
torch.int8: torch.float64,
|
| 770 |
+
torch.int16: torch.float64,
|
| 771 |
+
torch.int32: torch.float64,
|
| 772 |
+
torch.int64: torch.float64,
|
| 773 |
+
torch.bool: torch.float64,
|
| 774 |
+
},
|
| 775 |
+
torch.complex64: {
|
| 776 |
+
torch.float16: torch.complex64,
|
| 777 |
+
torch.float32: torch.complex64,
|
| 778 |
+
torch.float64: torch.complex128,
|
| 779 |
+
torch.complex64: torch.complex64,
|
| 780 |
+
torch.complex128: torch.complex128,
|
| 781 |
+
torch.uint8: torch.complex64,
|
| 782 |
+
torch.int8: torch.complex64,
|
| 783 |
+
torch.int16: torch.complex64,
|
| 784 |
+
torch.int32: torch.complex128,
|
| 785 |
+
torch.int64: torch.complex128,
|
| 786 |
+
torch.bool: torch.complex64,
|
| 787 |
+
},
|
| 788 |
+
torch.complex128: {
|
| 789 |
+
torch.float16: torch.complex128,
|
| 790 |
+
torch.float32: torch.complex128,
|
| 791 |
+
torch.float64: torch.complex128,
|
| 792 |
+
torch.complex64: torch.complex128,
|
| 793 |
+
torch.complex128: torch.complex128,
|
| 794 |
+
torch.uint8: torch.complex128,
|
| 795 |
+
torch.int8: torch.complex128,
|
| 796 |
+
torch.int16: torch.complex128,
|
| 797 |
+
torch.int32: torch.complex128,
|
| 798 |
+
torch.int64: torch.complex128,
|
| 799 |
+
torch.bool: torch.complex128,
|
| 800 |
+
},
|
| 801 |
+
torch.uint8: {
|
| 802 |
+
torch.float16: torch.float16,
|
| 803 |
+
torch.float32: torch.float32,
|
| 804 |
+
torch.float64: torch.float64,
|
| 805 |
+
torch.complex64: torch.complex64,
|
| 806 |
+
torch.complex128: torch.complex128,
|
| 807 |
+
torch.uint8: torch.uint8,
|
| 808 |
+
torch.int8: torch.int16,
|
| 809 |
+
torch.int16: torch.int16,
|
| 810 |
+
torch.int32: torch.int32,
|
| 811 |
+
torch.int64: torch.int64,
|
| 812 |
+
torch.bool: torch.uint8,
|
| 813 |
+
},
|
| 814 |
+
torch.int8: {
|
| 815 |
+
torch.float16: torch.float16,
|
| 816 |
+
torch.float32: torch.float32,
|
| 817 |
+
torch.float64: torch.float64,
|
| 818 |
+
torch.complex64: torch.complex64,
|
| 819 |
+
torch.complex128: torch.complex128,
|
| 820 |
+
torch.uint8: torch.int16,
|
| 821 |
+
torch.int8: torch.int8,
|
| 822 |
+
torch.int16: torch.int16,
|
| 823 |
+
torch.int32: torch.int32,
|
| 824 |
+
torch.int64: torch.int64,
|
| 825 |
+
torch.bool: torch.int8,
|
| 826 |
+
},
|
| 827 |
+
torch.int16: {
|
| 828 |
+
torch.float16: torch.float32,
|
| 829 |
+
torch.float32: torch.float32,
|
| 830 |
+
torch.float64: torch.float64,
|
| 831 |
+
torch.complex64: torch.complex64,
|
| 832 |
+
torch.complex128: torch.complex128,
|
| 833 |
+
torch.uint8: torch.int16,
|
| 834 |
+
torch.int8: torch.int16,
|
| 835 |
+
torch.int16: torch.int16,
|
| 836 |
+
torch.int32: torch.int32,
|
| 837 |
+
torch.int64: torch.int64,
|
| 838 |
+
torch.bool: torch.int16,
|
| 839 |
+
},
|
| 840 |
+
torch.int32: {
|
| 841 |
+
torch.float16: torch.float64,
|
| 842 |
+
torch.float32: torch.float64,
|
| 843 |
+
torch.float64: torch.float64,
|
| 844 |
+
torch.complex64: torch.complex128,
|
| 845 |
+
torch.complex128: torch.complex128,
|
| 846 |
+
torch.uint8: torch.int32,
|
| 847 |
+
torch.int8: torch.int32,
|
| 848 |
+
torch.int16: torch.int32,
|
| 849 |
+
torch.int32: torch.int32,
|
| 850 |
+
torch.int64: torch.int64,
|
| 851 |
+
torch.bool: torch.int32,
|
| 852 |
+
},
|
| 853 |
+
torch.int64: {
|
| 854 |
+
torch.float16: torch.float64,
|
| 855 |
+
torch.float32: torch.float64,
|
| 856 |
+
torch.float64: torch.float64,
|
| 857 |
+
torch.complex64: torch.complex128,
|
| 858 |
+
torch.complex128: torch.complex128,
|
| 859 |
+
torch.uint8: torch.int64,
|
| 860 |
+
torch.int8: torch.int64,
|
| 861 |
+
torch.int16: torch.int64,
|
| 862 |
+
torch.int32: torch.int64,
|
| 863 |
+
torch.int64: torch.int64,
|
| 864 |
+
torch.bool: torch.int64,
|
| 865 |
+
},
|
| 866 |
+
torch.bool: {
|
| 867 |
+
torch.float16: torch.float16,
|
| 868 |
+
torch.float32: torch.float32,
|
| 869 |
+
torch.float64: torch.float64,
|
| 870 |
+
torch.complex64: torch.complex64,
|
| 871 |
+
torch.complex128: torch.complex128,
|
| 872 |
+
torch.uint8: torch.uint8,
|
| 873 |
+
torch.int8: torch.int8,
|
| 874 |
+
torch.int16: torch.int16,
|
| 875 |
+
torch.int32: torch.int32,
|
| 876 |
+
torch.int64: torch.int64,
|
| 877 |
+
torch.bool: torch.bool,
|
| 878 |
+
},
|
| 879 |
+
}
|
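
Read together, the two generated tables turn NumPy's casting lattice into plain dict lookups: `_can_cast_dict[casting][from][to]` answers `np.can_cast`-style queries, and `_result_type_dict[a][b]` gives pairwise promotion. A minimal sanity sketch (the import path follows the vendored layout shown in this diff; not part of the file itself):

import torch
from torch._numpy import _casting_dicts as _cd

# lossless widening is allowed under "safe", narrowing is not
assert _cd._can_cast_dict["safe"][torch.int16][torch.float32]
assert not _cd._can_cast_dict["safe"][torch.float64][torch.float32]

# promotion is a pairwise lookup, mirroring np.result_type:
# uint8 and int8 have no common 8-bit type, so they promote to int16
assert _cd._result_type_dict[torch.uint8][torch.int8] == torch.int16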
llava_next/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py
ADDED
@@ -0,0 +1,165 @@
"""Dtypes/scalar type implementations with torch dtypes.

Here `dtype` is always a torch.dtype, this module knows nothing about
scalar types, wrapper dtypes or anything like that. PyTorch only.
"""
from collections import namedtuple

import torch

# defaults : mimic NumPy, allow user control
DefaultDTypes = namedtuple(
    "DefaultDTypes", ["float_dtype", "complex_dtype", "int_dtype"]
)

# a global state
# We set it the first time we call default_dtypes() to avoid importing
# torch._dynamo.config and creating a circular reference
_default_dtypes = None


def default_dtypes():
    global _default_dtypes
    if _default_dtypes is None:
        import torch._dynamo.config as config

        _default_dtypes = DefaultDTypes(
            float_dtype=getattr(torch, config.numpy_default_float),
            complex_dtype=getattr(torch, config.numpy_default_complex),
            int_dtype=getattr(torch, config.numpy_default_int),
        )
        assert isinstance(_default_dtypes.float_dtype, torch.dtype)
        assert isinstance(_default_dtypes.complex_dtype, torch.dtype)
        assert isinstance(_default_dtypes.int_dtype, torch.dtype)
    return _default_dtypes


def get_default_dtype_for(dtype):
    """Default scalar type given sctype category."""
    if dtype == torch.bool:
        return dtype
    if dtype.is_complex:
        return default_dtypes().complex_dtype
    if dtype.is_floating_point:
        return default_dtypes().float_dtype
    # else, it must be (some) integer
    return default_dtypes().int_dtype


from . import _casting_dicts as _cd


def can_cast_impl(from_torch_dtype, to_torch_dtype, casting):
    return _cd._can_cast_dict[casting][from_torch_dtype][to_torch_dtype]


def result_type_impl(*tensors):
    # NB: torch dtypes here
    dtyp = tensors[0].dtype
    if len(tensors) == 1:
        return dtyp

    for curr in tensors[1:]:
        dtyp = _cd._result_type_dict[dtyp][curr.dtype]

    return dtyp


def python_type_for_torch(dtyp):
    """Get a python scalar type for a torch dtype."""
    if dtyp.is_floating_point:
        typ = float
    elif dtyp.is_complex:
        typ = complex
    elif dtyp == torch.bool:
        typ = bool
    else:
        typ = int
    return typ


# ### NEP 50 helpers ###

SCALAR_TYPES = {int, bool, float, complex}


def _dtype_for_scalar(py_type):
    return {
        bool: torch.bool,
        int: torch.int64,
        float: torch.float64,
        complex: torch.complex128,
    }[py_type]


def _category(dtype):
    return {
        torch.bool: 0,
        # int
        torch.uint8: 1,
        torch.int8: 1,
        torch.int16: 1,
        torch.int32: 1,
        torch.int64: 1,
        # float
        torch.float16: 2,
        torch.float32: 2,
        torch.float64: 2,
        # complex
        torch.complex64: 3,
        torch.complex128: 3,
    }[dtype]


def nep50_to_tensors(x1, x2, handle_weaks):
    """If either of the inputs is a python scalar, type-promote with NEP 50."""

    def to_tensor(scalar, dtype=None):
        if dtype is None:
            dtype = _dtype_for_scalar(type(scalar))
            dtype = get_default_dtype_for(dtype)
        return torch.as_tensor(scalar, dtype=dtype)

    x1_is_weak = not isinstance(x1, torch.Tensor)
    x2_is_weak = not isinstance(x2, torch.Tensor)
    if not handle_weaks or (x1_is_weak and x2_is_weak):
        x1 = to_tensor(x1) if x1_is_weak else x1
        x2 = to_tensor(x2) if x2_is_weak else x2
        return x1, x2

    # scalar <op> tensor: NEP 50
    assert x1_is_weak != x2_is_weak

    weak, not_weak = (x1, x2) if x1_is_weak else (x2, x1)

    # find the dtype for the weak's type
    weak_dtype = _dtype_for_scalar(type(weak))

    cat_weak = _category(weak_dtype)
    cat_not_weak = _category(not_weak.dtype)

    dt = not_weak.dtype if cat_weak <= cat_not_weak else None

    # special-case complex + float32
    if weak_dtype.is_complex and not_weak.dtype == torch.float32:
        dt = torch.complex64

    # detect overflows: in PyTorch, uint8(-1) wraps around to 255,
    # while NEP 50 mandates an exception.
    #
    # Note that we only check whether each operand of the binop overflows,
    # not the result. Consider, e.g. `uint8(100) + 200`. Both operands fit
    # in uint8, but the result overflows and wraps around to 255.
    # NumPy emits a RuntimeWarning, PyTorch does not, and neither do we.
    if cat_weak == 1 and cat_not_weak == 1:
        # integers
        iinfo = torch.iinfo(not_weak.dtype)
        if not (iinfo.min <= weak <= iinfo.max):
            raise OverflowError(
                f"Python integer {weak} out of bounds for {not_weak.dtype}"
            )

    # finally, can make `weak` into a 0D tensor
    weak = to_tensor(weak, dt)

    return (weak, not_weak) if x1_is_weak else (not_weak, weak)
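
The NEP 50 rule implemented above is: a "weak" Python scalar adopts the tensor's dtype whenever its kind category is not higher, and out-of-range weak integers raise instead of wrapping. A minimal sketch exercising `nep50_to_tensors` (assumes the vendored module path from this diff; not part of the file):

import torch
from torch._numpy import _dtypes_impl

t = torch.ones(3, dtype=torch.uint8)

# weak Python int + uint8 tensor: both are integer-kind, so the tensor dtype wins
w, nw = _dtypes_impl.nep50_to_tensors(5, t, handle_weaks=True)
assert w.dtype == torch.uint8

# an out-of-range weak integer raises rather than wrapping around
try:
    _dtypes_impl.nep50_to_tensors(300, t, handle_weaks=True)
except OverflowError as e:
    print(e)  # Python integer 300 out of bounds for torch.uint8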
llava_next/lib/python3.10/site-packages/torch/_numpy/_ndarray.py
ADDED
@@ -0,0 +1,564 @@
from __future__ import annotations

import builtins
import math
import operator
from typing import Sequence

import torch

from . import _dtypes, _dtypes_impl, _funcs, _ufuncs, _util
from ._normalizations import (
    ArrayLike,
    normalize_array_like,
    normalizer,
    NotImplementedType,
)

newaxis = None

FLAGS = [
    "C_CONTIGUOUS",
    "F_CONTIGUOUS",
    "OWNDATA",
    "WRITEABLE",
    "ALIGNED",
    "WRITEBACKIFCOPY",
    "FNC",
    "FORC",
    "BEHAVED",
    "CARRAY",
    "FARRAY",
]

SHORTHAND_TO_FLAGS = {
    "C": "C_CONTIGUOUS",
    "F": "F_CONTIGUOUS",
    "O": "OWNDATA",
    "W": "WRITEABLE",
    "A": "ALIGNED",
    "X": "WRITEBACKIFCOPY",
    "B": "BEHAVED",
    "CA": "CARRAY",
    "FA": "FARRAY",
}


class Flags:
    def __init__(self, flag_to_value: dict):
        assert all(k in FLAGS for k in flag_to_value.keys())  # sanity check
        self._flag_to_value = flag_to_value

    def __getattr__(self, attr: str):
        if attr.islower() and attr.upper() in FLAGS:
            return self[attr.upper()]
        else:
            raise AttributeError(f"No flag attribute '{attr}'")

    def __getitem__(self, key):
        if key in SHORTHAND_TO_FLAGS.keys():
            key = SHORTHAND_TO_FLAGS[key]
        if key in FLAGS:
            try:
                return self._flag_to_value[key]
            except KeyError as e:
                raise NotImplementedError(f"{key=}") from e
        else:
            raise KeyError(f"No flag key '{key}'")

    def __setattr__(self, attr, value):
        if attr.islower() and attr.upper() in FLAGS:
            self[attr.upper()] = value
        else:
            super().__setattr__(attr, value)

    def __setitem__(self, key, value):
        if key in FLAGS or key in SHORTHAND_TO_FLAGS.keys():
            raise NotImplementedError("Modifying flags is not implemented")
        else:
            raise KeyError(f"No flag key '{key}'")


def create_method(fn, name=None):
    name = name or fn.__name__

    def f(*args, **kwargs):
        return fn(*args, **kwargs)

    f.__name__ = name
    f.__qualname__ = f"ndarray.{name}"
    return f


# Map ndarray.name_method -> np.name_func
# If name_func is None, then name_method == name_func
methods = {
    "clip": None,
    "nonzero": None,
    "repeat": None,
    "round": None,
    "squeeze": None,
    "swapaxes": None,
    "ravel": None,
    # linalg
    "diagonal": None,
    "dot": None,
    "trace": None,
    # sorting
    "argsort": None,
    "searchsorted": None,
    # reductions
    "argmax": None,
    "argmin": None,
    "any": None,
    "all": None,
    "max": None,
    "min": None,
    "ptp": None,
    "sum": None,
    "prod": None,
    "mean": None,
    "var": None,
    "std": None,
    # scans
    "cumsum": None,
    "cumprod": None,
    # advanced indexing
    "take": None,
    "choose": None,
}

dunder = {
    "abs": "absolute",
    "invert": None,
    "pos": "positive",
    "neg": "negative",
    "gt": "greater",
    "lt": "less",
    "ge": "greater_equal",
    "le": "less_equal",
}

# dunder methods with right-looking and in-place variants
ri_dunder = {
    "add": None,
    "sub": "subtract",
    "mul": "multiply",
    "truediv": "divide",
    "floordiv": "floor_divide",
    "pow": "power",
    "mod": "remainder",
    "and": "bitwise_and",
    "or": "bitwise_or",
    "xor": "bitwise_xor",
    "lshift": "left_shift",
    "rshift": "right_shift",
    "matmul": None,
}


def _upcast_int_indices(index):
    if isinstance(index, torch.Tensor):
        if index.dtype in (torch.int8, torch.int16, torch.int32, torch.uint8):
            return index.to(torch.int64)
    elif isinstance(index, tuple):
        return tuple(_upcast_int_indices(i) for i in index)
    return index


###############################################################
#                        ndarray class                        #
###############################################################


class ndarray:
    def __init__(self, t=None):
        if t is None:
            self.tensor = torch.Tensor()
        elif isinstance(t, torch.Tensor):
            self.tensor = t
        else:
            raise ValueError(
                "ndarray constructor is not recommended; prefer "
                "either array(...) or zeros/empty(...)"
            )

    # Register NumPy functions as methods
    for method, name in methods.items():
        fn = getattr(_funcs, name or method)
        vars()[method] = create_method(fn, method)

    # Regular methods but coming from ufuncs
    conj = create_method(_ufuncs.conjugate, "conj")
    conjugate = create_method(_ufuncs.conjugate)

    for method, name in dunder.items():
        fn = getattr(_ufuncs, name or method)
        method = f"__{method}__"
        vars()[method] = create_method(fn, method)

    for method, name in ri_dunder.items():
        fn = getattr(_ufuncs, name or method)
        plain = f"__{method}__"
        vars()[plain] = create_method(fn, plain)
        rvar = f"__r{method}__"
        vars()[rvar] = create_method(lambda self, other, fn=fn: fn(other, self), rvar)
        ivar = f"__i{method}__"
        vars()[ivar] = create_method(
            lambda self, other, fn=fn: fn(self, other, out=self), ivar
        )

    # There's no __idivmod__
    __divmod__ = create_method(_ufuncs.divmod, "__divmod__")
    __rdivmod__ = create_method(
        lambda self, other: _ufuncs.divmod(other, self), "__rdivmod__"
    )

    # prevent loop variables leaking into the ndarray class namespace
    del ivar, rvar, name, plain, fn, method

    @property
    def shape(self):
        return tuple(self.tensor.shape)

    @property
    def size(self):
        return self.tensor.numel()

    @property
    def ndim(self):
        return self.tensor.ndim

    @property
    def dtype(self):
        return _dtypes.dtype(self.tensor.dtype)

    @property
    def strides(self):
        elsize = self.tensor.element_size()
        return tuple(stride * elsize for stride in self.tensor.stride())

    @property
    def itemsize(self):
        return self.tensor.element_size()

    @property
    def flags(self):
        # Note: "contiguous" in torch means C-style contiguous
        return Flags(
            {
                "C_CONTIGUOUS": self.tensor.is_contiguous(),
                "F_CONTIGUOUS": self.T.tensor.is_contiguous(),
                "OWNDATA": self.tensor._base is None,
                "WRITEABLE": True,  # pytorch does not have readonly tensors
            }
        )

    @property
    def data(self):
        return self.tensor.data_ptr()

    @property
    def nbytes(self):
        return self.tensor.storage().nbytes()

    @property
    def T(self):
        return self.transpose()

    @property
    def real(self):
        return _funcs.real(self)

    @real.setter
    def real(self, value):
        self.tensor.real = asarray(value).tensor

    @property
    def imag(self):
        return _funcs.imag(self)

    @imag.setter
    def imag(self, value):
        self.tensor.imag = asarray(value).tensor

    # ctors
    def astype(self, dtype):
        torch_dtype = _dtypes.dtype(dtype).torch_dtype
        t = self.tensor.to(torch_dtype)
        return ndarray(t)

    @normalizer
    def copy(self: ArrayLike, order: NotImplementedType = "C"):
        return self.clone()

    @normalizer
    def flatten(self: ArrayLike, order: NotImplementedType = "C"):
        return torch.flatten(self)

    def resize(self, *new_shape, refcheck=False):
        a = self.tensor
        # TODO(Lezcano) This is not an in-place
        # implementation of ndarray.resize.
        # NB: differs from np.resize: fills with zeros instead of making repeated copies of input.
        if refcheck:
            raise NotImplementedError(
                f"resize(..., refcheck={refcheck}) is not implemented."
            )

        if new_shape in [(), (None,)]:
            return

        # support both x.resize((2, 2)) and x.resize(2, 2)
        if len(new_shape) == 1:
            new_shape = new_shape[0]
        if isinstance(new_shape, int):
            new_shape = (new_shape,)

        a = a.flatten()

        if builtins.any(x < 0 for x in new_shape):
            raise ValueError("all elements of `new_shape` must be non-negative")

        new_numel = math.prod(new_shape)
        if new_numel < a.numel():
            # shrink
            ret = a[:new_numel].reshape(new_shape)
        else:
            b = torch.zeros(new_numel)
            b[: a.numel()] = a
            ret = b.reshape(new_shape)
        self.tensor = ret

    def view(self, dtype):
        torch_dtype = _dtypes.dtype(dtype).torch_dtype
        tview = self.tensor.view(torch_dtype)
        return ndarray(tview)

    @normalizer
    def fill(self, value: ArrayLike):
        # Both PyTorch and NumPy accept 0D arrays/tensors and scalars, and
        # error out on arrays with D > 0
        self.tensor.fill_(value)

    def tolist(self):
        return self.tensor.tolist()

    def __str__(self):
        return (
            str(self.tensor)
            .replace("tensor", "torch.ndarray")
            .replace("dtype=torch.", "dtype=")
        )

    __repr__ = create_method(__str__)

    def __eq__(self, other):
        try:
            return _ufuncs.equal(self, other)
        except (RuntimeError, TypeError):
            # Failed to convert other to array: definitely not equal.
            falsy = torch.full(self.shape, fill_value=False, dtype=bool)
            return asarray(falsy)

    def __ne__(self, other):
        return ~(self == other)

    def __index__(self):
        try:
            return operator.index(self.tensor.item())
        except Exception:
            raise TypeError(
                "only integer scalar arrays can be converted to a scalar index"
            )

    def __bool__(self):
        return bool(self.tensor)

    def __int__(self):
        return int(self.tensor)

    def __float__(self):
        return float(self.tensor)

    def __complex__(self):
        return complex(self.tensor)

    def is_integer(self):
        try:
            v = self.tensor.item()
            result = int(v) == v
        except Exception:
            result = False
        return result

    def __len__(self):
        return self.tensor.shape[0]

    def __contains__(self, x):
        return self.tensor.__contains__(x)

    def transpose(self, *axes):
        # np.transpose(arr, axis=None) but arr.transpose(*axes)
        return _funcs.transpose(self, axes)

    def reshape(self, *shape, order="C"):
        # both arr.reshape(shape) and arr.reshape(*shape)
        return _funcs.reshape(self, shape, order=order)

    def sort(self, axis=-1, kind=None, order=None):
        # ndarray.sort works in-place
        _funcs.copyto(self, _funcs.sort(self, axis, kind, order))

    def item(self, *args):
        # Mimic NumPy's implementation with three special cases (no arguments,
        # a flat index and a multi-index):
        # https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/methods.c#L702
        if args == ():
            return self.tensor.item()
        elif len(args) == 1:
            # int argument
            return self.ravel()[args[0]]
        else:
            return self.__getitem__(args)

    def __getitem__(self, index):
        tensor = self.tensor

        def neg_step(i, s):
            if not (isinstance(s, slice) and s.step is not None and s.step < 0):
                return s

            nonlocal tensor
            tensor = torch.flip(tensor, (i,))

            # Account for the fact that a slice includes the start but not the end
            assert isinstance(s.start, int) or s.start is None
            assert isinstance(s.stop, int) or s.stop is None
            start = s.stop + 1 if s.stop else None
            stop = s.start + 1 if s.start else None

            return slice(start, stop, -s.step)

        if isinstance(index, Sequence):
            index = type(index)(neg_step(i, s) for i, s in enumerate(index))
        else:
            index = neg_step(0, index)
        index = _util.ndarrays_to_tensors(index)
        index = _upcast_int_indices(index)
        return ndarray(tensor.__getitem__(index))

    def __setitem__(self, index, value):
        index = _util.ndarrays_to_tensors(index)
        index = _upcast_int_indices(index)

        if type(value) not in _dtypes_impl.SCALAR_TYPES:
            value = normalize_array_like(value)
            value = _util.cast_if_needed(value, self.tensor.dtype)

        return self.tensor.__setitem__(index, value)

    take = _funcs.take
    put = _funcs.put

    def __dlpack__(self, *, stream=None):
        return self.tensor.__dlpack__(stream=stream)

    def __dlpack_device__(self):
        return self.tensor.__dlpack_device__()


def _tolist(obj):
    """Recursively convert tensors into lists."""
    a1 = []
    for elem in obj:
        if isinstance(elem, (list, tuple)):
            elem = _tolist(elem)
        if isinstance(elem, ndarray):
            a1.append(elem.tensor.tolist())
        else:
            a1.append(elem)
    return a1


# This is ideally the only place which talks to ndarray directly.
# The rest goes through asarray (preferred) or array.


def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None):
    if subok is not False:
        raise NotImplementedError("'subok' parameter is not supported.")
    if like is not None:
        raise NotImplementedError("'like' parameter is not supported.")
    if order != "K":
        raise NotImplementedError

    # a happy path
    if (
        isinstance(obj, ndarray)
        and copy is False
        and dtype is None
        and ndmin <= obj.ndim
    ):
        return obj

    # lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists
    if isinstance(obj, (list, tuple)):
        obj = _tolist(obj)

    # is obj an ndarray already?
    if isinstance(obj, ndarray):
        obj = obj.tensor

    # is a specific dtype requested?
    torch_dtype = None
    if dtype is not None:
        torch_dtype = _dtypes.dtype(dtype).torch_dtype

    tensor = _util._coerce_to_tensor(obj, torch_dtype, copy, ndmin)
    return ndarray(tensor)


def asarray(a, dtype=None, order="K", *, like=None):
    return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0)


def ascontiguousarray(a, dtype=None, *, like=None):
    arr = asarray(a, dtype=dtype, like=like)
    if not arr.tensor.is_contiguous():
        arr.tensor = arr.tensor.contiguous()
    return arr


def from_dlpack(x, /):
    t = torch.from_dlpack(x)
    return ndarray(t)


def _extract_dtype(entry):
    try:
        dty = _dtypes.dtype(entry)
    except Exception:
        dty = asarray(entry).dtype
    return dty


def can_cast(from_, to, casting="safe"):
    from_ = _extract_dtype(from_)
    to_ = _extract_dtype(to)

    return _dtypes_impl.can_cast_impl(from_.torch_dtype, to_.torch_dtype, casting)


def result_type(*arrays_and_dtypes):
    tensors = []
    for entry in arrays_and_dtypes:
        try:
            t = asarray(entry).tensor
        except (RuntimeError, ValueError, TypeError):
            dty = _dtypes.dtype(entry)
            t = torch.empty(1, dtype=dty.torch_dtype)
        tensors.append(t)

    torch_dtype = _dtypes_impl.result_type_impl(*tensors)
    return _dtypes.dtype(torch_dtype)
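
Taken together, `array`/`asarray` and the `ndarray` class form a thin NumPy-flavoured front end over `torch.Tensor`: shapes come back as plain tuples, dunders are generated from ufuncs, and `asarray` of an existing wrapper is a no-op via the "happy path" above. A minimal usage sketch under the vendored layout shown in this diff (the alias `wnp` is purely illustrative):

from torch._numpy import _ndarray as wnp  # illustrative alias, not from this diff

a = wnp.array([[1, 2], [3, 4]])
print(a.shape, a.ndim)  # (2, 2) 2 -- plain Python tuple and int, NumPy-style

# asarray(copy=False) hits the "happy path" and returns the very same object
b = wnp.asarray(a)
assert b is a

# result_type defers to _dtypes_impl and the generated lookup tables;
# assuming the configured default dtypes (int64 / float64), int + float -> float64
print(wnp.result_type(a, wnp.asarray(1.0)))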
llava_next/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py
ADDED
@@ -0,0 +1,332 @@
from __future__ import annotations

from typing import Optional, Union

import torch

from . import _binary_ufuncs_impl, _dtypes_impl, _unary_ufuncs_impl, _util
from ._normalizations import (
    ArrayLike,
    CastingModes,
    DTypeLike,
    normalizer,
    NotImplementedType,
    OutArray,
    Scalar,
)


def _ufunc_postprocess(result, out, casting):
    if out is not None:
        result = _util.typecast_tensor(result, out.dtype.torch_dtype, casting)
        result = torch.broadcast_to(result, out.shape)
    return result


# ############# Binary ufuncs ######################

_binary = [
    name
    for name in dir(_binary_ufuncs_impl)
    if not name.startswith("_") and name not in ["torch", "matmul", "divmod", "ldexp"]
]


NEP50_FUNCS = (
    "add",
    "subtract",
    "multiply",
    "floor_divide",
    "true_divide",
    "divide",
    "remainder",
    "bitwise_and",
    "bitwise_or",
    "bitwise_xor",
    "bitwise_left_shift",
    "bitwise_right_shift",
    "hypot",
    "arctan2",
    "logaddexp",
    "logaddexp2",
    "heaviside",
    "copysign",
    "fmax",
    "minimum",
    "fmin",
    "maximum",
    "fmod",
    "gcd",
    "lcm",
    "pow",
)


def deco_binary_ufunc(torch_func):
    """Common infra for binary ufuncs.

    Normalize arguments, sort out type casting, broadcasting, and delegate to
    the pytorch functions for the actual work.
    """

    @normalizer
    def wrapped(
        x1: Union[ArrayLike, Scalar],
        x2: Union[ArrayLike, Scalar],
        /,
        out: Optional[OutArray] = None,
        *,
        where: NotImplementedType = True,
        casting: Optional[CastingModes] = "same_kind",
        order: NotImplementedType = "K",
        dtype: Optional[DTypeLike] = None,
        subok: NotImplementedType = False,
        signature: NotImplementedType = None,
        extobj: NotImplementedType = None,
    ):
        if dtype is not None:

            def cast(x, dtype):
                if isinstance(x, torch.Tensor):
                    return _util.typecast_tensor(x, dtype, casting)
                else:
                    return torch.as_tensor(x, dtype=dtype)

            x1 = cast(x1, dtype)
            x2 = cast(x2, dtype)
        elif isinstance(x1, torch.Tensor) and isinstance(x2, torch.Tensor):
            dtype = _dtypes_impl.result_type_impl(x1, x2)
            x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)
        else:
            x1, x2 = _dtypes_impl.nep50_to_tensors(
                x1, x2, torch_func.__name__ in NEP50_FUNCS
            )

        result = torch_func(x1, x2)

        return _ufunc_postprocess(result, out, casting)

    wrapped.__qualname__ = torch_func.__name__
    wrapped.__name__ = torch_func.__name__

    return wrapped


# matmul's signature is _slightly_ different from other ufuncs:
# - no where=...
# - additional axis=..., axes=...
# - no NEP50 scalars in or out
@normalizer
def matmul(
    x1: ArrayLike,
    x2: ArrayLike,
    /,
    out: Optional[OutArray] = None,
    *,
    casting: Optional[CastingModes] = "same_kind",
    order: NotImplementedType = "K",
    dtype: Optional[DTypeLike] = None,
    subok: NotImplementedType = False,
    signature: NotImplementedType = None,
    extobj: NotImplementedType = None,
    axes: NotImplementedType = None,
    axis: NotImplementedType = None,
):
    if dtype is None:
        dtype = _dtypes_impl.result_type_impl(x1, x2)
    x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)

    result = _binary_ufuncs_impl.matmul(x1, x2)

    result = _ufunc_postprocess(result, out, casting)
    return result
|
| 144 |
+
|
| 145 |
+
# ldexp casting is special : the dtype of the result == dtype of the 1st arg
|
| 146 |
+
@normalizer
|
| 147 |
+
def ldexp(
|
| 148 |
+
x1: Union[ArrayLike, Scalar],
|
| 149 |
+
x2: Union[ArrayLike, Scalar],
|
| 150 |
+
/,
|
| 151 |
+
out: Optional[OutArray] = None,
|
| 152 |
+
*,
|
| 153 |
+
where: NotImplementedType = True,
|
| 154 |
+
casting: Optional[CastingModes] = "same_kind",
|
| 155 |
+
order: NotImplementedType = "K",
|
| 156 |
+
dtype: Optional[DTypeLike] = None,
|
| 157 |
+
subok: NotImplementedType = False,
|
| 158 |
+
signature: NotImplementedType = None,
|
| 159 |
+
extobj: NotImplementedType = None,
|
| 160 |
+
):
|
| 161 |
+
if dtype is not None:
|
| 162 |
+
if isinstance(x1, torch.Tensor):
|
| 163 |
+
x1 = _util.typecast_tensor(x1, dtype, casting)
|
| 164 |
+
else:
|
| 165 |
+
x1 = torch.as_tensor(x1, dtype=dtype)
|
| 166 |
+
else:
|
| 167 |
+
if not isinstance(x1, torch.Tensor):
|
| 168 |
+
x1 = torch.as_tensor(x1)
|
| 169 |
+
x1 = _util.cast_int_to_float(x1)
|
| 170 |
+
|
| 171 |
+
x2 = torch.as_tensor(x2)
|
| 172 |
+
# the second arg must be integer
|
| 173 |
+
if _dtypes_impl._category(x2.dtype) != 1:
|
| 174 |
+
raise ValueError("ldexp 2nd arg must be integer")
|
| 175 |
+
|
| 176 |
+
result = _binary_ufuncs_impl.ldexp(x1, x2)
|
| 177 |
+
|
| 178 |
+
if x1.dtype == torch.float16:
|
| 179 |
+
# torch.ldexp(f16, int) -> f32, undo it
|
| 180 |
+
result = result.to(torch.float16)
|
| 181 |
+
|
| 182 |
+
return _ufunc_postprocess(result, out, casting)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
# nin=2, nout=2
|
| 186 |
+
@normalizer
|
| 187 |
+
def divmod(
|
| 188 |
+
x1: ArrayLike,
|
| 189 |
+
x2: ArrayLike,
|
| 190 |
+
out1: Optional[OutArray] = None,
|
| 191 |
+
out2: Optional[OutArray] = None,
|
| 192 |
+
/,
|
| 193 |
+
out: tuple[Optional[OutArray], Optional[OutArray]] = (None, None),
|
| 194 |
+
*,
|
| 195 |
+
where: NotImplementedType = True,
|
| 196 |
+
casting: Optional[CastingModes] = "same_kind",
|
| 197 |
+
order: NotImplementedType = "K",
|
| 198 |
+
dtype: Optional[DTypeLike] = None,
|
| 199 |
+
subok: NotImplementedType = False,
|
| 200 |
+
signature: NotImplementedType = None,
|
| 201 |
+
extobj: NotImplementedType = None,
|
| 202 |
+
):
|
| 203 |
+
# make sure we either have no out arrays at all, or there is either
|
| 204 |
+
# out1, out2, or out=tuple, but not both
|
| 205 |
+
num_outs = sum(x is not None for x in [out1, out2])
|
| 206 |
+
if num_outs == 1:
|
| 207 |
+
raise ValueError("both out1 and out2 need to be provided")
|
| 208 |
+
elif num_outs == 2:
|
| 209 |
+
o1, o2 = out
|
| 210 |
+
if o1 is not None or o2 is not None:
|
| 211 |
+
raise TypeError(
|
| 212 |
+
"cannot specify 'out' as both a positional and keyword argument"
|
| 213 |
+
)
|
| 214 |
+
else:
|
| 215 |
+
out1, out2 = out
|
| 216 |
+
|
| 217 |
+
if dtype is None:
|
| 218 |
+
dtype = _dtypes_impl.result_type_impl(x1, x2)
|
| 219 |
+
x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)
|
| 220 |
+
|
| 221 |
+
quot, rem = _binary_ufuncs_impl.divmod(x1, x2)
|
| 222 |
+
|
| 223 |
+
quot = _ufunc_postprocess(quot, out1, casting)
|
| 224 |
+
rem = _ufunc_postprocess(rem, out2, casting)
|
| 225 |
+
return quot, rem
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
#
|
| 229 |
+
# Attach ufuncs to this module, for a further export to the public namespace in __init__.py
|
| 230 |
+
#
|
| 231 |
+
for name in _binary:
|
| 232 |
+
ufunc = getattr(_binary_ufuncs_impl, name)
|
| 233 |
+
vars()[name] = deco_binary_ufunc(ufunc)
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def modf(x, /, *args, **kwds):
|
| 237 |
+
quot, rem = divmod(x, 1, *args, **kwds)
|
| 238 |
+
return rem, quot
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
_binary = _binary + ["divmod", "modf", "matmul", "ldexp"]
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# ############# Unary ufuncs ######################
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
_unary = [
|
| 248 |
+
name
|
| 249 |
+
for name in dir(_unary_ufuncs_impl)
|
| 250 |
+
if not name.startswith("_") and name != "torch"
|
| 251 |
+
]
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
# these are ufunc(int) -> float
|
| 255 |
+
_fp_unary = [
|
| 256 |
+
"arccos",
|
| 257 |
+
"arccosh",
|
| 258 |
+
"arcsin",
|
| 259 |
+
"arcsinh",
|
| 260 |
+
"arctan",
|
| 261 |
+
"arctanh",
|
| 262 |
+
"cbrt",
|
| 263 |
+
"cos",
|
| 264 |
+
"cosh",
|
| 265 |
+
"deg2rad",
|
| 266 |
+
"degrees",
|
| 267 |
+
"exp",
|
| 268 |
+
"exp2",
|
| 269 |
+
"expm1",
|
| 270 |
+
"log",
|
| 271 |
+
"log10",
|
| 272 |
+
"log1p",
|
| 273 |
+
"log2",
|
| 274 |
+
"rad2deg",
|
| 275 |
+
"radians",
|
| 276 |
+
"reciprocal",
|
| 277 |
+
"sin",
|
| 278 |
+
"sinh",
|
| 279 |
+
"sqrt",
|
| 280 |
+
"square",
|
| 281 |
+
"tan",
|
| 282 |
+
"tanh",
|
| 283 |
+
"trunc",
|
| 284 |
+
]
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def deco_unary_ufunc(torch_func):
|
| 288 |
+
"""Common infra for unary ufuncs.
|
| 289 |
+
|
| 290 |
+
Normalize arguments, sort out type casting, broadcasting and delegate to
|
| 291 |
+
the pytorch functions for the actual work.
|
| 292 |
+
"""
|
| 293 |
+
|
| 294 |
+
@normalizer
|
| 295 |
+
def wrapped(
|
| 296 |
+
x: ArrayLike,
|
| 297 |
+
/,
|
| 298 |
+
out: Optional[OutArray] = None,
|
| 299 |
+
*,
|
| 300 |
+
where=True,
|
| 301 |
+
casting: Optional[CastingModes] = "same_kind",
|
| 302 |
+
order="K",
|
| 303 |
+
dtype: Optional[DTypeLike] = None,
|
| 304 |
+
subok: NotImplementedType = False,
|
| 305 |
+
signature=None,
|
| 306 |
+
extobj=None,
|
| 307 |
+
):
|
| 308 |
+
if dtype is not None:
|
| 309 |
+
x = _util.typecast_tensor(x, dtype, casting)
|
| 310 |
+
|
| 311 |
+
if torch_func.__name__ in _fp_unary:
|
| 312 |
+
x = _util.cast_int_to_float(x)
|
| 313 |
+
|
| 314 |
+
result = torch_func(x)
|
| 315 |
+
result = _ufunc_postprocess(result, out, casting)
|
| 316 |
+
return result
|
| 317 |
+
|
| 318 |
+
wrapped.__qualname__ = torch_func.__name__
|
| 319 |
+
wrapped.__name__ = torch_func.__name__
|
| 320 |
+
|
| 321 |
+
return wrapped
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
#
|
| 325 |
+
# Attach ufuncs to this module, for a further export to the public namespace in __init__.py
|
| 326 |
+
#
|
| 327 |
+
for name in _unary:
|
| 328 |
+
ufunc = getattr(_unary_ufuncs_impl, name)
|
| 329 |
+
vars()[name] = deco_unary_ufunc(ufunc)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
__all__ = _binary + _unary # noqa: PLE0605
|
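
Taken together, `deco_binary_ufunc` and `deco_unary_ufunc` turn every torch-level implementation into a NumPy-style ufunc on this module. A hedged sketch of what that buys the caller (assumes `torch._numpy` is importable; result dtypes follow the NEP 50 rules implemented in `_dtypes_impl`, not verified here):

    import torch._numpy as tnp

    x = tnp.arange(4, dtype="int16")

    # Binary ufunc: the Python scalar takes the weak-scalar NEP 50 path via
    # _dtypes_impl.nep50_to_tensors before torch.add does the actual work.
    y = tnp.add(x, 1.5)

    # Unary ufunc listed in _fp_unary: integer input is first cast to the
    # default float dtype through _util.cast_int_to_float.
    z = tnp.sin(x)

    print(y.dtype, z.dtype)
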
llava_next/lib/python3.10/site-packages/torch/_numpy/_util.py
ADDED
@@ -0,0 +1,251 @@
+"""Assorted utilities, which do not need anything other than torch and stdlib.
+"""
+
+import operator
+
+import torch
+
+from . import _dtypes_impl
+
+
+# https://github.com/numpy/numpy/blob/v1.23.0/numpy/distutils/misc_util.py#L497-L504
+def is_sequence(seq):
+    if isinstance(seq, str):
+        return False
+    try:
+        len(seq)
+    except Exception:
+        return False
+    return True
+
+
+class AxisError(ValueError, IndexError):
+    pass
+
+
+class UFuncTypeError(TypeError, RuntimeError):
+    pass
+
+
+def cast_if_needed(tensor, dtype):
+    # NB: no casting if dtype=None
+    if dtype is not None and tensor.dtype != dtype:
+        tensor = tensor.to(dtype)
+    return tensor
+
+
+def cast_int_to_float(x):
+    # cast integers and bools to the default float dtype
+    if _dtypes_impl._category(x.dtype) < 2:
+        x = x.to(_dtypes_impl.default_dtypes().float_dtype)
+    return x
+
+
+# a replica of the version in ./numpy/numpy/core/src/multiarray/common.h
+def normalize_axis_index(ax, ndim, argname=None):
+    if not (-ndim <= ax < ndim):
+        raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}")
+    if ax < 0:
+        ax += ndim
+    return ax
+
+
+# from https://github.com/numpy/numpy/blob/main/numpy/core/numeric.py#L1378
+def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
+    """
+    Normalizes an axis argument into a tuple of non-negative integer axes.
+
+    This handles shorthands such as ``1`` and converts them to ``(1,)``,
+    as well as performing the handling of negative indices covered by
+    `normalize_axis_index`.
+
+    By default, this forbids axes from being specified multiple times.
+    Used internally by multi-axis-checking logic.
+
+    Parameters
+    ----------
+    axis : int, iterable of int
+        The un-normalized index or indices of the axis.
+    ndim : int
+        The number of dimensions of the array that `axis` should be normalized
+        against.
+    argname : str, optional
+        A prefix to put before the error message, typically the name of the
+        argument.
+    allow_duplicate : bool, optional
+        If False, the default, disallow an axis from being specified twice.
+
+    Returns
+    -------
+    normalized_axes : tuple of int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`
+    """
+    # Optimization to speed-up the most common cases.
+    if type(axis) not in (tuple, list):
+        try:
+            axis = [operator.index(axis)]
+        except TypeError:
+            pass
+    # Going via an iterator directly is slower than via list comprehension.
+    axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
+    if not allow_duplicate and len(set(axis)) != len(axis):
+        if argname:
+            raise ValueError(f"repeated axis in `{argname}` argument")
+        else:
+            raise ValueError("repeated axis")
+    return axis
+
+
+def allow_only_single_axis(axis):
+    if axis is None:
+        return axis
+    if len(axis) != 1:
+        raise NotImplementedError("does not handle tuple axis")
+    return axis[0]
+
+
+def expand_shape(arr_shape, axis):
+    # taken from numpy 1.23.x, expand_dims function
+    if type(axis) not in (list, tuple):
+        axis = (axis,)
+    out_ndim = len(axis) + len(arr_shape)
+    axis = normalize_axis_tuple(axis, out_ndim)
+    shape_it = iter(arr_shape)
+    shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+    return shape
+
+
+def apply_keepdims(tensor, axis, ndim):
+    if axis is None:
+        # tensor was a scalar
+        shape = (1,) * ndim
+        tensor = tensor.expand(shape).contiguous()
+    else:
+        shape = expand_shape(tensor.shape, axis)
+        tensor = tensor.reshape(shape)
+    return tensor
+
+
+def axis_none_flatten(*tensors, axis=None):
+    """Flatten the arrays if axis is None."""
+    if axis is None:
+        tensors = tuple(ar.flatten() for ar in tensors)
+        return tensors, 0
+    else:
+        return tensors, axis
+
+
+def typecast_tensor(t, target_dtype, casting):
+    """Dtype-cast tensor to target_dtype.
+
+    Parameters
+    ----------
+    t : torch.Tensor
+        The tensor to cast
+    target_dtype : torch dtype object
+        The array dtype to cast all tensors to
+    casting : str
+        The casting mode, see `np.can_cast`
+
+    Returns
+    -------
+    `torch.Tensor` of the `target_dtype` dtype
+
+    Raises
+    ------
+    ValueError
+        if the argument cannot be cast according to the `casting` rule
+
+    """
+    can_cast = _dtypes_impl.can_cast_impl
+
+    if not can_cast(t.dtype, target_dtype, casting=casting):
+        raise TypeError(
+            f"Cannot cast array data from {t.dtype} to"
+            f" {target_dtype} according to the rule '{casting}'"
+        )
+    return cast_if_needed(t, target_dtype)
+
+
+def typecast_tensors(tensors, target_dtype, casting):
+    return tuple(typecast_tensor(t, target_dtype, casting) for t in tensors)
+
+
+def _coerce_to_tensor(obj, dtype=None, copy=False, ndmin=0):
+    """The core logic of the array(...) function.
+
+    Parameters
+    ----------
+    obj : tensor_like
+        The thing to coerce
+    dtype : torch.dtype object or None
+        Coerce to this torch dtype
+    copy : bool
+        Copy or not
+    ndmin : int
+        The result has at least this many dimensions
+    is_weak : bool
+        Whether obj is a weakly typed python scalar.
+
+    Returns
+    -------
+    tensor : torch.Tensor
+        a tensor object with requested dtype, ndim and copy semantics.
+
+    Notes
+    -----
+    This is almost a "tensor_like" coercion function. Does not handle wrapper
+    ndarrays (those should be handled in the ndarray-aware layer prior to
+    invoking this function).
+    """
+    if isinstance(obj, torch.Tensor):
+        tensor = obj
+    else:
+        tensor = torch.as_tensor(obj)
+
+        # tensor.dtype is the pytorch default, typically float32. If obj's elements
+        # are not exactly representable in float32, we've lost precision:
+        # >>> torch.as_tensor(1e12).item() - 1e12
+        # -4096.0
+
+        # Therefore, we treat `tensor.dtype` as a hint, and convert the
+        # original object *again*, this time with an explicit dtype.
+        torch_dtype = _dtypes_impl.get_default_dtype_for(tensor.dtype)
+        tensor = torch.as_tensor(obj, dtype=torch_dtype)
+
+    # type cast if requested
+    tensor = cast_if_needed(tensor, dtype)
+
+    # adjust ndim if needed
+    ndim_extra = ndmin - tensor.ndim
+    if ndim_extra > 0:
+        tensor = tensor.view((1,) * ndim_extra + tensor.shape)
+
+    # copy if requested
+    if copy:
+        tensor = tensor.clone()
+
+    return tensor
+
+
+def ndarrays_to_tensors(*inputs):
+    """Convert all ndarrays from `inputs` to tensors. (other things are intact)"""
+    from ._ndarray import ndarray
+
+    if len(inputs) == 0:
+        raise ValueError()
+    elif len(inputs) == 1:
+        input_ = inputs[0]
+        if isinstance(input_, ndarray):
+            return input_.tensor
+        elif isinstance(input_, tuple):
+            result = []
+            for sub_input in input_:
+                sub_result = ndarrays_to_tensors(sub_input)
+                result.append(sub_result)
+            return tuple(result)
+        else:
+            return input_
+    else:
+        assert isinstance(inputs, tuple)  # sanity check
+        return ndarrays_to_tensors(inputs)
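
The axis helpers above are direct ports of their NumPy counterparts. A small sketch of their behavior (pure bookkeeping, no tensors involved; outputs shown as comments):

    from torch._numpy import _util

    # Scalars become 1-tuples and negative axes wrap around, mirroring
    # numpy's normalize_axis_tuple.
    print(_util.normalize_axis_tuple(-1, ndim=3))       # (2,)
    print(_util.normalize_axis_tuple((0, -1), ndim=3))  # (0, 2)

    # expand_shape computes where expand_dims would place new length-1 axes.
    print(_util.expand_shape((4, 5), axis=(0, 2)))      # [1, 4, 1, 5]
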
llava_next/lib/python3.10/site-packages/torch/_numpy/random.py
ADDED
@@ -0,0 +1,160 @@
+"""Wrapper to mimic (parts of) np.random API surface.
+
+NumPy has strict guarantees on reproducibility etc; here we don't give any.
+
+Q: default dtype is float64 in numpy
+
+"""
+from __future__ import annotations
+
+import functools
+from math import sqrt
+from typing import Optional
+
+import torch
+
+from . import _dtypes_impl, _util
+from ._normalizations import array_or_scalar, ArrayLike, normalizer
+
+
+__all__ = [
+    "seed",
+    "random_sample",
+    "sample",
+    "random",
+    "rand",
+    "randn",
+    "normal",
+    "choice",
+    "randint",
+    "shuffle",
+    "uniform",
+    "USE_NUMPY_RANDOM",
+]
+
+
+USE_NUMPY_RANDOM = False
+
+
+def deco_stream(func):
+    @functools.wraps(func)
+    def inner(*args, **kwds):
+        if USE_NUMPY_RANDOM is False:
+            return func(*args, **kwds)
+        elif USE_NUMPY_RANDOM is True:
+            from numpy import random as nr
+
+            f = getattr(nr, func.__name__)
+            return f(*args, **kwds)
+        else:
+            raise ValueError(f"USE_NUMPY_RANDOM={USE_NUMPY_RANDOM} not understood.")
+
+    return inner
+
+
+@deco_stream
+def seed(seed=None):
+    if seed is not None:
+        torch.random.manual_seed(seed)
+
+
+@deco_stream
+def random_sample(size=None):
+    if size is None:
+        size = ()
+    dtype = _dtypes_impl.default_dtypes().float_dtype
+    values = torch.empty(size, dtype=dtype).uniform_()
+    return array_or_scalar(values, return_scalar=size is None)
+
+
+@deco_stream
+def rand(*size):
+    return random_sample(size)
+
+
+sample = random_sample
+random = random_sample
+
+
+@deco_stream
+def uniform(low=0.0, high=1.0, size=None):
+    if size is None:
+        size = ()
+    dtype = _dtypes_impl.default_dtypes().float_dtype
+    values = torch.empty(size, dtype=dtype).uniform_(low, high)
+    return array_or_scalar(values, return_scalar=size is None)
+
+
+@deco_stream
+def randn(*size):
+    dtype = _dtypes_impl.default_dtypes().float_dtype
+    values = torch.randn(size, dtype=dtype)
+    return array_or_scalar(values, return_scalar=size is None)
+
+
+@deco_stream
+def normal(loc=0.0, scale=1.0, size=None):
+    if size is None:
+        size = ()
+    dtype = _dtypes_impl.default_dtypes().float_dtype
+    values = torch.empty(size, dtype=dtype).normal_(loc, scale)
+    return array_or_scalar(values, return_scalar=size is None)
+
+
+@deco_stream
+@normalizer
+def shuffle(x: ArrayLike):
+    perm = torch.randperm(x.shape[0])
+    xp = x[perm]
+    x.copy_(xp)
+
+
+@deco_stream
+def randint(low, high=None, size=None):
+    if size is None:
+        size = ()
+    if not isinstance(size, (tuple, list)):
+        size = (size,)
+    if high is None:
+        low, high = 0, low
+    values = torch.randint(low, high, size=size)
+    return array_or_scalar(values, int, return_scalar=size is None)
+
+
+@deco_stream
+@normalizer
+def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike] = None):
+    # https://stackoverflow.com/questions/59461811/random-choice-with-pytorch
+    if a.numel() == 1:
+        a = torch.arange(a)
+
+    # TODO: check a.dtype is integer -- cf np.random.choice(3.4) which raises
+
+    # number of draws
+    if size is None:
+        num_el = 1
+    elif _util.is_sequence(size):
+        num_el = 1
+        for el in size:
+            num_el *= el
+    else:
+        num_el = size
+
+    # prepare the probabilities
+    if p is None:
+        p = torch.ones_like(a) / a.shape[0]
+
+    # cf https://github.com/numpy/numpy/blob/main/numpy/random/mtrand.pyx#L973
+    atol = sqrt(torch.finfo(p.dtype).eps)
+    if abs(p.sum() - 1.0) > atol:
+        raise ValueError("probabilities do not sum to 1.")
+
+    # actually sample
+    indices = torch.multinomial(p, num_el, replacement=replace)
+
+    if _util.is_sequence(size):
+        indices = indices.reshape(size)
+
+    samples = a[indices]
+
+    return samples
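
Because `deco_stream` re-reads the module-level `USE_NUMPY_RANDOM` flag on every call, the backend can be flipped at runtime. A hedged sketch (assumes both torch and numpy are installed; values differ per run, only the routing is illustrated):

    from torch._numpy import random as tnp_random

    tnp_random.seed(1234)  # seeds torch's global generator
    print(tnp_random.uniform(0.0, 1.0, size=3))  # torch-backed draw

    # Flip the switch: the same names now forward to numpy.random by name.
    tnp_random.USE_NUMPY_RANDOM = True
    print(tnp_random.uniform(0.0, 1.0, size=3))  # numpy-backed draw
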
llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__init__.py
ADDED
@@ -0,0 +1,17 @@
+from .utils import (
+    _gen_alignment_data,
+    assert_,
+    assert_allclose,
+    assert_almost_equal,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_array_less,
+    assert_equal,
+    assert_raises_regex,
+    assert_warns,
+    HAS_REFCOUNT,
+    IS_WASM,
+    suppress_warnings,
+)
+
+# from .testing import assert_allclose  # FIXME
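
These re-exports let test suites written against `numpy.testing` run unchanged against `torch._numpy`. A minimal sketch, assuming the asserts keep their NumPy signatures (not verified here):

    import torch._numpy as tnp
    from torch._numpy.testing import assert_allclose, assert_array_equal

    a = tnp.arange(3)
    assert_array_equal(a, [0, 1, 2])
    assert_allclose(a / 3.0, [0.0, 1 / 3, 2 / 3], atol=1e-12)
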
llava_next/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (537 Bytes)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.1 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc
ADDED
Binary file (1.2 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc
ADDED
Binary file (2.53 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc
ADDED
Binary file (33.8 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc
ADDED
Binary file (814 Bytes)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc
ADDED
Binary file (207 Bytes)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc
ADDED
Binary file (53.1 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc
ADDED
Binary file (23.7 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc
ADDED
Binary file (2.38 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc
ADDED
Binary file (20 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc
ADDED
Binary file (24.5 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc
ADDED
Binary file (13.8 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc
ADDED
Binary file (19.6 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc
ADDED
Binary file (10.3 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc
ADDED
Binary file (3.77 kB)

llava_next/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc
ADDED
Binary file (2.33 kB)

llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (199 Bytes)

llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc
ADDED
Binary file (30.7 kB)

llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc
ADDED
Binary file (8.1 kB)

llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/util.cpython-310.pyc
ADDED
Binary file (1.9 kB)

llava_next/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/z3_types.cpython-310.pyc
ADDED
Binary file (706 Bytes)
llava_next/lib/python3.10/site-packages/torch/share/cmake/ATen/ATenConfig.cmake
ADDED
@@ -0,0 +1,9 @@
+# Find the TH includes and library
+#
+# ATEN_INCLUDE_DIR -- where to find the includes
+# ATEN_LIBRARIES -- list of libraries to link against
+# ATEN_FOUND -- set to 1 if found
+
+set(ATEN_FOUND 1)
+set(ATEN_INCLUDE_DIR "/pytorch/torch/include")
+set(ATEN_LIBRARIES "")
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake
ADDED
@@ -0,0 +1,130 @@
+# - Config file for the Caffe2 package
+# It defines the following variable(s)
+#   CAFFE2_INCLUDE_DIRS - include directories for FooBar
+# as well as Caffe2 targets for other cmake libraries to use.
+
+# library version information
+
+# Utils functions.
+include("${CMAKE_CURRENT_LIST_DIR}/public/utils.cmake")
+
+# Depending on whether Caffe2 uses gflags during compile time or
+# not, invoke gflags.
+if(OFF)
+  include("${CMAKE_CURRENT_LIST_DIR}/public/gflags.cmake")
+  if(NOT TARGET gflags)
+    message(FATAL_ERROR
+        "Your installed Caffe2 version uses gflags but the gflags library "
+        "cannot be found. Did you accidentally remove it, or have you set "
+        "the right CMAKE_PREFIX_PATH and/or GFLAGS_ROOT_DIR? If you do not "
+        "have gflags, you will need to install gflags and set the library "
+        "path accordingly.")
+  endif()
+endif()
+
+# Depending on whether Caffe2 uses glog during compile time or
+# not, invoke glog.
+if(OFF)
+  include("${CMAKE_CURRENT_LIST_DIR}/public/glog.cmake")
+  if(NOT TARGET glog::glog)
+    message(FATAL_ERROR
+        "Your installed Caffe2 version uses glog but the glog library "
+        "cannot be found. Did you accidentally remove it, or have you set "
+        "the right CMAKE_PREFIX_PATH and/or GFLAGS_ROOT_DIR? If you do not "
+        "have glog, you will need to install glog and set the library "
+        "path accordingly.")
+  endif()
+endif()
+
+# Protobuf
+if(ON)
+  if(NOT TARGET protobuf::libprotobuf)
+    # Define protobuf::libprotobuf as a dummy target to resolve references to
+    # protobuf::libprotobuf in Caffe2Targets.cmake.
+    add_library(dummy INTERFACE)
+    add_library(protobuf::libprotobuf ALIAS dummy)
+  endif()
+else()
+  include("${CMAKE_CURRENT_LIST_DIR}/public/protobuf.cmake")
+  if(NOT TARGET protobuf::libprotobuf)
+    message(FATAL_ERROR
+        "Your installed Caffe2 version uses protobuf but the protobuf library "
+        "cannot be found. Did you accidentally remove it, or have you set "
+        "the right CMAKE_PREFIX_PATH? If you do not have protobuf, you will "
+        "need to install protobuf and set the library path accordingly.")
+  endif()
+  message(STATUS "Caffe2: Protobuf version " ${Protobuf_VERSION})
+  # If during build time we know the protobuf version, we will also do a sanity
+  # check to ensure that the protobuf library that Caffe2 found is consistent
+  # with the compiled version.
+  if(FALSE)
+    if(NOT (${Protobuf_VERSION} VERSION_EQUAL Protobuf_VERSION_NOTFOUND))
+      message(FATAL_ERROR
+          "Your installed Caffe2 is built with protobuf "
+          "Protobuf_VERSION_NOTFOUND"
+          ", while your current cmake setting discovers protobuf version "
+          ${Protobuf_VERSION}
+          ". Please specify a protobuf version that is the same as the built "
+          "version.")
+    endif()
+  endif()
+endif()
+
+if (OFF)
+  include("${CMAKE_CURRENT_LIST_DIR}/public/LoadHIP.cmake")
+endif()
+
+if(ON)
+  # The file public/cuda.cmake exclusively uses CAFFE2_USE_*.
+  # If Caffe2 was compiled with the libraries below, they must
+  # be found again when including the Caffe2 target.
+  set(CAFFE2_USE_CUDA ON)
+  set(CAFFE2_USE_TENSORRT OFF)
+
+  # Add current directory to module path so we pick up FindCUDAToolkit.cmake
+  set(old_CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}")
+  list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}")
+  include("${CMAKE_CURRENT_LIST_DIR}/public/cuda.cmake")
+  set(CMAKE_MODULE_PATH "${old_CMAKE_MODULE_PATH}")
+
+  if(ON AND NOT CAFFE2_USE_CUDA)
+    message(FATAL_ERROR
+        "Your installed Caffe2 version uses CUDA but I cannot find the CUDA "
+        "libraries. Please set the proper CUDA prefixes and / or install "
+        "CUDA.")
+  endif()
+  if(OFF AND NOT CAFFE2_USE_TENSORRT)
+    message(FATAL_ERROR
+        "Your installed Caffe2 version uses TensorRT but I cannot find the TensorRT "
+        "libraries. Please set the proper TensorRT prefixes and / or install "
+        "TensorRT.")
+  endif()
+endif()
+
+include("${CMAKE_CURRENT_LIST_DIR}/public/mkl.cmake")
+
+if(ON)
+  include("${CMAKE_CURRENT_LIST_DIR}/public/mkldnn.cmake")
+endif()
+
+# import targets
+include ("${CMAKE_CURRENT_LIST_DIR}/Caffe2Targets.cmake")
+
+# Interface libraries, that allows one to build proper link flags.
+# We will also define a helper variable, Caffe2_MAIN_LIBS, that resolves to
+# the main caffe2 libraries in cases of cuda presence / absence.
+set(Caffe2_MAIN_LIBS torch_library)
+
+# include directory.
+#
+# Newer versions of CMake set the INTERFACE_INCLUDE_DIRECTORIES property
+# of the imported targets. It is hence not necessary to add this path
+# manually to the include search path for targets which link to gflags.
+# The following lines are here for backward compatibility, in case one
+# would like to use the old-style include path.
+get_filename_component(
+    CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+# Note: the current list dir is _INSTALL_PREFIX/share/cmake/Gloo.
+get_filename_component(
+    _INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/../../../" ABSOLUTE)
+set(CAFFE2_INCLUDE_DIRS "${_INSTALL_PREFIX}/include")
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets-release.cmake
ADDED
@@ -0,0 +1,59 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "c10_cuda" for configuration "Release"
+set_property(TARGET c10_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(c10_cuda PROPERTIES
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libc10_cuda.so"
+  IMPORTED_SONAME_RELEASE "libc10_cuda.so"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS c10_cuda )
+list(APPEND _IMPORT_CHECK_FILES_FOR_c10_cuda "${_IMPORT_PREFIX}/lib/libc10_cuda.so" )
+
+# Import target "c10" for configuration "Release"
+set_property(TARGET c10 APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(c10 PROPERTIES
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libc10.so"
+  IMPORTED_SONAME_RELEASE "libc10.so"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS c10 )
+list(APPEND _IMPORT_CHECK_FILES_FOR_c10 "${_IMPORT_PREFIX}/lib/libc10.so" )
+
+# Import target "torch_cpu" for configuration "Release"
+set_property(TARGET torch_cpu APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(torch_cpu PROPERTIES
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libtorch_cpu.so"
+  IMPORTED_SONAME_RELEASE "libtorch_cpu.so"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS torch_cpu )
+list(APPEND _IMPORT_CHECK_FILES_FOR_torch_cpu "${_IMPORT_PREFIX}/lib/libtorch_cpu.so" )
+
+# Import target "torch_cuda" for configuration "Release"
+set_property(TARGET torch_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(torch_cuda PROPERTIES
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libtorch_cuda.so"
+  IMPORTED_SONAME_RELEASE "libtorch_cuda.so"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS torch_cuda )
+list(APPEND _IMPORT_CHECK_FILES_FOR_torch_cuda "${_IMPORT_PREFIX}/lib/libtorch_cuda.so" )
+
+# Import target "torch" for configuration "Release"
+set_property(TARGET torch APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(torch PROPERTIES
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libtorch.so"
+  IMPORTED_SONAME_RELEASE "libtorch.so"
+  )
+
+list(APPEND _IMPORT_CHECK_TARGETS torch )
+list(APPEND _IMPORT_CHECK_FILES_FOR_torch "${_IMPORT_PREFIX}/lib/libtorch.so" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets.cmake
ADDED
@@ -0,0 +1,180 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5)
+   message(FATAL_ERROR "CMake >= 2.6.0 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.6...3.17)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_targetsDefined)
+set(_targetsNotDefined)
+set(_expectedTargets)
+foreach(_expectedTarget c10_cuda c10 torch_cpu torch_cpu_library torch_cuda torch_cuda_library torch torch_library)
+  list(APPEND _expectedTargets ${_expectedTarget})
+  if(NOT TARGET ${_expectedTarget})
+    list(APPEND _targetsNotDefined ${_expectedTarget})
+  endif()
+  if(TARGET ${_expectedTarget})
+    list(APPEND _targetsDefined ${_expectedTarget})
+  endif()
+endforeach()
+if("${_targetsDefined}" STREQUAL "${_expectedTargets}")
+  unset(_targetsDefined)
+  unset(_targetsNotDefined)
+  unset(_expectedTargets)
+  set(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT "${_targetsDefined}" STREQUAL "")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n")
+endif()
+unset(_targetsDefined)
+unset(_targetsNotDefined)
+unset(_expectedTargets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target c10_cuda
+add_library(c10_cuda SHARED IMPORTED)
+
+set_target_properties(c10_cuda PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include"
+  INTERFACE_LINK_LIBRARIES "c10;torch::cudart"
+)
+
+# Create imported target c10
+add_library(c10 SHARED IMPORTED)
+
+set_target_properties(c10 PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include"
+)
+
+# Create imported target torch_cpu
+add_library(torch_cpu SHARED IMPORTED)
+
+set_target_properties(torch_cpu PROPERTIES
+  INTERFACE_COMPILE_DEFINITIONS "USE_DISTRIBUTED;USE_C10D_GLOO;USE_RPC;USE_TENSORPIPE"
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include"
+  INTERFACE_LINK_LIBRARIES "protobuf::libprotobuf;c10;caffe2::mkl"
+)
+
+# Create imported target torch_cpu_library
+add_library(torch_cpu_library INTERFACE IMPORTED)
+
+set_target_properties(torch_cpu_library PROPERTIES
+  INTERFACE_COMPILE_DEFINITIONS "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_COMPILE_DEFINITIONS>"
+  INTERFACE_COMPILE_OPTIONS "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_COMPILE_OPTIONS>"
+  INTERFACE_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_INCLUDE_DIRECTORIES>"
+  INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$<TARGET_FILE:torch_cpu>\" -Wl,--as-needed;\$<TARGET_PROPERTY:torch_cpu,INTERFACE_LINK_LIBRARIES>"
+  INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>"
+)
+
+# Create imported target torch_cuda
+add_library(torch_cuda SHARED IMPORTED)
+
+set_target_properties(torch_cuda PROPERTIES
+  INTERFACE_COMPILE_DEFINITIONS "USE_C10D_NCCL"
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include"
+  INTERFACE_LINK_LIBRARIES "torch::cudart;c10_cuda;torch::nvtoolsext;torch_cpu_library;caffe2::cufft;caffe2::curand;caffe2::cublas"
+)
+
+# Create imported target torch_cuda_library
+add_library(torch_cuda_library INTERFACE IMPORTED)
+
+set_target_properties(torch_cuda_library PROPERTIES
+  INTERFACE_COMPILE_DEFINITIONS "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_COMPILE_DEFINITIONS>"
+  INTERFACE_COMPILE_OPTIONS "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_COMPILE_OPTIONS>"
+  INTERFACE_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_INCLUDE_DIRECTORIES>"
+  INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$<TARGET_FILE:torch_cuda>\" -Wl,--as-needed;\$<TARGET_PROPERTY:torch_cuda,INTERFACE_LINK_LIBRARIES>"
+  INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>"
+)
+
+# Create imported target torch
+add_library(torch SHARED IMPORTED)
+
+set_target_properties(torch PROPERTIES
+  INTERFACE_LINK_LIBRARIES "torch_cpu_library;torch_cuda_library"
+)
+
+# Create imported target torch_library
+add_library(torch_library INTERFACE IMPORTED)
+
+set_target_properties(torch_library PROPERTIES
+  INTERFACE_COMPILE_DEFINITIONS "\$<TARGET_PROPERTY:torch,INTERFACE_COMPILE_DEFINITIONS>"
+  INTERFACE_COMPILE_OPTIONS "\$<TARGET_PROPERTY:torch,INTERFACE_COMPILE_OPTIONS>"
+  INTERFACE_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch,INTERFACE_INCLUDE_DIRECTORIES>"
+  INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$<TARGET_FILE:torch>\" -Wl,--as-needed;\$<TARGET_PROPERTY:torch,INTERFACE_LINK_LIBRARIES>"
+  INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch,INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>"
+)
+
+if(CMAKE_VERSION VERSION_LESS 3.0.0)
+  message(FATAL_ERROR "This file relies on consumers using CMake 3.0.0 or greater.")
+endif()
+
+# Load information for each installed configuration.
+get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+file(GLOB CONFIG_FILES "${_DIR}/Caffe2Targets-*.cmake")
+foreach(f ${CONFIG_FILES})
+  include(${f})
+endforeach()
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(target ${_IMPORT_CHECK_TARGETS} )
+  foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} )
+    if(NOT EXISTS "${file}" )
+      message(FATAL_ERROR "The imported target \"${target}\" references the file
+   \"${file}\"
+but this file does not exist. Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+    endif()
+  endforeach()
+  unset(_IMPORT_CHECK_FILES_FOR_${target})
+endforeach()
+unset(_IMPORT_CHECK_TARGETS)
+
+# Make sure the targets which have been exported in some other
+# export set exist.
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+foreach(_target "protobuf::libprotobuf" )
+  if(NOT TARGET "${_target}" )
+    set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}")
+  endif()
+endforeach()
+
+if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+  if(CMAKE_FIND_PACKAGE_NAME)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  else()
+    message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  endif()
+endif()
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake
ADDED
@@ -0,0 +1,1073 @@

# This module is back-ported from CMake 3.17 and above to work with CMake 3.10

# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
# file Copyright.txt or https://cmake.org/licensing for details.

#[=======================================================================[.rst:
FindCUDAToolkit
---------------

.. versionadded:: 3.17

This script locates the NVIDIA CUDA toolkit and the associated libraries, but
does not require the ``CUDA`` language be enabled for a given project. This
module does not search for the NVIDIA CUDA Samples.

.. versionadded:: 3.19
  QNX support.

Search Behavior
^^^^^^^^^^^^^^^

The CUDA Toolkit search behavior uses the following order:

1. If the ``CUDA`` language has been enabled we will use the directory
   containing the compiler as the first search location for ``nvcc``.

2. If the ``CUDAToolkit_ROOT`` cmake configuration variable (e.g.,
   ``-DCUDAToolkit_ROOT=/some/path``) *or* environment variable is defined, it
   will be searched. If both an environment variable **and** a
   configuration variable are specified, the *configuration* variable takes
   precedence.

   The directory specified here must be such that the executable ``nvcc`` or
   the appropriate ``version.txt`` file can be found underneath the specified
   directory.

3. If the ``CUDA_PATH`` environment variable is defined, it will be searched
   for ``nvcc``.

4. The user's path is searched for ``nvcc`` using :command:`find_program`. If
   this is found, no subsequent search attempts are performed. Users are
   responsible for ensuring that the first ``nvcc`` to show up in the path is
   the desired path in the event that multiple CUDA Toolkits are installed.

5. On Unix systems, if the symbolic link ``/usr/local/cuda`` exists, this is
   used. No subsequent search attempts are performed. No default symbolic link
   location exists for the Windows platform.

6. The platform specific default install locations are searched. If exactly one
   candidate is found, this is used. The default CUDA Toolkit install locations
   searched are:

   +-------------+-------------------------------------------------------------+
   | Platform    | Search Pattern                                              |
   +=============+=============================================================+
   | macOS       | ``/Developer/NVIDIA/CUDA-X.Y``                              |
   +-------------+-------------------------------------------------------------+
   | Other Unix  | ``/usr/local/cuda-X.Y``                                     |
   +-------------+-------------------------------------------------------------+
   | Windows     | ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y`` |
   +-------------+-------------------------------------------------------------+

   Where ``X.Y`` would be a specific version of the CUDA Toolkit, such as
   ``/usr/local/cuda-9.0`` or
   ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0``

.. note::

    When multiple CUDA Toolkits are installed in the default location of a
    system (e.g., both ``/usr/local/cuda-9.0`` and ``/usr/local/cuda-10.0``
    exist but the ``/usr/local/cuda`` symbolic link does **not** exist), this
    package is marked as **not** found.

    There are too many factors involved in making an automatic decision in
    the presence of multiple CUDA Toolkits being installed. In this
    situation, users are encouraged to either (1) set ``CUDAToolkit_ROOT`` or
    (2) ensure that the correct ``nvcc`` executable shows up in ``$PATH`` for
    :command:`find_program` to find.

Arguments
^^^^^^^^^

``[<version>]``
    The ``[<version>]`` argument requests a version with which the package found
    should be compatible. See :ref:`find_package version format <FIND_PACKAGE_VERSION_FORMAT>`
    for more details.

Options
^^^^^^^

``REQUIRED``
    If specified, configuration will error if a suitable CUDA Toolkit is not
    found.

``QUIET``
    If specified, the search for a suitable CUDA Toolkit will not produce any
    messages.

``EXACT``
    If specified, the CUDA Toolkit is considered found only if the exact
    ``VERSION`` specified is recovered.

Imported targets
^^^^^^^^^^^^^^^^

An :ref:`imported target <Imported targets>` named ``CUDA::toolkit`` is provided.

This module defines :prop_tgt:`IMPORTED` targets for each
of the following libraries that are part of the CUDAToolkit:

- :ref:`CUDA Runtime Library<cuda_toolkit_rt_lib>`
- :ref:`CUDA Driver Library<cuda_toolkit_driver_lib>`
- :ref:`cuBLAS<cuda_toolkit_cuBLAS>`
- :ref:`cuFFT<cuda_toolkit_cuFFT>`
- :ref:`cuRAND<cuda_toolkit_cuRAND>`
- :ref:`cuSOLVER<cuda_toolkit_cuSOLVER>`
- :ref:`cuSPARSE<cuda_toolkit_cuSPARSE>`
- :ref:`cuPTI<cuda_toolkit_cupti>`
- :ref:`NPP<cuda_toolkit_NPP>`
- :ref:`nvBLAS<cuda_toolkit_nvBLAS>`
- :ref:`nvGRAPH<cuda_toolkit_nvGRAPH>`
- :ref:`nvJPEG<cuda_toolkit_nvJPEG>`
- :ref:`nvidia-ML<cuda_toolkit_nvML>`
- :ref:`nvRTC<cuda_toolkit_nvRTC>`
- :ref:`nvToolsExt<cuda_toolkit_nvToolsExt>`
- :ref:`OpenCL<cuda_toolkit_opencl>`
- :ref:`cuLIBOS<cuda_toolkit_cuLIBOS>`

.. _`cuda_toolkit_rt_lib`:

CUDA Runtime Library
""""""""""""""""""""

The CUDA Runtime library (cudart) is what most applications will typically
need to link against to make any calls such as `cudaMalloc`, and `cudaFree`.

Targets Created:

- ``CUDA::cudart``
- ``CUDA::cudart_static``

.. _`cuda_toolkit_driver_lib`:

CUDA Driver Library
"""""""""""""""""""

The CUDA Driver library (cuda) is used by applications that use calls
such as `cuMemAlloc`, and `cuMemFree`.

Targets Created:

- ``CUDA::cuda_driver``

.. _`cuda_toolkit_cuBLAS`:

cuBLAS
""""""

The `cuBLAS <https://docs.nvidia.com/cuda/cublas/index.html>`_ library.

Targets Created:

- ``CUDA::cublas``
- ``CUDA::cublas_static``
- ``CUDA::cublasLt`` starting in CUDA 10.1
- ``CUDA::cublasLt_static`` starting in CUDA 10.1

.. _`cuda_toolkit_cuFFT`:

cuFFT
"""""

The `cuFFT <https://docs.nvidia.com/cuda/cufft/index.html>`_ library.

Targets Created:

- ``CUDA::cufft``
- ``CUDA::cufftw``
- ``CUDA::cufft_static``
- ``CUDA::cufft_static_nocallback`` starting in CUDA 9.2, requires CMake 3.23+
- ``CUDA::cufftw_static``

.. _`cuda_toolkit_cuRAND`:

cuRAND
""""""

The `cuRAND <https://docs.nvidia.com/cuda/curand/index.html>`_ library.

Targets Created:

- ``CUDA::curand``
- ``CUDA::curand_static``

.. _`cuda_toolkit_cuSOLVER`:

cuSOLVER
""""""""

The `cuSOLVER <https://docs.nvidia.com/cuda/cusolver/index.html>`_ library.

Targets Created:

- ``CUDA::cusolver``
- ``CUDA::cusolver_static``

.. _`cuda_toolkit_cuSPARSE`:

cuSPARSE
""""""""

The `cuSPARSE <https://docs.nvidia.com/cuda/cusparse/index.html>`_ library.

Targets Created:

- ``CUDA::cusparse``
- ``CUDA::cusparse_static``

.. _`cuda_toolkit_cupti`:

cupti
"""""

The `NVIDIA CUDA Profiling Tools Interface <https://developer.nvidia.com/CUPTI>`_.

Targets Created:

- ``CUDA::cupti``
- ``CUDA::cupti_static``

.. _`cuda_toolkit_NPP`:

NPP
"""

The `NPP <https://docs.nvidia.com/cuda/npp/index.html>`_ libraries.

Targets Created:

- `nppc`:

  - ``CUDA::nppc``
  - ``CUDA::nppc_static``

- `nppial`: Arithmetic and logical operation functions in `nppi_arithmetic_and_logical_operations.h`

  - ``CUDA::nppial``
  - ``CUDA::nppial_static``

- `nppicc`: Color conversion and sampling functions in `nppi_color_conversion.h`

  - ``CUDA::nppicc``
  - ``CUDA::nppicc_static``

- `nppicom`: JPEG compression and decompression functions in `nppi_compression_functions.h`
  Removed starting in CUDA 11.0, use :ref:`nvJPEG<cuda_toolkit_nvJPEG>` instead.

  - ``CUDA::nppicom``
  - ``CUDA::nppicom_static``

- `nppidei`: Data exchange and initialization functions in `nppi_data_exchange_and_initialization.h`

  - ``CUDA::nppidei``
  - ``CUDA::nppidei_static``

- `nppif`: Filtering and computer vision functions in `nppi_filter_functions.h`

  - ``CUDA::nppif``
  - ``CUDA::nppif_static``

- `nppig`: Geometry transformation functions found in `nppi_geometry_transforms.h`

  - ``CUDA::nppig``
  - ``CUDA::nppig_static``

- `nppim`: Morphological operation functions found in `nppi_morphological_operations.h`

  - ``CUDA::nppim``
  - ``CUDA::nppim_static``

- `nppist`: Statistics and linear transform in `nppi_statistics_functions.h` and `nppi_linear_transforms.h`

  - ``CUDA::nppist``
  - ``CUDA::nppist_static``

- `nppisu`: Memory support functions in `nppi_support_functions.h`

  - ``CUDA::nppisu``
  - ``CUDA::nppisu_static``

- `nppitc`: Threshold and compare operation functions in `nppi_threshold_and_compare_operations.h`

  - ``CUDA::nppitc``
  - ``CUDA::nppitc_static``

- `npps`:

  - ``CUDA::npps``
  - ``CUDA::npps_static``

.. _`cuda_toolkit_nvBLAS`:

nvBLAS
""""""

The `nvBLAS <https://docs.nvidia.com/cuda/nvblas/index.html>`_ libraries.
This is a shared library only.

Targets Created:

- ``CUDA::nvblas``

.. _`cuda_toolkit_nvGRAPH`:

nvGRAPH
"""""""

The `nvGRAPH <https://docs.nvidia.com/cuda/nvgraph/index.html>`_ library.
Removed starting in CUDA 11.0.

Targets Created:

- ``CUDA::nvgraph``
- ``CUDA::nvgraph_static``


.. _`cuda_toolkit_nvJPEG`:

nvJPEG
""""""

The `nvJPEG <https://docs.nvidia.com/cuda/nvjpeg/index.html>`_ library.
Introduced in CUDA 10.

Targets Created:

- ``CUDA::nvjpeg``
- ``CUDA::nvjpeg_static``

.. _`cuda_toolkit_nvRTC`:

nvRTC
"""""

The `nvRTC <https://docs.nvidia.com/cuda/nvrtc/index.html>`_ (Runtime Compilation) library.
This is a shared library only.

Targets Created:

- ``CUDA::nvrtc``

.. _`cuda_toolkit_nvml`:

nvidia-ML
"""""""""

The `NVIDIA Management Library <https://developer.nvidia.com/nvidia-management-library-nvml>`_.
This is a shared library only.

Targets Created:

- ``CUDA::nvml``

.. _`cuda_toolkit_nvToolsExt`:

nvToolsExt
""""""""""

The `NVIDIA Tools Extension <https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm>`_.
This is a shared library only.

Targets Created:

- ``CUDA::nvToolsExt``

.. _`cuda_toolkit_opencl`:

OpenCL
""""""

The `NVIDIA OpenCL Library <https://developer.nvidia.com/opencl>`_.
This is a shared library only.

Targets Created:

- ``CUDA::OpenCL``

.. _`cuda_toolkit_cuLIBOS`:

cuLIBOS
"""""""

The cuLIBOS library is a backend thread abstraction layer library which is
static only. The ``CUDA::cublas_static``, ``CUDA::cusparse_static``,
``CUDA::cufft_static``, ``CUDA::curand_static``, and (when implemented) NPP
libraries all automatically have this dependency linked.

Target Created:

- ``CUDA::culibos``

**Note**: direct usage of this target by consumers should not be necessary.


Result variables
^^^^^^^^^^^^^^^^

``CUDAToolkit_FOUND``
    A boolean specifying whether or not the CUDA Toolkit was found.

``CUDAToolkit_VERSION``
    The exact version of the CUDA Toolkit found (as reported by
    ``nvcc --version`` or ``version.txt``).

``CUDAToolkit_VERSION_MAJOR``
    The major version of the CUDA Toolkit.

``CUDAToolkit_VERSION_MINOR``
    The minor version of the CUDA Toolkit.

``CUDAToolkit_VERSION_PATCH``
    The patch version of the CUDA Toolkit.

``CUDAToolkit_BIN_DIR``
    The path to the CUDA Toolkit library directory that contains the CUDA
    executable ``nvcc``.

``CUDAToolkit_INCLUDE_DIRS``
    The path to the CUDA Toolkit ``include`` folder containing the header files
    required to compile a project linking against CUDA.

``CUDAToolkit_LIBRARY_DIR``
    The path to the CUDA Toolkit library directory that contains the CUDA
    Runtime library ``cudart``.

``CUDAToolkit_LIBRARY_ROOT``
    .. versionadded:: 3.18

    The path to the CUDA Toolkit directory containing the nvvm directory and
    version.txt.

``CUDAToolkit_TARGET_DIR``
    The path to the CUDA Toolkit directory including the target architecture
    when cross-compiling. When not cross-compiling this will be equivalent to
    the parent directory of ``CUDAToolkit_BIN_DIR``.

``CUDAToolkit_NVCC_EXECUTABLE``
    The path to the NVIDIA CUDA compiler ``nvcc``. Note that this path may
    **not** be the same as
    :variable:`CMAKE_CUDA_COMPILER <CMAKE_<LANG>_COMPILER>`. ``nvcc`` must be
    found to determine the CUDA Toolkit version as well as determining other
    features of the Toolkit. This variable is set for the convenience of
    modules that depend on this one.


#]=======================================================================]

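# Editor's illustration (not part of the upstream module): a consumer never
# includes this file directly; find_package() loads it, and the search order
# documented above can be steered from the command line, e.g.
#   cmake -DCUDAToolkit_ROOT=/usr/local/cuda-11.8 ..   # path is a placeholder
# after which CUDA::cudart and friends are available via target_link_libraries().
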
# NOTE: much of this was simply extracted from FindCUDA.cmake.

# James Bigler, NVIDIA Corp (nvidia.com - jbigler)
# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
#
# Copyright (c) 2007-2009
# Scientific Computing and Imaging Institute, University of Utah
#
# This code is licensed under the MIT License. See the FindCUDA.cmake script
# for the text of the license.

# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################

# The toolkit is located during compiler detection for CUDA and stored in CMakeCUDACompiler.cmake as
# CMAKE_CUDA_COMPILER_TOOLKIT_ROOT and CMAKE_CUDA_COMPILER_LIBRARY_ROOT.
# We compute the rest based on those here to avoid re-searching and to avoid finding a possibly
# different installation.
if(CMAKE_CUDA_COMPILER_TOOLKIT_ROOT)
  set(CUDAToolkit_ROOT_DIR "${CMAKE_CUDA_COMPILER_TOOLKIT_ROOT}")
  set(CUDAToolkit_LIBRARY_ROOT "${CMAKE_CUDA_COMPILER_LIBRARY_ROOT}")
  set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}")

  if(CUDAToolkit_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=])
    set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
    set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
    set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
  endif()
else()
  function(_CUDAToolkit_find_root_dir )
    cmake_parse_arguments(arg "" "" "SEARCH_PATHS;FIND_FLAGS" ${ARGN})

    if(NOT CUDAToolkit_BIN_DIR)
      if(NOT CUDAToolkit_SENTINEL_FILE)
        find_program(CUDAToolkit_NVCC_EXECUTABLE
          NAMES nvcc nvcc.exe
          PATHS ${arg_SEARCH_PATHS}
          ${arg_FIND_FLAGS}
        )
      endif()

      if(NOT CUDAToolkit_NVCC_EXECUTABLE)
        find_file(CUDAToolkit_SENTINEL_FILE
          NAMES version.txt
          PATHS ${arg_SEARCH_PATHS}
          NO_DEFAULT_PATH
        )
      endif()

      if(EXISTS "${CUDAToolkit_NVCC_EXECUTABLE}")
        # If NVCC exists then invoke it to find the toolkit location.
        # This allows us to support wrapper scripts (e.g. ccache or colornvcc), CUDA Toolkit,
        # NVIDIA HPC SDK, and distro's splayed layouts
        execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "-v" "__cmake_determine_cuda"
                        OUTPUT_VARIABLE _CUDA_NVCC_OUT ERROR_VARIABLE _CUDA_NVCC_OUT)
        if(_CUDA_NVCC_OUT MATCHES "\\#\\$ TOP=([^\r\n]*)")
          get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_MATCH_1}/bin" ABSOLUTE)
        else()
          get_filename_component(CUDAToolkit_BIN_DIR "${CUDAToolkit_NVCC_EXECUTABLE}" DIRECTORY)
        endif()
        unset(_CUDA_NVCC_OUT)

        mark_as_advanced(CUDAToolkit_BIN_DIR)
        set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE)
      endif()

      if(CUDAToolkit_SENTINEL_FILE)
        get_filename_component(CUDAToolkit_BIN_DIR ${CUDAToolkit_SENTINEL_FILE} DIRECTORY ABSOLUTE)
        set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}/bin")

        set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE)
        mark_as_advanced(CUDAToolkit_BIN_DIR)
      endif()
    endif()

    if(CUDAToolkit_BIN_DIR)
      get_filename_component(CUDAToolkit_ROOT_DIR ${CUDAToolkit_BIN_DIR} DIRECTORY ABSOLUTE)
      set(CUDAToolkit_ROOT_DIR "${CUDAToolkit_ROOT_DIR}" PARENT_SCOPE)
    endif()

  endfunction()

  # For NVCC we can easily deduce the SDK binary directory from the compiler path.
  if(CMAKE_CUDA_COMPILER_LOADED AND NOT CUDAToolkit_BIN_DIR AND CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA")
    get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_CUDA_COMPILER}" DIRECTORY)
    set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "")
    # Try language provided path first.
    _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_BIN_DIR}" FIND_FLAGS NO_DEFAULT_PATH)
    mark_as_advanced(CUDAToolkit_BIN_DIR)
  endif()

  # Try user provided path
  if(NOT CUDAToolkit_ROOT_DIR AND CUDAToolkit_ROOT)
    _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_ROOT}" FIND_FLAGS PATH_SUFFIXES bin NO_DEFAULT_PATH)
  endif()
  if(NOT CUDAToolkit_ROOT_DIR)
    _CUDAToolkit_find_root_dir(FIND_FLAGS PATHS ENV CUDA_PATH PATH_SUFFIXES bin)
  endif()

  # If the user specified CUDAToolkit_ROOT but the toolkit could not be found, this is an error.
  if(NOT CUDAToolkit_ROOT_DIR AND (DEFINED CUDAToolkit_ROOT OR DEFINED ENV{CUDAToolkit_ROOT}))
    # Declare error messages now, print later depending on find_package args.
    set(fail_base "Could not find nvcc executable in path specified by")
    set(cuda_root_fail "${fail_base} CUDAToolkit_ROOT=${CUDAToolkit_ROOT}")
    set(env_cuda_root_fail "${fail_base} environment variable CUDAToolkit_ROOT=$ENV{CUDAToolkit_ROOT}")

    if(CUDAToolkit_FIND_REQUIRED)
      if(DEFINED CUDAToolkit_ROOT)
        message(FATAL_ERROR ${cuda_root_fail})
      elseif(DEFINED ENV{CUDAToolkit_ROOT})
        message(FATAL_ERROR ${env_cuda_root_fail})
      endif()
    else()
      if(NOT CUDAToolkit_FIND_QUIETLY)
        if(DEFINED CUDAToolkit_ROOT)
          message(STATUS ${cuda_root_fail})
        elseif(DEFINED ENV{CUDAToolkit_ROOT})
          message(STATUS ${env_cuda_root_fail})
        endif()
      endif()
      set(CUDAToolkit_FOUND FALSE)
      unset(fail_base)
      unset(cuda_root_fail)
      unset(env_cuda_root_fail)
      return()
    endif()
  endif()

  # CUDAToolkit_ROOT cmake / env variable not specified, try platform defaults.
  #
  # - Linux: /usr/local/cuda-X.Y
  # - macOS: /Developer/NVIDIA/CUDA-X.Y
  # - Windows: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y
  #
  # We will also search the default symlink location /usr/local/cuda first since
  # if CUDAToolkit_ROOT is not specified, it is assumed that the symlinked
  # directory is the desired location.
  if(NOT CUDAToolkit_ROOT_DIR)
    if(UNIX)
      if(NOT APPLE)
        set(platform_base "/usr/local/cuda-")
      else()
        set(platform_base "/Developer/NVIDIA/CUDA-")
      endif()
    else()
      set(platform_base "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v")
    endif()

    # Build out a descending list of possible cuda installations, e.g.
    file(GLOB possible_paths "${platform_base}*")
    # Iterate the glob results and create a descending list.
    set(versions)
    foreach(p ${possible_paths})
      # Extract version number from end of string
      string(REGEX MATCH "[0-9][0-9]?\\.[0-9]$" p_version ${p})
      if(IS_DIRECTORY ${p} AND p_version)
        list(APPEND versions ${p_version})
      endif()
    endforeach()

    # Sort numerically in descending order, so we try the newest versions first.
    if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.18)
      list(SORT versions COMPARE NATURAL ORDER DESCENDING)
    elseif(versions)
      # Alphabetical sort here is not ideal but better than nothing
      list(SORT versions)
      list(REVERSE versions)
    endif()

    # With a descending list of versions, populate possible paths to search.
    set(search_paths)
    foreach(v ${versions})
      list(APPEND search_paths "${platform_base}${v}")
    endforeach()

    # Force the global default /usr/local/cuda to the front on Unix.
    if(UNIX)
      list(INSERT search_paths 0 "/usr/local/cuda")
    endif()

    # Now search for the toolkit again using the platform default search paths.
    _CUDAToolkit_find_root_dir(SEARCH_PATHS "${search_paths}" FIND_FLAGS PATH_SUFFIXES bin)

    # We are done with these variables now, cleanup for caller.
    unset(platform_base)
    unset(possible_paths)
    unset(versions)
    unset(search_paths)

    if(NOT CUDAToolkit_ROOT_DIR)
      if(CUDAToolkit_FIND_REQUIRED)
        message(FATAL_ERROR "Could not find nvcc, please set CUDAToolkit_ROOT.")
      elseif(NOT CUDAToolkit_FIND_QUIETLY)
        message(STATUS "Could not find nvcc, please set CUDAToolkit_ROOT.")
      endif()

      set(CUDAToolkit_FOUND FALSE)
      return()
    endif()
  endif()
endif()

if(NOT CUDAToolkit_BIN_DIR)
  set(CUDAToolkit_BIN_DIR "${CUDAToolkit_ROOT_DIR}/bin")
endif()

if(NOT CUDAToolkit_NVCC_EXECUTABLE)
  set(CUDAToolkit_NVCC_EXECUTABLE "${CUDAToolkit_BIN_DIR}/nvcc${CMAKE_EXECUTABLE_SUFFIX}")
endif()

if(CMAKE_CUDA_COMPILER_TOOLKIT_VERSION)
  set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}")
else()
  function(_CUDAToolkit_find_version_file result_variable)
    # We first check for a non-scattered installation to prefer it over a scattered installation.
    if(CUDAToolkit_ROOT AND EXISTS "${CUDAToolkit_ROOT}/version.txt")
      set(${result_variable} "${CUDAToolkit_ROOT}/version.txt" PARENT_SCOPE)
    elseif(CUDAToolkit_ROOT_DIR AND EXISTS "${CUDAToolkit_ROOT_DIR}/version.txt")
      set(${result_variable} "${CUDAToolkit_ROOT_DIR}/version.txt" PARENT_SCOPE)
    elseif(CMAKE_SYSROOT_LINK AND EXISTS "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt")
      set(${result_variable} "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt" PARENT_SCOPE)
    elseif(EXISTS "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt")
      set(${result_variable} "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt" PARENT_SCOPE)
    endif()
  endfunction()

  _CUDAToolkit_find_version_file( _CUDAToolkit_version_file )
  if(_CUDAToolkit_version_file)
    # CUDAToolkit_LIBRARY_ROOT contains the device library and version file.
    get_filename_component(CUDAToolkit_LIBRARY_ROOT "${_CUDAToolkit_version_file}" DIRECTORY ABSOLUTE)
  endif()
  unset(_CUDAToolkit_version_file)

  if(CUDAToolkit_NVCC_EXECUTABLE AND
     CMAKE_CUDA_COMPILER_VERSION AND
     CUDAToolkit_NVCC_EXECUTABLE STREQUAL CMAKE_CUDA_COMPILER)
    # Need to set these based off the already computed CMAKE_CUDA_COMPILER_VERSION value
    # This if statement will always match, but is used to provide variables for MATCH 1,2,3...
    if(CMAKE_CUDA_COMPILER_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=])
      set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
      set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
      set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
      set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_VERSION}")
    endif()
  elseif(CUDAToolkit_NVCC_EXECUTABLE)
    # Compute the version by invoking nvcc
    execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT)
    if(NVCC_OUT MATCHES [=[ V([0-9]+)\.([0-9]+)\.([0-9]+)]=])
      set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
      set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
      set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
      set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}")
    endif()
    unset(NVCC_OUT)
  else()
    _CUDAToolkit_find_version_file(version_file)
    if(version_file)
      file(READ "${version_file}" VERSION_INFO)
      if(VERSION_INFO MATCHES [=[CUDA Version ([0-9]+)\.([0-9]+)\.([0-9]+)]=])
        set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
        set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
        set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
        set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}")
      endif()
    endif()
  endif()
endif()

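# Editor's note (assuming a stock nvcc): the " V" match above keys off output
# of the form "Cuda compilation tools, release 11.8, V11.8.89", so
# CMAKE_MATCH_1/2/3 capture 11, 8 and 89 respectively.
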
# Find target directory when crosscompiling.
if(CMAKE_CROSSCOMPILING)
  if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a")
    # Support for NVPACK
    set(CUDAToolkit_TARGET_NAME "armv7-linux-androideabi")
  elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
    set(CUDAToolkit_TARGET_NAME "armv7-linux-gnueabihf")
  elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
    if(ANDROID_ARCH_NAME STREQUAL "arm64")
      set(CUDAToolkit_TARGET_NAME "aarch64-linux-androideabi")
    elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX")
      set(CUDAToolkit_TARGET_NAME "aarch64-qnx")
    else()
      set(CUDAToolkit_TARGET_NAME "aarch64-linux")
    endif()
  elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
    set(CUDAToolkit_TARGET_NAME "x86_64-linux")
  endif()

  if(EXISTS "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}")
    set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}")
    # add known CUDA target root path to the set of directories we search for programs, libraries and headers
    list(PREPEND CMAKE_FIND_ROOT_PATH "${CUDAToolkit_TARGET_DIR}")

    # Mark that we need to pop the root search path changes after we have
    # found all cuda libraries so that searches for our cross-compilation
    # libraries work when another cuda sdk is in CMAKE_PREFIX_PATH or
    # PATH
    set(_CUDAToolkit_Pop_ROOT_PATH True)
  endif()
endif()

# If not already set we can simply use the toolkit root or it's a scattered installation.
if(NOT CUDAToolkit_TARGET_DIR)
  # Not cross compiling
  set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}")
  # Now that we have the real ROOT_DIR, find components inside it.
  list(APPEND CMAKE_PREFIX_PATH ${CUDAToolkit_ROOT_DIR})

  # Mark that we need to pop the prefix path changes after we have
  # found the cudart library.
  set(_CUDAToolkit_Pop_Prefix True)
endif()

# CUDAToolkit_TARGET_DIR always points to the directory containing the include directory.
# On a scattered installation /usr, on a non-scattered something like /usr/local/cuda or /usr/local/cuda-10.2/targets/aarch64-linux.
if(EXISTS "${CUDAToolkit_TARGET_DIR}/include/cuda_runtime.h")
  set(CUDAToolkit_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/include")
elseif(NOT CUDAToolkit_FIND_QUIETLY)
  message(STATUS "Unable to find cuda_runtime.h in \"${CUDAToolkit_TARGET_DIR}/include\" for CUDAToolkit_INCLUDE_DIR.")
endif()

# The NVHPC layout moves math library headers and libraries to a sibling directory.
# Create a separate variable so this directory can be selectively added to math targets.
if(NOT EXISTS "${CUDAToolkit_INCLUDE_DIR}/cublas_v2.h")
  set(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/../../math_libs/include")
  get_filename_component(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_MATH_INCLUDE_DIR}" ABSOLUTE)
  if(NOT EXISTS "${CUDAToolkit_MATH_INCLUDE_DIR}/cublas_v2.h")
    if(NOT CUDAToolkit_FIND_QUIETLY)
      message(STATUS "Unable to find cublas_v2.h in either \"${CUDAToolkit_INCLUDE_DIR}\" or \"${CUDAToolkit_MATH_INCLUDE_DIR}\"")
    endif()
    unset(CUDAToolkit_MATH_INCLUDE_DIR)
  endif()
endif()

# Find the CUDA Runtime Library libcudart
find_library(CUDA_CUDART
  NAMES cudart
  PATH_SUFFIXES lib64 lib/x64
)
find_library(CUDA_CUDART
  NAMES cudart
  PATH_SUFFIXES lib64/stubs lib/x64/stubs
)

if(NOT CUDA_CUDART AND NOT CUDAToolkit_FIND_QUIETLY)
  message(STATUS "Unable to find cudart library.")
endif()

if(_CUDAToolkit_Pop_Prefix)
  list(REMOVE_AT CMAKE_PREFIX_PATH -1)
  unset(_CUDAToolkit_Pop_Prefix)
endif()

#-----------------------------------------------------------------------------
# Perform version comparison and validate all required variables are set.
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(CUDAToolkit
  REQUIRED_VARS
    CUDAToolkit_INCLUDE_DIR
    CUDAToolkit_VERSION
    CUDA_CUDART
    CUDAToolkit_BIN_DIR
  VERSION_VAR
    CUDAToolkit_VERSION
)

mark_as_advanced(CUDA_CUDART
                 CUDAToolkit_INCLUDE_DIR
                 CUDAToolkit_NVCC_EXECUTABLE
                 CUDAToolkit_SENTINEL_FILE
                 )

#-----------------------------------------------------------------------------
# Construct result variables
if(CUDAToolkit_FOUND)
  set(CUDAToolkit_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIR})
  get_filename_component(CUDAToolkit_LIBRARY_DIR ${CUDA_CUDART} DIRECTORY ABSOLUTE)
endif()

#-----------------------------------------------------------------------------
# Construct import targets
if(CUDAToolkit_FOUND)

  function(_CUDAToolkit_find_and_add_import_lib lib_name)
    cmake_parse_arguments(arg "" "" "ALT;DEPS;EXTRA_HINTS;EXTRA_PATH_SUFFIXES;EXTRA_INCLUDE_DIRS" ${ARGN})

    set(search_names ${lib_name} ${arg_ALT})

    find_library(CUDA_${lib_name}_LIBRARY
      NAMES ${search_names}
      HINTS ${CUDAToolkit_LIBRARY_DIR}
            ENV CUDA_PATH
            ${arg_EXTRA_HINTS}
      PATH_SUFFIXES nvidia/current lib64 lib/x64 lib
                    ${arg_EXTRA_PATH_SUFFIXES}
    )
    # Don't try any stub directories until we have exhausted all other
    # search locations.
    find_library(CUDA_${lib_name}_LIBRARY
      NAMES ${search_names}
      HINTS ${CUDAToolkit_LIBRARY_DIR}
            ENV CUDA_PATH
            ${arg_EXTRA_HINTS}
      PATH_SUFFIXES lib64/stubs lib/x64/stubs lib/stubs stubs
                    # Support NVHPC splayed math library layout
                    ../../math_libs/${CUDAToolkit_VERSION_MAJOR}.${CUDAToolkit_VERSION_MINOR}/lib64
                    ../../math_libs/lib64
    )

    mark_as_advanced(CUDA_${lib_name}_LIBRARY)

    if(NOT TARGET CUDA::${lib_name} AND CUDA_${lib_name}_LIBRARY)
      add_library(CUDA::${lib_name} UNKNOWN IMPORTED)
      set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
        INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
      set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
        INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
      if(DEFINED CUDAToolkit_MATH_INCLUDE_DIR)
        string(FIND ${CUDA_${lib_name}_LIBRARY} "math_libs" math_libs)
        if(NOT ${math_libs} EQUAL -1)
          set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
            INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIR}")
          set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
            INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIR}")
        endif()
      endif()
      set_property(TARGET CUDA::${lib_name} PROPERTY IMPORTED_LOCATION "${CUDA_${lib_name}_LIBRARY}")
      foreach(dep ${arg_DEPS})
        if(TARGET CUDA::${dep})
          set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
            INTERFACE_LINK_LIBRARIES CUDA::${dep})
        endif()
      endforeach()
      if(arg_EXTRA_INCLUDE_DIRS)
        set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
          INTERFACE_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}")
        set_property(TARGET CUDA::${lib_name} APPEND PROPERTY
          INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}")
      endif()
    endif()
  endfunction()

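  # Editor's note: the paired find_library() calls in the helper above rely on
  # result caching; once the first (non-stub) search sets CUDA_<lib>_LIBRARY,
  # the second (stub) search is skipped, so stubs act purely as a fallback.
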
  if(NOT TARGET CUDA::toolkit)
    add_library(CUDA::toolkit IMPORTED INTERFACE)
    set_property(TARGET CUDA::toolkit APPEND PROPERTY
      INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
    set_property(TARGET CUDA::toolkit APPEND PROPERTY
      INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}")
  endif()

  _CUDAToolkit_find_and_add_import_lib(cuda_driver ALT cuda)

  _CUDAToolkit_find_and_add_import_lib(cudart)
  _CUDAToolkit_find_and_add_import_lib(cudart_static)

  # setup dependencies that are required for cudart_static when building
  # on linux. These are generally only required when using the CUDA toolkit
  # when CUDA language is disabled
  if(NOT TARGET CUDA::cudart_static_deps
     AND TARGET CUDA::cudart_static)

    add_library(CUDA::cudart_static_deps IMPORTED INTERFACE)
    set_property(TARGET CUDA::cudart_static APPEND PROPERTY
      INTERFACE_LINK_LIBRARIES CUDA::cudart_static_deps)

    if(UNIX AND (CMAKE_C_COMPILER OR CMAKE_CXX_COMPILER))
      find_package(Threads REQUIRED)
      set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY
        INTERFACE_LINK_LIBRARIES Threads::Threads ${CMAKE_DL_LIBS})
    endif()

    if(UNIX AND NOT APPLE AND NOT (CMAKE_SYSTEM_NAME STREQUAL "QNX"))
      # On Linux, you must link against librt when using the static cuda runtime.
      find_library(CUDAToolkit_rt_LIBRARY rt)
      mark_as_advanced(CUDAToolkit_rt_LIBRARY)
      if(NOT CUDAToolkit_rt_LIBRARY)
        message(WARNING "Could not find librt library, needed by CUDA::cudart_static")
      else()
        set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY
          INTERFACE_LINK_LIBRARIES ${CUDAToolkit_rt_LIBRARY})
      endif()
    endif()
  endif()

  _CUDAToolkit_find_and_add_import_lib(culibos) # it's a static library
  foreach(cuda_lib cublasLt cufft curand cusparse nppc nvjpeg)
    _CUDAToolkit_find_and_add_import_lib(${cuda_lib})
    _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS culibos)
  endforeach()

  if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 11.0.0)
    # cublas depends on cublasLt
    # https://docs.nvidia.com/cuda/archive/11.0/cublas/index.html#static-library
    _CUDAToolkit_find_and_add_import_lib(cublas DEPS cublasLt)
    _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS cublasLt_static)
  else()
    _CUDAToolkit_find_and_add_import_lib(cublas)
    _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS culibos)
  endif()

  # cuFFTW depends on cuFFT
  _CUDAToolkit_find_and_add_import_lib(cufftw DEPS cufft)
  _CUDAToolkit_find_and_add_import_lib(cufftw_static DEPS cufft_static)
  if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 9.2)
    _CUDAToolkit_find_and_add_import_lib(cufft_static_nocallback DEPS culibos)
  endif()

  # cuSOLVER depends on cuBLAS, and cuSPARSE
  _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublas cusparse)
  _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cublas_static cusparse_static culibos)


  if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 10.1.2)
    # cusolver depends on liblapack_static.a starting with CUDA 10.1 update 2,
    # https://docs.nvidia.com/cuda/archive/11.5.0/cusolver/index.html#static-link-lapack
    _CUDAToolkit_find_and_add_import_lib(cusolver_lapack_static ALT lapack_static) # implementation detail static lib
    _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_lapack_static)
  endif()

  if(CUDAToolkit_VERSION VERSION_GREATER 11.2.1)
    # cusolver depends on libcusolver_metis and cublasLt
    # https://docs.nvidia.com/cuda/archive/11.2.2/cusolver/index.html#link-dependency
    _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublasLt)

    _CUDAToolkit_find_and_add_import_lib(cusolver_metis_static ALT metis_static) # implementation detail static lib
    _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_metis_static cublasLt_static)
  endif()

  # nvGRAPH depends on cuRAND, and cuSOLVER.
  _CUDAToolkit_find_and_add_import_lib(nvgraph DEPS curand cusolver)
  _CUDAToolkit_find_and_add_import_lib(nvgraph_static DEPS curand_static cusolver_static)

  # Process the majority of the NPP libraries.
  foreach(cuda_lib nppial nppicc nppidei nppif nppig nppim nppist nppitc npps nppicom nppisu)
    _CUDAToolkit_find_and_add_import_lib(${cuda_lib} DEPS nppc)
    _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS nppc_static)
  endforeach()

  find_path(CUDAToolkit_CUPTI_INCLUDE_DIR cupti.h PATHS
      "${CUDAToolkit_ROOT_DIR}/extras/CUPTI/include"
      "${CUDAToolkit_INCLUDE_DIR}/../extras/CUPTI/include"
      "${CUDAToolkit_INCLUDE_DIR}"
      NO_DEFAULT_PATH)
  mark_as_advanced(CUDAToolkit_CUPTI_INCLUDE_DIR)

  if(CUDAToolkit_CUPTI_INCLUDE_DIR)
    _CUDAToolkit_find_and_add_import_lib(cupti
                                         EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/
                                                             ../extras/CUPTI/lib/
                                         EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}")
    _CUDAToolkit_find_and_add_import_lib(cupti_static
                                         EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/
                                                             ../extras/CUPTI/lib/
                                         EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}")
  endif()

  _CUDAToolkit_find_and_add_import_lib(nvrtc DEPS cuda_driver)

  _CUDAToolkit_find_and_add_import_lib(nvml ALT nvidia-ml nvml)

  # nvtools can be installed outside the CUDA toolkit directory,
  # so search the NVTOOLSEXT_PATH windows only environment variable
  set(nvToolsExt_EXTRA_PATH)
  if(WIN32)
    set(nvToolsExt_EXTRA_PATH "C:\\Program Files\\NVIDIA Corporation\\NvToolsExt")
  endif()

  find_path(CUDAToolkit_nvToolsExt_INCLUDE_DIR nvToolsExt.h
            PATHS "${CUDAToolkit_INCLUDE_DIR}"
                  "${CUDAToolkit_ROOT_DIR}"
                  ENV NVTOOLSEXT_PATH
                  "${nvToolsExt_EXTRA_PATH}"
            PATH_SUFFIXES include
            NO_DEFAULT_PATH)
  mark_as_advanced(CUDAToolkit_nvToolsExt_INCLUDE_DIR)

  if(CUDAToolkit_nvToolsExt_INCLUDE_DIR)
    _CUDAToolkit_find_and_add_import_lib(nvToolsExt
                                         ALT nvToolsExt64 nvToolsExt64_1
                                         EXTRA_HINTS ENV NVTOOLSEXT_PATH
                                                     "${nvToolsExt_EXTRA_PATH}"
                                         EXTRA_INCLUDE_DIRS "${CUDAToolkit_nvToolsExt_INCLUDE_DIR}")
  endif()

  _CUDAToolkit_find_and_add_import_lib(OpenCL)
endif()

unset(CUDAToolkit_ROOT_DIR)

if(_CUDAToolkit_Pop_ROOT_PATH)
  list(REMOVE_AT CMAKE_FIND_ROOT_PATH 0)
  unset(_CUDAToolkit_Pop_ROOT_PATH)
endif()
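
Taken together, the module boils down to a familiar consumer pattern. The sketch below is an editor's illustration (the `demo` project name, source file, and version floor are placeholders, not part of the vendored file); it relies only on the imported targets and result variables documented above.

cmake_minimum_required(VERSION 3.17)
project(demo LANGUAGES CXX)

find_package(CUDAToolkit 10.2 REQUIRED)
message(STATUS "Found CUDA Toolkit ${CUDAToolkit_VERSION} in ${CUDAToolkit_LIBRARY_DIR}")

add_executable(demo main.cpp)
# Link the runtime plus a math library; both come from the imported targets above.
target_link_libraries(demo PRIVATE CUDA::cudart CUDA::cublas)
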
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUSPARSELT.cmake
ADDED
# Find the CUSPARSELT library
#
# The following variables are optionally searched for defaults
#  CUSPARSELT_ROOT: Base directory where CUSPARSELT is found
#  CUSPARSELT_INCLUDE_DIR: Directory where CUSPARSELT header is searched for
#  CUSPARSELT_LIBRARY: Directory where CUSPARSELT library is searched for
#
# The following are set after configuration is done:
#  CUSPARSELT_FOUND
#  CUSPARSELT_INCLUDE_PATH
#  CUSPARSELT_LIBRARY_PATH

include(FindPackageHandleStandardArgs)

set(CUSPARSELT_ROOT $ENV{CUSPARSELT_ROOT_DIR} CACHE PATH "Folder containing NVIDIA cuSPARSELt")
if (DEFINED ENV{CUSPARSELT_ROOT_DIR})
  message(WARNING "CUSPARSELT_ROOT_DIR is deprecated. Please set CUSPARSELT_ROOT instead.")
endif()
list(APPEND CUSPARSELT_ROOT $ENV{CUSPARSELT_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR})

# Compatible layer for CMake <3.12. CUSPARSELT_ROOT will be accounted in for searching paths and libraries for CMake >=3.12.
list(APPEND CMAKE_PREFIX_PATH ${CUSPARSELT_ROOT})

set(CUSPARSELT_INCLUDE_DIR $ENV{CUSPARSELT_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA cuSPARSELt header files")

find_path(CUSPARSELT_INCLUDE_PATH cusparseLt.h
  HINTS ${CUSPARSELT_INCLUDE_DIR}
  PATH_SUFFIXES cuda/include cuda include)

set(CUSPARSELT_LIBRARY $ENV{CUSPARSELT_LIBRARY} CACHE PATH "Path to the cusparselt library file (e.g., libcusparseLt.so)")

find_library(CUSPARSELT_LIBRARY_PATH libcusparseLt.so
  PATHS ${CUSPARSELT_LIBRARY}
  PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64)

find_package_handle_standard_args(CUSPARSELT DEFAULT_MSG CUSPARSELT_LIBRARY_PATH CUSPARSELT_INCLUDE_PATH)

if(CUSPARSELT_FOUND)
  # Get cuSPARSELt version
  file(READ ${CUSPARSELT_INCLUDE_PATH}/cusparseLt.h CUSPARSELT_HEADER_CONTENTS)
  string(REGEX MATCH "define CUSPARSELT_VER_MAJOR * +([0-9]+)"
         CUSPARSELT_VERSION_MAJOR "${CUSPARSELT_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUSPARSELT_VER_MAJOR * +([0-9]+)" "\\1"
         CUSPARSELT_VERSION_MAJOR "${CUSPARSELT_VERSION_MAJOR}")
  string(REGEX MATCH "define CUSPARSELT_VER_MINOR * +([0-9]+)"
         CUSPARSELT_VERSION_MINOR "${CUSPARSELT_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUSPARSELT_VER_MINOR * +([0-9]+)" "\\1"
         CUSPARSELT_VERSION_MINOR "${CUSPARSELT_VERSION_MINOR}")
  string(REGEX MATCH "define CUSPARSELT_VER_PATCH * +([0-9]+)"
         CUSPARSELT_VERSION_PATCH "${CUSPARSELT_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUSPARSELT_VER_PATCH * +([0-9]+)" "\\1"
         CUSPARSELT_VERSION_PATCH "${CUSPARSELT_VERSION_PATCH}")
  # Assemble cuSPARSELt version. Use minor version since current major version is 0.
  if(NOT CUSPARSELT_VERSION_MINOR)
    set(CUSPARSELT_VERSION "?")
  else()
    set(CUSPARSELT_VERSION
        "${CUSPARSELT_VERSION_MAJOR}.${CUSPARSELT_VERSION_MINOR}.${CUSPARSELT_VERSION_PATCH}")
  endif()
endif()

mark_as_advanced(CUSPARSELT_ROOT CUSPARSELT_INCLUDE_DIR CUSPARSELT_LIBRARY CUSPARSELT_VERSION)
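
Unlike FindCUDAToolkit.cmake above, this module exposes plain variables rather than imported targets, so a consumer wires the two result paths up by hand. A minimal sketch under that assumption (the `sparse_demo` target is a placeholder, and the module must already be on CMAKE_MODULE_PATH):

find_package(CUSPARSELT)
if(CUSPARSELT_FOUND)
  message(STATUS "Using cuSPARSELt ${CUSPARSELT_VERSION}")
  target_include_directories(sparse_demo PRIVATE ${CUSPARSELT_INCLUDE_PATH})
  target_link_libraries(sparse_demo PRIVATE ${CUSPARSELT_LIBRARY_PATH})
endif()
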
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDA.cmake
ADDED
# This is a wrapper of the upstream `./upstream/FindCUDA.cmake` that
# automatically includes `./upstream/CMakeInitializeConfigs.cmake` before
# `./upstream/FindCUDA.cmake`. The `CMakeInitializeConfigs.cmake`, which is
# absent in old CMake versions, creates some necessary variables for the
# latter to run.
# See ./README.md for details.

set(UPSTREAM_FIND_CUDA_DIR "${CMAKE_CURRENT_LIST_DIR}/upstream/")

include("${UPSTREAM_FIND_CUDA_DIR}/CMakeInitializeConfigs.cmake")
include("${UPSTREAM_FIND_CUDA_DIR}/FindCUDA.cmake")
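
The wrapper only takes effect when its directory shadows CMake's builtin FindCUDA module. A hedged sketch of how a project would opt in (the relative path is illustrative; any directory containing this file works):

# list(INSERT ...) keeps compatibility with CMake 3.10; entries in
# CMAKE_MODULE_PATH are consulted before CMake's builtin find modules.
list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules_CUDA_fix")
find_package(CUDA)
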
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDNN.cmake
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+# Find the CUDNN libraries
+#
+# The following variables are optionally searched for defaults
+#  CUDNN_ROOT: Base directory where CUDNN is found
+#  CUDNN_INCLUDE_DIR: Directory where CUDNN header is searched for
+#  CUDNN_LIBRARY: Directory where CUDNN library is searched for
+#  CUDNN_STATIC: Are we looking for a static library? (default: no)
+#
+# The following are set after configuration is done:
+#  CUDNN_FOUND
+#  CUDNN_INCLUDE_PATH
+#  CUDNN_LIBRARY_PATH
+#
+
+include(FindPackageHandleStandardArgs)
+
+set(CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} CACHE PATH "Folder containing NVIDIA cuDNN")
+if (DEFINED ENV{CUDNN_ROOT_DIR})
+  message(WARNING "CUDNN_ROOT_DIR is deprecated. Please set CUDNN_ROOT instead.")
+endif()
+list(APPEND CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR})
+
+# Compatibility layer for CMake <3.12. CUDNN_ROOT is accounted for in search
+# paths and libraries automatically for CMake >=3.12.
+list(APPEND CMAKE_PREFIX_PATH ${CUDNN_ROOT})
+
+set(CUDNN_INCLUDE_DIR $ENV{CUDNN_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA cuDNN header files")
+
+find_path(CUDNN_INCLUDE_PATH cudnn.h
+  HINTS ${CUDNN_INCLUDE_DIR}
+  PATH_SUFFIXES cuda/include cuda include)
+
+option(CUDNN_STATIC "Look for static CUDNN" OFF)
+if (CUDNN_STATIC)
+  set(CUDNN_LIBNAME "libcudnn_static.a")
+else()
+  set(CUDNN_LIBNAME "cudnn")
+endif()
+
+set(CUDNN_LIBRARY $ENV{CUDNN_LIBRARY} CACHE PATH "Path to the cudnn library file (e.g., libcudnn.so)")
+if (CUDNN_LIBRARY MATCHES ".*cudnn_static.a" AND NOT CUDNN_STATIC)
+  message(WARNING "CUDNN_LIBRARY points to a static library (${CUDNN_LIBRARY}) but CUDNN_STATIC is OFF.")
+endif()
+
+find_library(CUDNN_LIBRARY_PATH ${CUDNN_LIBNAME}
+  PATHS ${CUDNN_LIBRARY}
+  PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64)
+
+find_package_handle_standard_args(CUDNN DEFAULT_MSG CUDNN_LIBRARY_PATH CUDNN_INCLUDE_PATH)
+
+if(CUDNN_FOUND)
+  # Get cuDNN version
+  if(EXISTS ${CUDNN_INCLUDE_PATH}/cudnn_version.h)
+    file(READ ${CUDNN_INCLUDE_PATH}/cudnn_version.h CUDNN_HEADER_CONTENTS)
+  else()
+    file(READ ${CUDNN_INCLUDE_PATH}/cudnn.h CUDNN_HEADER_CONTENTS)
+  endif()
+  string(REGEX MATCH "define CUDNN_MAJOR * +([0-9]+)"
+         CUDNN_VERSION_MAJOR "${CUDNN_HEADER_CONTENTS}")
+  string(REGEX REPLACE "define CUDNN_MAJOR * +([0-9]+)" "\\1"
+         CUDNN_VERSION_MAJOR "${CUDNN_VERSION_MAJOR}")
+  string(REGEX MATCH "define CUDNN_MINOR * +([0-9]+)"
+         CUDNN_VERSION_MINOR "${CUDNN_HEADER_CONTENTS}")
+  string(REGEX REPLACE "define CUDNN_MINOR * +([0-9]+)" "\\1"
+         CUDNN_VERSION_MINOR "${CUDNN_VERSION_MINOR}")
+  string(REGEX MATCH "define CUDNN_PATCHLEVEL * +([0-9]+)"
+         CUDNN_VERSION_PATCH "${CUDNN_HEADER_CONTENTS}")
+  string(REGEX REPLACE "define CUDNN_PATCHLEVEL * +([0-9]+)" "\\1"
+         CUDNN_VERSION_PATCH "${CUDNN_VERSION_PATCH}")
+  # Assemble cuDNN version
+  if(NOT CUDNN_VERSION_MAJOR)
+    set(CUDNN_VERSION "?")
+  else()
+    set(CUDNN_VERSION
+        "${CUDNN_VERSION_MAJOR}.${CUDNN_VERSION_MINOR}.${CUDNN_VERSION_PATCH}")
+  endif()
+endif()
+
+mark_as_advanced(CUDNN_ROOT CUDNN_INCLUDE_DIR CUDNN_LIBRARY CUDNN_VERSION)
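A sketch of how the exported variables are typically consumed (the target name is a placeholder, and the module directory is assumed to already be on CMAKE_MODULE_PATH):

    find_package(CUDNN)
    if(CUDNN_FOUND)
      message(STATUS "Found cuDNN ${CUDNN_VERSION}")
      target_include_directories(mynet PRIVATE ${CUDNN_INCLUDE_PATH})
      target_link_libraries(mynet ${CUDNN_LIBRARY_PATH})
    endif()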
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake
ADDED
@@ -0,0 +1,40 @@
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+# Present in upstream, but not supported on versions of cmake we need to support
+# include_guard(GLOBAL)
+
+# Initializes `<_PREFIX>_<CONFIG>` variables from the corresponding
+# `<_PREFIX>_<CONFIG>_INIT`, for the configurations currently used.
+function(cmake_initialize_per_config_variable _PREFIX _DOCSTRING)
+  string(STRIP "${${_PREFIX}_INIT}" _INIT)
+  set("${_PREFIX}" "${_INIT}"
+    CACHE STRING "${_DOCSTRING} during all build types.")
+  mark_as_advanced("${_PREFIX}")
+
+  if (NOT CMAKE_NOT_USING_CONFIG_FLAGS)
+    set(_CONFIGS Debug Release MinSizeRel RelWithDebInfo)
+
+    get_property(_GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
+    if (_GENERATOR_IS_MULTI_CONFIG)
+      list(APPEND _CONFIGS ${CMAKE_CONFIGURATION_TYPES})
+    else()
+      if (NOT CMAKE_NO_BUILD_TYPE)
+        set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE_INIT}" CACHE STRING
+          "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel ...")
+      endif()
+      list(APPEND _CONFIGS ${CMAKE_BUILD_TYPE})
+    endif()
+
+    list(REMOVE_DUPLICATES _CONFIGS)
+    foreach(_BUILD_TYPE IN LISTS _CONFIGS)
+      if (NOT "${_BUILD_TYPE}" STREQUAL "")
+        string(TOUPPER "${_BUILD_TYPE}" _BUILD_TYPE)
+        string(STRIP "${${_PREFIX}_${_BUILD_TYPE}_INIT}" _INIT)
+        set("${_PREFIX}_${_BUILD_TYPE}" "${_INIT}"
+          CACHE STRING "${_DOCSTRING} during ${_BUILD_TYPE} builds.")
+        mark_as_advanced("${_PREFIX}_${_BUILD_TYPE}")
+      endif()
+    endforeach()
+  endif()
+endfunction()
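Concretely, the function turns `<PREFIX>_INIT` and `<PREFIX>_<CONFIG>_INIT` seed values into user-editable cache entries; a hypothetical call (the variable values here are illustrative only):

    set(CMAKE_CUDA_FLAGS_INIT "-Wall")
    set(CMAKE_CUDA_FLAGS_DEBUG_INIT "-g -G")
    cmake_initialize_per_config_variable(CMAKE_CUDA_FLAGS
        "Flags used by the CUDA compiler")
    # Result: cache variables CMAKE_CUDA_FLAGS, CMAKE_CUDA_FLAGS_DEBUG,
    # CMAKE_CUDA_FLAGS_RELEASE, ... seeded from the *_INIT values above.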
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA.cmake
ADDED
@@ -0,0 +1,1979 @@
+#.rst:
+# FindCUDA
+# --------
+#
+# .. note::
+#
+#   The FindCUDA module has been superseded by first-class support
+#   for the CUDA language in CMake.  It is no longer necessary to
+#   use this module or call ``find_package(CUDA)``.  This module
+#   now exists only for compatibility with projects that have not
+#   been ported.
+#
+#   Instead, list ``CUDA`` among the languages named in the top-level
+#   call to the :command:`project` command, or call the
+#   :command:`enable_language` command with ``CUDA``.
+#   Then one can add CUDA (``.cu``) sources to programs directly
+#   in calls to :command:`add_library` and :command:`add_executable`.
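For contrast with the compatibility path this module provides, the ported style the note refers to looks like this (a sketch; the project, target, and file names are assumptions):

    project(demo LANGUAGES CXX CUDA)
    add_library(kernels STATIC kernels.cu)  # CMake drives nvcc natively
    add_executable(app main.cpp)
    target_link_libraries(app kernels)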
+#
+# Tools for building CUDA C files: libraries and build dependencies.
+#
+# This script locates the NVIDIA CUDA C tools.  It should work on Linux,
+# Windows, and macOS and should be reasonably up to date with CUDA C
+# releases.
+#
+# This script makes use of the standard :command:`find_package` arguments of
+# ``<VERSION>``, ``REQUIRED`` and ``QUIET``.  ``CUDA_FOUND`` will report if an
+# acceptable version of CUDA was found.
+#
+# The script will prompt the user to specify ``CUDA_TOOLKIT_ROOT_DIR`` if
+# the prefix cannot be determined by the location of nvcc in the system
+# path and ``REQUIRED`` is specified to :command:`find_package`.  To use
+# a different installed version of the toolkit set the environment variable
+# ``CUDA_BIN_PATH`` before running cmake (e.g.
+# ``CUDA_BIN_PATH=/usr/local/cuda1.0`` instead of the default
+# ``/usr/local/cuda``) or set ``CUDA_TOOLKIT_ROOT_DIR`` after configuring.  If
+# you change the value of ``CUDA_TOOLKIT_ROOT_DIR``, various components that
+# depend on the path will be relocated.
+#
+# It might be necessary to set ``CUDA_TOOLKIT_ROOT_DIR`` manually on certain
+# platforms, or to use a CUDA runtime not installed in the default
+# location.  In newer versions of the toolkit the CUDA library is
+# included with the graphics driver -- be sure that the driver version
+# matches what is needed by the CUDA runtime version.
+#
+# The following variables affect the behavior of the macros in the
+# script (in alphabetical order).  Note that any of these flags can be
+# changed multiple times in the same directory before calling
+# ``CUDA_ADD_EXECUTABLE``, ``CUDA_ADD_LIBRARY``, ``CUDA_COMPILE``,
+# ``CUDA_COMPILE_PTX``, ``CUDA_COMPILE_FATBIN``, ``CUDA_COMPILE_CUBIN``
+# or ``CUDA_WRAP_SRCS``::
+#
+#   CUDA_64_BIT_DEVICE_CODE (Default matches host bit size)
+#   -- Set to ON to compile for 64 bit device code, OFF for 32 bit device code.
+#      Note that making this different from the host code when generating object
+#      or C files from CUDA code just won't work, because size_t gets defined by
+#      nvcc in the generated source.  If you compile to PTX and then load the
+#      file yourself, you can mix bit sizes between device and host.
+#
+#   CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE (Default ON)
+#   -- Set to ON if you want the custom build rule to be attached to the source
+#      file in Visual Studio.  Turn OFF if you add the same cuda file to multiple
+#      targets.
+#
+#      This allows the user to build the target from the CUDA file; however, bad
+#      things can happen if the CUDA source file is added to multiple targets.
+#      When performing parallel builds it is possible for the custom build
+#      command to be run more than once and in parallel causing cryptic build
+#      errors.  VS runs the rules for every source file in the target, and a
+#      source can have only one rule no matter how many projects it is added to.
+#      When the rule is run from multiple targets race conditions can occur on
+#      the generated file.  Eventually everything will get built, but if the user
+#      is unaware of this behavior, there may be confusion.  It would be nice if
+#      this script could detect the reuse of source files across multiple targets
+#      and turn the option off for the user, but no good solution could be found.
+#
+#   CUDA_BUILD_CUBIN (Default OFF)
+#   -- Set to ON to enable an extra compilation pass with the -cubin option in
+#      Device mode.  The output is parsed and register, shared memory usage is
+#      printed during build.
+#
+#   CUDA_BUILD_EMULATION (Default OFF for device mode)
+#   -- Set to ON for Emulation mode.  -D_DEVICEEMU is defined for CUDA C files
+#      when CUDA_BUILD_EMULATION is TRUE.
+#
+#   CUDA_LINK_LIBRARIES_KEYWORD (Default "")
+#   -- The <PRIVATE|PUBLIC|INTERFACE> keyword to use for internal
+#      target_link_libraries calls.  The default is to use no keyword, which
+#      uses the old "plain" form of target_link_libraries.  Note that it matters
+#      because whatever is used inside the FindCUDA module must also be used
+#      outside - the two forms of target_link_libraries cannot be mixed.
+#
+#   CUDA_GENERATED_OUTPUT_DIR (Default CMAKE_CURRENT_BINARY_DIR)
+#   -- Set to the path you wish to have the generated files placed.  If it is
+#      blank output files will be placed in CMAKE_CURRENT_BINARY_DIR.
+#      Intermediate files will always be placed in
+#      CMAKE_CURRENT_BINARY_DIR/CMakeFiles.
+#
+#   CUDA_HOST_COMPILATION_CPP (Default ON)
+#   -- Set to OFF for C compilation of host code.
+#
+#   CUDA_HOST_COMPILER (Default CMAKE_C_COMPILER)
+#   -- Set the host compiler to be used by nvcc.  Ignored if -ccbin or
+#      --compiler-bindir is already present in the CUDA_NVCC_FLAGS or
+#      CUDA_NVCC_FLAGS_<CONFIG> variables.  For Visual Studio targets,
+#      the host compiler is constructed with one or more visual studio macros
+#      such as $(VCInstallDir), that expands out to the path when
+#      the command is run from within VS.
+#      If the CUDAHOSTCXX environment variable is set it will
+#      be used as the default.
+#
+#   CUDA_NVCC_FLAGS
+#   CUDA_NVCC_FLAGS_<CONFIG>
+#   -- Additional NVCC command line arguments.  NOTE: multiple arguments must be
+#      semi-colon delimited (e.g. --compiler-options;-Wall).  A usage sketch
+#      follows this list.
+#
+#   CUDA_PROPAGATE_HOST_FLAGS (Default ON)
+#   -- Set to ON to propagate CMAKE_{C,CXX}_FLAGS and their configuration
+#      dependent counterparts (e.g. CMAKE_C_FLAGS_DEBUG) automatically to the
+#      host compiler through nvcc's -Xcompiler flag.  This helps make the
+#      generated host code match the rest of the system better.  Sometimes
+#      certain flags give nvcc problems, and this will help you turn the flag
+#      propagation off.  This does not affect the flags supplied directly to nvcc
+#      via CUDA_NVCC_FLAGS or through the OPTION flags specified through
+#      CUDA_ADD_LIBRARY, CUDA_ADD_EXECUTABLE, or CUDA_WRAP_SRCS.  Flags used for
+#      shared library compilation are not affected by this flag.
+#
+#   CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST (Default "")
+#   -- A list containing the host flags that should not be propagated when
+#      CUDA_PROPAGATE_HOST_FLAGS is ON.
+#
+#   CUDA_SEPARABLE_COMPILATION (Default OFF)
+#   -- If set this will enable separable compilation for all CUDA runtime object
+#      files.  If used outside of CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY
+#      (e.g. calling CUDA_WRAP_SRCS directly),
+#      CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME and
+#      CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS should be called.
+#
+#   CUDA_SOURCE_PROPERTY_FORMAT
+#   -- If this source file property is set, it can override the format specified
+#      to CUDA_WRAP_SRCS (OBJ, PTX, CUBIN, or FATBIN).  If an input source file
+#      is not a .cu file, setting this property will cause it to be treated as a
+#      .cu file.  See documentation for set_source_files_properties on how to set
+#      this property.
+#
+#   CUDA_USE_STATIC_CUDA_RUNTIME (Default ON)
+#   -- When enabled the static version of the CUDA runtime library will be used
+#      in CUDA_LIBRARIES.  If the version of CUDA configured doesn't support
+#      this option, then it will be silently disabled.
+#
+#   CUDA_VERBOSE_BUILD (Default OFF)
+#   -- Set to ON to see all the commands used when building the CUDA file.  When
+#      using a Makefile generator the value defaults to VERBOSE (run make
+#      VERBOSE=1 to see output), although setting CUDA_VERBOSE_BUILD to ON will
+#      always print the output.
+#
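A configuration sketch exercising several of the variables above (all values are illustrative, not module defaults):

    set(CUDA_NVCC_FLAGS "--compiler-options;-Wall;-lineinfo")
    set(CUDA_NVCC_FLAGS_RELEASE "--use_fast_math")
    set(CUDA_SEPARABLE_COMPILATION ON)
    set(CUDA_PROPAGATE_HOST_FLAGS OFF)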
+# The script creates the following macros (in alphabetical order)::
+#
+#   CUDA_ADD_CUFFT_TO_TARGET( cuda_target )
+#   -- Adds the cufft library to the target (can be any target).  Handles whether
+#      you are in emulation mode or not.
+#
+#   CUDA_ADD_CUBLAS_TO_TARGET( cuda_target )
+#   -- Adds the cublas library to the target (can be any target).  Handles
+#      whether you are in emulation mode or not.
+#
+#   CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ...
+#                        [WIN32] [MACOSX_BUNDLE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
+#   -- Creates an executable "cuda_target" which is made up of the files
+#      specified.  All of the non CUDA C files are compiled using the standard
+#      build rules specified by CMAKE and the cuda files are compiled to object
+#      files using nvcc and the host compiler.  In addition CUDA_INCLUDE_DIRS is
+#      added automatically to include_directories().  Some standard CMake target
+#      calls can be used on the target after calling this macro
+#      (e.g. set_target_properties and target_link_libraries), but setting
+#      properties that adjust compilation flags will not affect code compiled by
+#      nvcc.  Such flags should be modified before calling CUDA_ADD_EXECUTABLE,
+#      CUDA_ADD_LIBRARY or CUDA_WRAP_SRCS.
+#
+#   CUDA_ADD_LIBRARY( cuda_target file0 file1 ...
+#                     [STATIC | SHARED | MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
+#   -- Same as CUDA_ADD_EXECUTABLE except that a library is created.  (A
+#      combined usage sketch follows this list.)
+#
+#   CUDA_BUILD_CLEAN_TARGET()
+#   -- Creates a convenience target that deletes all the dependency files
+#      generated.  You should make clean after running this target to ensure the
+#      dependency files get regenerated.
+#
+#   CUDA_COMPILE( generated_files file0 file1 ... [STATIC | SHARED | MODULE]
+#                 [OPTIONS ...] )
+#   -- Returns a list of generated files from the input source files to be used
+#      with ADD_LIBRARY or ADD_EXECUTABLE.
+#
+#   CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] )
+#   -- Returns a list of PTX files generated from the input source files.
+#
+#   CUDA_COMPILE_FATBIN( generated_files file0 file1 ... [OPTIONS ...] )
+#   -- Returns a list of FATBIN files generated from the input source files.
+#
+#   CUDA_COMPILE_CUBIN( generated_files file0 file1 ... [OPTIONS ...] )
+#   -- Returns a list of CUBIN files generated from the input source files.
+#
+#   CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME( output_file_var
+#                                                        cuda_target
+#                                                        object_files )
+#   -- Compute the name of the intermediate link file used for separable
+#      compilation.  This file name is typically passed into
+#      CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS.  output_file_var is produced
+#      based on cuda_target and the list of object files that need separable
+#      compilation as specified by object_files.  If the object_files list is
+#      empty, then output_file_var will be empty.  This function is called
+#      automatically for CUDA_ADD_LIBRARY and CUDA_ADD_EXECUTABLE.  Note that
+#      this is a function and not a macro.
+#
+#   CUDA_INCLUDE_DIRECTORIES( path0 path1 ... )
+#   -- Sets the directories that should be passed to nvcc
+#      (e.g. nvcc -Ipath0 -Ipath1 ... ).  These paths usually contain other .cu
+#      files.
+#
+#
+#   CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS( output_file_var cuda_target
+#                                            nvcc_flags object_files)
+#   -- Generates the link object required by separable compilation from the given
+#      object files.  This is called automatically for CUDA_ADD_EXECUTABLE and
+#      CUDA_ADD_LIBRARY, but can be called manually when using CUDA_WRAP_SRCS
+#      directly.  When called from CUDA_ADD_LIBRARY or CUDA_ADD_EXECUTABLE the
+#      nvcc_flags passed in are the same as the flags passed in via the OPTIONS
+#      argument.  The only nvcc flag added automatically is the bitness flag as
+#      specified by CUDA_64_BIT_DEVICE_CODE.  Note that this is a function
+#      instead of a macro.
+#
+#   CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures])
+#   -- Selects GPU arch flags for nvcc based on target_CUDA_architectures
+#      target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...)
+#       - "Auto" detects local machine GPU compute arch at runtime.
+#       - "Common" and "All" cover common and entire subsets of architectures
+#      ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX
+#      NAME: Kepler Maxwell Kepler+Tesla Maxwell+Tegra Pascal Volta Turing
+#      NUM: Any number.  Only those pairs are currently accepted by NVCC though:
+#            3.5 3.7 5.0 5.2 5.3 6.0 6.1 6.2 7.0 7.2 7.5
+#      Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable}
+#      Additionally, sets ${out_variable}_readable to the resulting numeric list
+#      Example:
+#       CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell)
+#        LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
+#
+#      More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA
+#      Note that this is a function instead of a macro.
+#
+#   CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ...
+#                    [STATIC | SHARED | MODULE] [OPTIONS ...] )
+#   -- This is where all the magic happens.  CUDA_ADD_EXECUTABLE,
+#      CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this
+#      function under the hood.
+#
+#      Given the list of files (file0 file1 ... fileN) this macro generates
+#      custom commands that generate either PTX or linkable objects (use "PTX" or
+#      "OBJ" for the format argument to switch).  Files that don't end with .cu
+#      or have the HEADER_FILE_ONLY property are ignored.
+#
+#      The arguments passed in after OPTIONS are extra command line options to
+#      give to nvcc.  You can also specify per configuration options by
+#      specifying the name of the configuration followed by the options.  General
+#      options must precede configuration specific options.  Not all
+#      configurations need to be specified, only the ones provided will be used.
+#
+#         OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag"
+#         DEBUG -g
+#         RELEASE --use_fast_math
+#         RELWITHDEBINFO --use_fast_math;-g
+#         MINSIZEREL --use_fast_math
+#
+#      For certain configurations (namely VS generating object files with
+#      CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will
+#      be produced for the given cuda file.  This is because when you add the
+#      cuda file to Visual Studio it knows that this file produces an object file
+#      and will link in the resulting object file automatically.
+#
+#      This script will also generate a separate cmake script that is used at
+#      build time to invoke nvcc.  This is for several reasons.
+#
+#        1. nvcc can return negative numbers as return values which confuses
+#        Visual Studio into thinking that the command succeeded.  The script now
+#        checks the error codes and produces errors when there was a problem.
+#
+#        2. nvcc has been known to not delete incomplete results when it
+#        encounters problems.  This confuses build systems into thinking the
+#        target was generated when in fact an unusable file exists.  The script
+#        now deletes the output files if there was an error.
+#
+#        3. By putting all the options that affect the build into a file and then
+#        make the build rule dependent on the file, the output files will be
+#        regenerated when the options change.
+#
+#      This script also looks at optional arguments STATIC, SHARED, or MODULE to
+#      determine when to target the object compilation for a shared library.
+#      BUILD_SHARED_LIBS is ignored in CUDA_WRAP_SRCS, but it is respected in
+#      CUDA_ADD_LIBRARY.  On some systems special flags are added for building
+#      objects intended for shared libraries.  A preprocessor macro,
+#      <target_name>_EXPORTS is defined when a shared library compilation is
+#      detected.
+#
+#      Flags passed into add_definitions with -D or /D are passed along to nvcc.
+#
+#
+#
+# The script defines the following variables::
+#
+#   CUDA_VERSION_MAJOR    -- The major version of cuda as reported by nvcc.
+#   CUDA_VERSION_MINOR    -- The minor version.
+#   CUDA_VERSION
+#   CUDA_VERSION_STRING   -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR
+#   CUDA_HAS_FP16         -- Whether a short float (float16,fp16) is supported.
+#
+#   CUDA_TOOLKIT_ROOT_DIR -- Path to the CUDA Toolkit (defined if not set).
+#   CUDA_SDK_ROOT_DIR     -- Path to the CUDA SDK.  Use this to find files in the
+#                            SDK.  This script will not directly support finding
+#                            specific libraries or headers, as that isn't
+#                            supported by NVIDIA.  If you want to change
+#                            libraries when the path changes see the
+#                            FindCUDA.cmake script for an example of how to clear
+#                            these variables.  There are also examples of how to
+#                            use the CUDA_SDK_ROOT_DIR to locate headers or
+#                            libraries, if you so choose (at your own risk).
+#   CUDA_INCLUDE_DIRS     -- Include directory for cuda headers.  Added automatically
+#                            for CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY.
+#   CUDA_LIBRARIES        -- Cuda RT library.
+#   CUDA_CUFFT_LIBRARIES  -- Device or emulation library for the Cuda FFT
+#                            implementation (alternative to:
+#                            CUDA_ADD_CUFFT_TO_TARGET macro)
+#   CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS
+#                            implementation (alternative to:
+#                            CUDA_ADD_CUBLAS_TO_TARGET macro).
+#   CUDA_cudart_static_LIBRARY -- Statically linkable cuda runtime library.
+#                            Only available for CUDA version 5.5+
+#   CUDA_cudadevrt_LIBRARY -- Device runtime library.
+#                            Required for separable compilation.
+#   CUDA_cupti_LIBRARY    -- CUDA Profiling Tools Interface library.
+#                            Only available for CUDA version 4.0+.
+#   CUDA_curand_LIBRARY   -- CUDA Random Number Generation library.
+#                            Only available for CUDA version 3.2+.
+#   CUDA_cusolver_LIBRARY -- CUDA Direct Solver library.
+#                            Only available for CUDA version 7.0+.
+#   CUDA_cusparse_LIBRARY -- CUDA Sparse Matrix library.
+#                            Only available for CUDA version 3.2+.
+#   CUDA_npp_LIBRARY      -- NVIDIA Performance Primitives lib.
+#                            Only available for CUDA version 4.0+.
+#   CUDA_nppc_LIBRARY     -- NVIDIA Performance Primitives lib (core).
+#                            Only available for CUDA version 5.5+.
+#   CUDA_nppi_LIBRARY     -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 5.5 - 8.0.
+#   CUDA_nppial_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppicc_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppicom_LIBRARY  -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppidei_LIBRARY  -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppif_LIBRARY    -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppig_LIBRARY    -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppim_LIBRARY    -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppist_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppisu_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_nppitc_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
+#                            Only available for CUDA version 9.0.
+#   CUDA_npps_LIBRARY     -- NVIDIA Performance Primitives lib (signal processing).
+#                            Only available for CUDA version 5.5+.
+#   CUDA_nvcuvenc_LIBRARY -- CUDA Video Encoder library.
+#                            Only available for CUDA version 3.2+.
+#                            Windows only.
+#   CUDA_nvcuvid_LIBRARY  -- CUDA Video Decoder library.
+#                            Only available for CUDA version 3.2+.
+#                            Windows only.
+#
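Putting several of these macros together, a minimal build sketch (target, source, and define names are hypothetical):

    cuda_select_nvcc_arch_flags(ARCH_FLAGS Auto)
    list(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
    cuda_add_library(mathops SHARED ops.cu
                     OPTIONS -DFOO=1 RELEASE --use_fast_math)
    cuda_add_cublas_to_target(mathops)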
+
+# James Bigler, NVIDIA Corp (nvidia.com - jbigler)
+# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
+#
+# Copyright (c) 2008 - 2009 NVIDIA Corporation.  All rights reserved.
+#
+# Copyright (c) 2007-2009
+# Scientific Computing and Imaging Institute, University of Utah
+#
+# This code is licensed under the MIT License.  See the FindCUDA.cmake script
+# for the text of the license.
+
+# The MIT License
+#
+# License for the specific language governing rights and limitations under
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+###############################################################################
+
+# FindCUDA.cmake
+
+# This macro helps us find the location of helper files we will need the full path to
+macro(CUDA_FIND_HELPER_FILE _name _extension)
+  set(_full_name "${_name}.${_extension}")
+  # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being
+  # processed.  Using this variable, we can pull out the current path, and
+  # provide a way to get access to the other files we need local to here.
+  get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+  set(CUDA_${_name} "${CMAKE_CURRENT_LIST_DIR}/FindCUDA/${_full_name}")
+  if(NOT EXISTS "${CUDA_${_name}}")
+    set(error_message "${_full_name} not found in ${CMAKE_CURRENT_LIST_DIR}/FindCUDA")
+    if(CUDA_FIND_REQUIRED)
+      message(FATAL_ERROR "${error_message}")
+    else()
+      if(NOT CUDA_FIND_QUIETLY)
+        message(STATUS "${error_message}")
+      endif()
+    endif()
+  endif()
+  # Set this variable as internal, so the user isn't bugged with it.
+  set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE)
+endmacro()
+
+#####################################################################
+## CUDA_INCLUDE_NVCC_DEPENDENCIES
+##
+
+# So we want to try and include the dependency file if it exists.  If
+# it doesn't exist then we need to create an empty one, so we can
+# include it.
+
+# If it does exist, then we need to check to see if all the files it
+# depends on exist.  If they don't then we should clear the dependency
+# file and regenerate it later.  This covers the case where a header
+# file has disappeared or moved.
+
+macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file)
+  set(CUDA_NVCC_DEPEND)
+  set(CUDA_NVCC_DEPEND_REGENERATE FALSE)
+
+
+  # Include the dependency file.  Create it first if it doesn't exist.  The
+  # INCLUDE puts a dependency that will force CMake to rerun and bring in the
+  # new info when it changes.  DO NOT REMOVE THIS (as I did and spent a few
+  # hours figuring out why it didn't work.)
+  if(NOT EXISTS ${dependency_file})
+    file(WRITE ${dependency_file} "#FindCUDA.cmake generated file.  Do not edit.\n")
+  endif()
+  # Always include this file to force CMake to run again next
+  # invocation and rebuild the dependencies.
+  #message("including dependency_file = ${dependency_file}")
+  include(${dependency_file})
+
+  # Now we need to verify the existence of all the included files
+  # here.  If they aren't there we need to just blank this variable and
+  # make the file regenerate again.
+#   if(DEFINED CUDA_NVCC_DEPEND)
+#     message("CUDA_NVCC_DEPEND set")
+#   else()
+#     message("CUDA_NVCC_DEPEND NOT set")
+#   endif()
+  if(CUDA_NVCC_DEPEND)
+    #message("CUDA_NVCC_DEPEND found")
+    foreach(f ${CUDA_NVCC_DEPEND})
+      # message("searching for ${f}")
+      if(NOT EXISTS ${f})
+        #message("file ${f} not found")
+        set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
+      endif()
+    endforeach()
+  else()
+    #message("CUDA_NVCC_DEPEND false")
+    # No dependencies, so regenerate the file.
+    set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
+  endif()
+
+  #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}")
+  # No incoming dependencies, so we need to generate them.  Make the
+  # output depend on the dependency file itself, which should cause the
+  # rule to re-run.
+  if(CUDA_NVCC_DEPEND_REGENERATE)
+    set(CUDA_NVCC_DEPEND ${dependency_file})
+    #message("Generating an empty dependency_file: ${dependency_file}")
+    file(WRITE ${dependency_file} "#FindCUDA.cmake generated file.  Do not edit.\n")
+  endif()
+
+endmacro()
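The dependency file included by this macro is itself a CMake fragment that assigns CUDA_NVCC_DEPEND; a sketch of its expected shape (the paths here are hypothetical; the real file is machine-generated at build time):

    # FindCUDA.cmake generated file.  Do not edit.
    SET(CUDA_NVCC_DEPEND
      /usr/local/cuda/include/cuda_runtime.h
      /home/user/proj/kernels.cu)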
+
+###############################################################################
+###############################################################################
+# Setup variables' defaults
+###############################################################################
+###############################################################################
+
+# Allow the user to specify if the device code is supposed to be 32 or 64 bit.
+if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+  set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON)
+else()
+  set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF)
+endif()
+option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT})
+
+# Attach the build rule to the source file in VS.  This option
+option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file.  Enable only when the CUDA source file is added to at most one target." ON)
+
+# Prints out extra information about the cuda file during compilation
+option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." OFF)
+
+# Set whether we are using emulation or device mode.
+option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF)
+
+# Where to put the generated output.
+set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files.  If blank it will default to the CMAKE_CURRENT_BINARY_DIR")
+
+# Parse HOST_COMPILATION mode.
+option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON)
+
+# Extra user settable flags
+cmake_initialize_per_config_variable(CUDA_NVCC_FLAGS "Semi-colon delimit multiple arguments.")
+
+if(DEFINED ENV{CUDAHOSTCXX})
+  set(CUDA_HOST_COMPILER "$ENV{CUDAHOSTCXX}" CACHE FILEPATH "Host side compiler used by NVCC")
+elseif(CMAKE_GENERATOR MATCHES "Visual Studio")
+  set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)Tools/MSVC/$(VCToolsVersion)/bin/Host$(Platform)/$(PlatformTarget)")
+  if(MSVC_VERSION LESS 1910)
+    set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)bin")
+  endif()
+
+  set(CUDA_HOST_COMPILER "${_CUDA_MSVC_HOST_COMPILER}" CACHE FILEPATH "Host side compiler used by NVCC")
+
+else()
+  if(APPLE
+      AND "${CMAKE_C_COMPILER_ID}" MATCHES "Clang"
+      AND "${CMAKE_C_COMPILER}" MATCHES "/cc$")
+    # Using cc which is symlink to clang may let NVCC think it is GCC and issue
+    # unhandled -dumpspecs option to clang.  Also in case neither
+    # CMAKE_C_COMPILER is defined (project does not use C language) nor
+    # CUDA_HOST_COMPILER is specified manually we should skip -ccbin and let
+    # nvcc use its own default C compiler.
+    # Only care about this on APPLE with clang to avoid
+    # following symlinks to things like ccache
+    if(DEFINED CMAKE_C_COMPILER AND NOT DEFINED CUDA_HOST_COMPILER)
+      get_filename_component(c_compiler_realpath "${CMAKE_C_COMPILER}" REALPATH)
+      # if the real path does not end up being clang then
+      # go back to using CMAKE_C_COMPILER
+      if(NOT "${c_compiler_realpath}" MATCHES "/clang$")
+        set(c_compiler_realpath "${CMAKE_C_COMPILER}")
+      endif()
+    else()
+      set(c_compiler_realpath "")
+    endif()
+    set(CUDA_HOST_COMPILER "${c_compiler_realpath}" CACHE FILEPATH "Host side compiler used by NVCC")
+  elseif(MSVC AND "${CMAKE_C_COMPILER}" MATCHES "clcache|sccache")
+    # NVCC does not think it will work if it is passed clcache.exe or sccache.exe
+    # as the host compiler, which means that builds with CC=cl.exe won't work.
+    # Best to just feed it whatever the actual cl.exe is as the host compiler.
+    set(CUDA_HOST_COMPILER "cl.exe" CACHE FILEPATH "Host side compiler used by NVCC")
+  else()
+    set(CUDA_HOST_COMPILER "${CMAKE_C_COMPILER}"
+      CACHE FILEPATH "Host side compiler used by NVCC")
+  endif()
+endif()
+
+# Propagate the host flags to the host compiler via -Xcompiler
+option(CUDA_PROPAGATE_HOST_FLAGS "Propagate C/CXX_FLAGS and friends to the host compiler via -Xcompiler" ON)
+
+# Blacklisted flags to prevent propagation
+set(CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST "" CACHE STRING "Blacklisted flags to prevent propagation")
+
+# Enable CUDA_SEPARABLE_COMPILATION
+option(CUDA_SEPARABLE_COMPILATION "Compile CUDA objects with separable compilation enabled.  Requires CUDA 5.0+" OFF)
+
+# Specifies whether the commands used when compiling the .cu file will be printed out.
+option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file.  With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF)
+
+mark_as_advanced(
+  CUDA_64_BIT_DEVICE_CODE
+  CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE
+  CUDA_GENERATED_OUTPUT_DIR
+  CUDA_HOST_COMPILATION_CPP
+  CUDA_NVCC_FLAGS
+  CUDA_PROPAGATE_HOST_FLAGS
+  CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST
+  CUDA_BUILD_CUBIN
+  CUDA_BUILD_EMULATION
+  CUDA_VERBOSE_BUILD
+  CUDA_SEPARABLE_COMPILATION
+  )
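These cache options are usually seeded from the command line; an illustrative invocation (the compiler path is hypothetical):

    CUDAHOSTCXX=/usr/bin/g++-7 cmake \
        -DCUDA_PROPAGATE_HOST_FLAGS=OFF \
        -DCUDA_SEPARABLE_COMPILATION=ON ..

The CUDAHOSTCXX environment variable feeds the `if(DEFINED ENV{CUDAHOSTCXX})` branch above, and the -D options pre-populate the corresponding cache entries.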
+
+# Single config generators like Makefiles or Ninja don't usually have
+# CMAKE_CONFIGURATION_TYPES defined (but note that it can be defined if set by
+# projects or developers).  Even CMAKE_BUILD_TYPE might not be defined for
+# single config generators (and should not be defined for multi-config
+# generators).  To ensure we get a complete superset of all possible
+# configurations, we combine CMAKE_CONFIGURATION_TYPES, CMAKE_BUILD_TYPE and
+# all of the standard configurations, then weed out duplicates with
+# list(REMOVE_DUPLICATES).  Looping over the unique set then ensures we have
+# each configuration-specific set of nvcc flags defined and marked as advanced.
+set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo)
+list(REMOVE_DUPLICATES CUDA_configuration_types)
+
+###############################################################################
+###############################################################################
+# Locate CUDA, Set Build Type, etc.
+###############################################################################
+###############################################################################
+
+macro(cuda_unset_include_and_libraries)
+  unset(CUDA_TOOLKIT_INCLUDE CACHE)
+  unset(CUDA_CUDART_LIBRARY CACHE)
+  unset(CUDA_CUDA_LIBRARY CACHE)
+  # Make sure you run this before you unset CUDA_VERSION.
+  unset(CUDA_cudart_static_LIBRARY CACHE)
+  unset(CUDA_cudadevrt_LIBRARY CACHE)
+  unset(CUDA_cublas_LIBRARY CACHE)
+  unset(CUDA_cublas_device_LIBRARY CACHE)
+  unset(CUDA_cublasemu_LIBRARY CACHE)
+  unset(CUDA_cublasLt_LIBRARY CACHE)
+  unset(CUDA_cufft_LIBRARY CACHE)
+  unset(CUDA_cufftemu_LIBRARY CACHE)
+  unset(CUDA_cupti_LIBRARY CACHE)
+  unset(CUDA_curand_LIBRARY CACHE)
+  unset(CUDA_cusolver_LIBRARY CACHE)
+  unset(CUDA_cusparse_LIBRARY CACHE)
+  unset(CUDA_npp_LIBRARY CACHE)
+  unset(CUDA_nppc_LIBRARY CACHE)
+  unset(CUDA_nppi_LIBRARY CACHE)
+  unset(CUDA_npps_LIBRARY CACHE)
+  unset(CUDA_nvcuvenc_LIBRARY CACHE)
+  unset(CUDA_nvcuvid_LIBRARY CACHE)
+  unset(CUDA_GPU_DETECT_OUTPUT CACHE)
+endmacro()
+
+# Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed;
+# if they have, then clear the cache variables so that they will be detected again.
+if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")
+  unset(CUDA_TOOLKIT_TARGET_DIR CACHE)
+  unset(CUDA_NVCC_EXECUTABLE CACHE)
+  cuda_unset_include_and_libraries()
+  unset(CUDA_VERSION CACHE)
+endif()
+
+if(NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}")
+  cuda_unset_include_and_libraries()
+endif()
+
+#
+# End of unset()
+#
+
+#
+# Start looking for things
+#
+
+# Search for the cuda distribution.
+if(NOT CUDA_TOOLKIT_ROOT_DIR AND NOT CMAKE_CROSSCOMPILING)
+  # Search in the CUDA_BIN_PATH first.
+  find_program(CUDA_TOOLKIT_ROOT_DIR_NVCC
+    NAMES nvcc nvcc.exe
+    PATHS
+      ENV CUDA_TOOLKIT_ROOT
+      ENV CUDA_PATH
+      ENV CUDA_BIN_PATH
+    PATH_SUFFIXES bin bin64
+    DOC "Toolkit location."
+    NO_DEFAULT_PATH
+    )
+
+  # Now search default paths
+  find_program(CUDA_TOOLKIT_ROOT_DIR_NVCC
+    NAMES nvcc nvcc.exe
+    PATHS /opt/cuda/bin
+    PATH_SUFFIXES cuda/bin
+    DOC "Toolkit location."
+    )
+
+  if (CUDA_TOOLKIT_ROOT_DIR_NVCC)
+    get_filename_component(CUDA_TOOLKIT_ROOT_DIR_NVCC_PAR "${CUDA_TOOLKIT_ROOT_DIR_NVCC}" DIRECTORY)
+    get_filename_component(CUDA_TOOLKIT_ROOT_DIR "${CUDA_TOOLKIT_ROOT_DIR_NVCC_PAR}" DIRECTORY CACHE)
+    string(REGEX REPLACE "[/\\\\]?bin[64]*[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR})
+    # We need to force this back into the cache.
+    set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." FORCE)
+    set(CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR})
+  endif()
+  unset(CUDA_TOOLKIT_ROOT_DIR_NVCC CACHE)
+
+  if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
+    if(CUDA_FIND_REQUIRED)
+      message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR")
+    elseif(NOT CUDA_FIND_QUIETLY)
+      message("CUDA_TOOLKIT_ROOT_DIR not found or specified")
+    endif()
+  endif ()
+endif ()
+
+if(CMAKE_CROSSCOMPILING)
+  SET (CUDA_TOOLKIT_ROOT $ENV{CUDA_TOOLKIT_ROOT})
+  if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a")
+    # Support for NVPACK
+    set (CUDA_TOOLKIT_TARGET_NAME "armv7-linux-androideabi")
+  elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
+    # Support for arm cross compilation
+    set(CUDA_TOOLKIT_TARGET_NAME "armv7-linux-gnueabihf")
+  elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+    # Support for aarch64 cross compilation
+    if (ANDROID_ARCH_NAME STREQUAL "arm64")
+      set(CUDA_TOOLKIT_TARGET_NAME "aarch64-linux-androideabi")
+    else()
+      set(CUDA_TOOLKIT_TARGET_NAME "aarch64-linux")
+    endif (ANDROID_ARCH_NAME STREQUAL "arm64")
+  endif()
+
+  if (EXISTS "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}")
+    set(CUDA_TOOLKIT_TARGET_DIR "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}" CACHE PATH "CUDA Toolkit target location.")
+    SET (CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT})
+    mark_as_advanced(CUDA_TOOLKIT_TARGET_DIR)
+  endif()
+
+  # add known CUDA target root path to the set of directories we search for programs, libraries and headers
+  set( CMAKE_FIND_ROOT_PATH "${CUDA_TOOLKIT_TARGET_DIR};${CMAKE_FIND_ROOT_PATH}")
+  macro( cuda_find_host_program )
+    if (COMMAND find_host_program)
+      find_host_program( ${ARGN} )
+    else()
+      find_program( ${ARGN} )
+    endif()
+  endmacro()
+else()
+  # for non-cross-compile, find_host_program == find_program and CUDA_TOOLKIT_TARGET_DIR == CUDA_TOOLKIT_ROOT_DIR
+  macro( cuda_find_host_program )
+    find_program( ${ARGN} )
+  endmacro()
+  SET (CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR})
+endif()
+
+
+# CUDA_NVCC_EXECUTABLE
+if(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
+  set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler")
+else()
+  cuda_find_host_program(CUDA_NVCC_EXECUTABLE
+    NAMES nvcc
+    PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
+    ENV CUDA_PATH
+    ENV CUDA_BIN_PATH
+    PATH_SUFFIXES bin bin64
+    NO_DEFAULT_PATH
+    )
+  # Search default search paths, after we search our own set of paths.
+  cuda_find_host_program(CUDA_NVCC_EXECUTABLE nvcc)
+endif()
+
+if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION)
+  # Compute the version.
+  execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} "--version"
+    OUTPUT_VARIABLE NVCC_OUT
+    RESULT_VARIABLE NVCC_RC)
+  if(NOT (${NVCC_RC} EQUAL 0))
+    message(WARNING "Failed to execute '${CUDA_NVCC_EXECUTABLE} --version'")
+    set(CUDA_FOUND FALSE)
+    return()
+  endif()
+  string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT})
+  string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT})
+  set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.")
+  mark_as_advanced(CUDA_VERSION)
+else()
+  # Need to set these based off of the cached value
+  string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR "${CUDA_VERSION}")
+  string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR "${CUDA_VERSION}")
+endif()
+
+# Always set this convenience variable
+set(CUDA_VERSION_STRING "${CUDA_VERSION}")
| 790 |
+
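
# A minimal sketch of the version extraction above; the sample nvcc banner text
# is hypothetical (real output varies by toolkit), but the regexes are the same
# ones used above, so this can be verified with `cmake -P` if uncommented.
# set(_sample_out "Cuda compilation tools, release 11.4, V11.4.48")
# string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" _major "${_sample_out}")
# string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" _minor "${_sample_out}")
# message(STATUS "Parsed CUDA version: ${_major}.${_minor}")  # prints 11.4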

# CUDA_TOOLKIT_INCLUDE
find_path(CUDA_TOOLKIT_INCLUDE
  device_functions.h # Header included in toolkit
  PATHS ${CUDA_TOOLKIT_TARGET_DIR}
  ENV CUDA_PATH
  ENV CUDA_INC_PATH
  PATH_SUFFIXES include
  NO_DEFAULT_PATH
  )
# Search default search paths, after we search our own set of paths.
find_path(CUDA_TOOLKIT_INCLUDE device_functions.h)
mark_as_advanced(CUDA_TOOLKIT_INCLUDE)

set(CUDA_HAS_FP16 TRUE)

# Set the user list of include dirs to nothing to initialize it.
set (CUDA_NVCC_INCLUDE_DIRS_USER "")
set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE})

macro(cuda_find_library_local_first_with_path_ext _var _names _doc _path_ext )
  if(CMAKE_SIZEOF_VOID_P EQUAL 8)
    # CUDA 3.2+ on Windows moved the library directories, so we need the new
    # and old paths.
    set(_cuda_64bit_lib_dir "${_path_ext}lib/x64" "${_path_ext}lib64" "${_path_ext}libx64" )
  endif()
  # CUDA 3.2+ on Windows moved the library directories, so we need the new
  # path (lib/Win32) and the old path (lib).
  find_library(${_var}
    NAMES ${_names}
    PATHS "${CUDA_TOOLKIT_TARGET_DIR}"
    ENV CUDA_PATH
    ENV CUDA_LIB_PATH
    PATH_SUFFIXES ${_cuda_64bit_lib_dir} "${_path_ext}lib/Win32" "${_path_ext}lib" "${_path_ext}libWin32"
    DOC ${_doc}
    NO_DEFAULT_PATH
    )
  if (NOT CMAKE_CROSSCOMPILING)
    # Search default search paths, after we search our own set of paths.
    find_library(${_var}
      NAMES ${_names}
      PATHS "/usr/lib/nvidia-current"
      DOC ${_doc}
      )
  endif()
endmacro()

macro(cuda_find_library_local_first _var _names _doc)
  cuda_find_library_local_first_with_path_ext( "${_var}" "${_names}" "${_doc}" "" )
endmacro()

macro(find_library_local_first _var _names _doc )
  cuda_find_library_local_first( "${_var}" "${_names}" "${_doc}" "" )
endmacro()
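
# Usage sketch for the helper above (the library name here is illustrative):
# the first find_library call prefers the toolkit's own lib directories, and
# only then falls back to the default system search paths.
# cuda_find_library_local_first(CUDA_nvToolsExt_LIBRARY nvToolsExt "\"nvToolsExt\" library")
# message(STATUS "nvToolsExt: ${CUDA_nvToolsExt_LIBRARY}")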


# CUDA_LIBRARIES
cuda_find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library")

cuda_find_library_local_first(CUDA_cudart_static_LIBRARY cudart_static "static CUDA runtime library")
mark_as_advanced(CUDA_cudart_static_LIBRARY)


if(CUDA_cudart_static_LIBRARY)
  # If the static cudart is available, use it by default, but provide a user-visible option to disable it.
  option(CUDA_USE_STATIC_CUDA_RUNTIME "Use the static version of the CUDA runtime library if available" ON)
else()
  # If not available, silently disable the option.
  set(CUDA_USE_STATIC_CUDA_RUNTIME OFF CACHE INTERNAL "")
endif()

if(CUDA_USE_STATIC_CUDA_RUNTIME)
  set(CUDA_CUDART_LIBRARY_VAR CUDA_cudart_static_LIBRARY)
else()
  set(CUDA_CUDART_LIBRARY_VAR CUDA_CUDART_LIBRARY)
endif()

cuda_find_library_local_first(CUDA_cudadevrt_LIBRARY cudadevrt "\"cudadevrt\" library")
mark_as_advanced(CUDA_cudadevrt_LIBRARY)

if(CUDA_USE_STATIC_CUDA_RUNTIME)
  if(UNIX)
    # Check for the dependent libraries. Here we look for pthreads.
    if (DEFINED CMAKE_THREAD_PREFER_PTHREAD)
      set(_cuda_cmake_thread_prefer_pthread ${CMAKE_THREAD_PREFER_PTHREAD})
    endif()
    set(CMAKE_THREAD_PREFER_PTHREAD 1)

    # Many of the FindXYZ modules that CMake ships with use try_compile with int main(){return 0;}
    # as the source file. Unfortunately this causes a warning with -Wstrict-prototypes, and
    # -Werror causes the try_compile to fail. We will just temporarily disable other flags
    # when doing the find_package command here.
    set(_cuda_cmake_c_flags ${CMAKE_C_FLAGS})
    set(CMAKE_C_FLAGS "-fPIC")
    find_package(Threads REQUIRED)
    set(CMAKE_C_FLAGS ${_cuda_cmake_c_flags})

    if (DEFINED _cuda_cmake_thread_prefer_pthread)
      set(CMAKE_THREAD_PREFER_PTHREAD ${_cuda_cmake_thread_prefer_pthread})
      unset(_cuda_cmake_thread_prefer_pthread)
    else()
      unset(CMAKE_THREAD_PREFER_PTHREAD)
    endif()

    if(NOT APPLE)
      # On Linux, you must link against librt when using the static cuda runtime.
      find_library(CUDA_rt_LIBRARY rt)
      if (NOT CUDA_rt_LIBRARY)
        message(WARNING "Expecting to find librt for libcudart_static, but didn't find it.")
      endif()
    endif()
  endif()
endif()

cuda_find_library_local_first_with_path_ext(CUDA_cupti_LIBRARY cupti "\"cupti\" library" "extras/CUPTI/")
mark_as_advanced(CUDA_cupti_LIBRARY)

# Set the CUDA_LIBRARIES variable. This is the set of stuff to link against if you are
# using the CUDA runtime. For the dynamic version of the runtime, most of the
# dependencies are brought in, but for the static version there are additional libraries
# and linker commands needed.
# Initialize to empty
set(CUDA_LIBRARIES)

# If we are using emulation mode and we found the cudartemu library then use
# that one instead of cudart.
if(CUDA_BUILD_EMULATION AND CUDA_CUDARTEMU_LIBRARY)
  list(APPEND CUDA_LIBRARIES ${CUDA_CUDARTEMU_LIBRARY})
elseif(CUDA_USE_STATIC_CUDA_RUNTIME AND CUDA_cudart_static_LIBRARY)
  list(APPEND CUDA_LIBRARIES ${CUDA_cudart_static_LIBRARY} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS})
  if (CUDA_rt_LIBRARY)
    list(APPEND CUDA_LIBRARIES ${CUDA_rt_LIBRARY})
  endif()
  if(APPLE)
    # We need to add the default path to the driver (libcuda.dylib) as an rpath, so that
    # the static cuda runtime can find it at runtime.
    list(APPEND CUDA_LIBRARIES -Wl,-rpath,/usr/local/cuda/lib)
  endif()
else()
  list(APPEND CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY})
endif()
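
# Consumer sketch (hypothetical target name): once this module has run,
# a project typically links against the assembled runtime list like so.
# add_executable(my_app main.cpp)
# target_link_libraries(my_app ${CUDA_LIBRARIES})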

# The 1.1 toolkit on linux doesn't appear to have a separate library on
# some platforms.
cuda_find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).")

mark_as_advanced(
  CUDA_CUDA_LIBRARY
  CUDA_CUDART_LIBRARY
  )

#######################
# Look for some of the toolkit helper libraries
macro(FIND_CUDA_HELPER_LIBS _name)
  cuda_find_library_local_first(CUDA_${_name}_LIBRARY ${_name} "\"${_name}\" library")
  mark_as_advanced(CUDA_${_name}_LIBRARY)
endmacro()

if(CUDA_BUILD_EMULATION)
  message(FATAL_ERROR "CUDA_BUILD_EMULATION is not supported in version 3.1 and onwards. You must disable it to proceed. You have version ${CUDA_VERSION}.")
endif()

find_cuda_helper_libs(cufft)
find_cuda_helper_libs(cublas)
find_cuda_helper_libs(cublasLt)
# cusparse showed up in version 3.2
find_cuda_helper_libs(cusparse)
find_cuda_helper_libs(curand)
if (WIN32)
  find_cuda_helper_libs(nvcuvenc)
  find_cuda_helper_libs(nvcuvid)
endif()

# In CUDA 9.0 the monolithic nppi library was removed and split into the libraries below.
find_cuda_helper_libs(nppc)
find_cuda_helper_libs(nppial)
find_cuda_helper_libs(nppicc)
find_cuda_helper_libs(nppicom)
find_cuda_helper_libs(nppidei)
find_cuda_helper_libs(nppif)
find_cuda_helper_libs(nppig)
find_cuda_helper_libs(nppim)
find_cuda_helper_libs(nppist)
find_cuda_helper_libs(nppisu)
find_cuda_helper_libs(nppitc)
find_cuda_helper_libs(npps)
set(CUDA_npp_LIBRARY "${CUDA_nppc_LIBRARY};${CUDA_nppial_LIBRARY};${CUDA_nppicc_LIBRARY};${CUDA_nppicom_LIBRARY};${CUDA_nppidei_LIBRARY};${CUDA_nppif_LIBRARY};${CUDA_nppig_LIBRARY};${CUDA_nppim_LIBRARY};${CUDA_nppist_LIBRARY};${CUDA_nppisu_LIBRARY};${CUDA_nppitc_LIBRARY};${CUDA_npps_LIBRARY}")
# cusolver showed up in version 7.0
find_cuda_helper_libs(cusolver)

if (CUDA_BUILD_EMULATION)
  set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY})
  set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY})
else()
  set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY})
  set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY} ${CUDA_cublasLt_LIBRARY})
endif()

########################
# Look for the SDK stuff. As of CUDA 3.0, NVSDKCUDA_ROOT has been replaced with
# NVSDKCOMPUTE_ROOT, with the old CUDA C contents moved into the C subdirectory.
find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h
  HINTS
    "$ENV{NVSDKCOMPUTE_ROOT}/C"
    ENV NVSDKCUDA_ROOT
    "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]"
  PATHS
    "/Developer/GPU\ Computing/C"
  )

# Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the
# environment variables.
set(CUDA_SDK_SEARCH_PATH
  "${CUDA_SDK_ROOT_DIR}"
  "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2"
  "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2"
  "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK"
  "$ENV{HOME}/NVIDIA_CUDA_SDK"
  "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX"
  "/Developer/CUDA"
  )

# Example of how to find an include file from the CUDA_SDK_ROOT_DIR

# find_path(CUDA_CUT_INCLUDE_DIR
#   cutil.h
#   PATHS ${CUDA_SDK_SEARCH_PATH}
#   PATH_SUFFIXES "common/inc"
#   DOC "Location of cutil.h"
#   NO_DEFAULT_PATH
#   )
# # Now search system paths
# find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h")

# mark_as_advanced(CUDA_CUT_INCLUDE_DIR)


# Example of how to find a library in the CUDA_SDK_ROOT_DIR

# # cutil library is called cutil64 for 64 bit builds on windows. We don't want
# # to get these confused, so we are setting the name based on the word size of
# # the build.

# if(CMAKE_SIZEOF_VOID_P EQUAL 8)
#   set(cuda_cutil_name cutil64)
# else()
#   set(cuda_cutil_name cutil32)
# endif()

# find_library(CUDA_CUT_LIBRARY
#   NAMES cutil ${cuda_cutil_name}
#   PATHS ${CUDA_SDK_SEARCH_PATH}
#   # The new version of the sdk shows up in common/lib, but the old one is in lib
#   PATH_SUFFIXES "common/lib" "lib"
#   DOC "Location of cutil library"
#   NO_DEFAULT_PATH
#   )
# # Now search system paths
# find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library")
# mark_as_advanced(CUDA_CUT_LIBRARY)
# set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY})



#############################
# Check for required components
set(CUDA_FOUND TRUE)

set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE)
set(CUDA_TOOLKIT_TARGET_DIR_INTERNAL "${CUDA_TOOLKIT_TARGET_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_TOOLKIT_TARGET_DIR was set successfully." FORCE)
set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE)

include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake)

find_package_handle_standard_args(CUDA
  REQUIRED_VARS
    CUDA_TOOLKIT_ROOT_DIR
    CUDA_NVCC_EXECUTABLE
    CUDA_INCLUDE_DIRS
    ${CUDA_CUDART_LIBRARY_VAR}
  VERSION_VAR
    CUDA_VERSION
  )
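
# Usage sketch: a project loads this module via find_package and then consumes
# the variables it defines (the version number here is illustrative only).
# find_package(CUDA 9.0 REQUIRED)
# include_directories(${CUDA_INCLUDE_DIRS})
# message(STATUS "Found CUDA ${CUDA_VERSION_STRING} at ${CUDA_TOOLKIT_ROOT_DIR}")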



###############################################################################
###############################################################################
# Macros
###############################################################################
###############################################################################

###############################################################################
# Add include directories to pass to the nvcc command.
macro(CUDA_INCLUDE_DIRECTORIES)
  foreach(dir ${ARGN})
    list(APPEND CUDA_NVCC_INCLUDE_DIRS_USER ${dir})
  endforeach()
endmacro()
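
# Usage sketch (hypothetical path): queue extra include directories for nvcc
# before any of the cuda_* wrapping commands below are invoked.
# CUDA_INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/include)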


##############################################################################
cuda_find_helper_file(parse_cubin cmake)
cuda_find_helper_file(make2cmake cmake)
cuda_find_helper_file(run_nvcc cmake)
include("${CMAKE_CURRENT_LIST_DIR}/FindCUDA/select_compute_arch.cmake")

##############################################################################
# Separate the OPTIONS out from the sources
#
macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _cmake_options _options)
  set( ${_sources} )
  set( ${_cmake_options} )
  set( ${_options} )
  set( _found_options FALSE )
  foreach(arg ${ARGN})
    if("x${arg}" STREQUAL "xOPTIONS")
      set( _found_options TRUE )
    elseif(
        "x${arg}" STREQUAL "xWIN32" OR
        "x${arg}" STREQUAL "xMACOSX_BUNDLE" OR
        "x${arg}" STREQUAL "xEXCLUDE_FROM_ALL" OR
        "x${arg}" STREQUAL "xSTATIC" OR
        "x${arg}" STREQUAL "xSHARED" OR
        "x${arg}" STREQUAL "xMODULE"
        )
      list(APPEND ${_cmake_options} ${arg})
    else()
      if ( _found_options )
        list(APPEND ${_options} ${arg})
      else()
        # Assume this is a file
        list(APPEND ${_sources} ${arg})
      endif()
    endif()
  endforeach()
endmacro()
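
# Illustrative call (hypothetical file names): everything before OPTIONS is
# treated as a source or recognized CMake keyword; everything after it goes
# to nvcc.
# CUDA_GET_SOURCES_AND_OPTIONS(_srcs _cmake_opts _nvcc_opts
#   SHARED a.cu b.cu OPTIONS -O3 --use_fast_math)
# # _srcs = a.cu;b.cu   _cmake_opts = SHARED   _nvcc_opts = -O3;--use_fast_math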

##############################################################################
# Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix
#
macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix)
  set( _found_config )
  foreach(arg ${ARGN})
    # Determine if we are dealing with a per-configuration flag
    foreach(config ${CUDA_configuration_types})
      string(TOUPPER ${config} config_upper)
      if (arg STREQUAL "${config_upper}")
        set( _found_config _${arg})
        # Set arg to nothing to keep it from being processed further
        set( arg )
      endif()
    endforeach()

    if ( arg )
      list(APPEND ${_option_prefix}${_found_config} "${arg}")
    endif()
  endforeach()
endmacro()
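
# Illustrative parse (assuming DEBUG is among CUDA_configuration_types):
# a configuration keyword switches the destination list for later flags.
# CUDA_PARSE_NVCC_OPTIONS(MY_FLAGS -O2 DEBUG -G)
# # MY_FLAGS = -O2   MY_FLAGS_DEBUG = -G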

##############################################################################
# Helper to add the include directory for CUDA only once
function(CUDA_ADD_CUDA_INCLUDE_ONCE)
  get_directory_property(_include_directories INCLUDE_DIRECTORIES)
  set(_add TRUE)
  if(_include_directories)
    foreach(dir ${_include_directories})
      if("${dir}" STREQUAL "${CUDA_INCLUDE_DIRS}")
        set(_add FALSE)
      endif()
    endforeach()
  endif()
  if(_add)
    include_directories(${CUDA_INCLUDE_DIRS})
  endif()
endfunction()

function(CUDA_BUILD_SHARED_LIBRARY shared_flag)
  set(cmake_args ${ARGN})
  # If SHARED, MODULE, or STATIC aren't already in the list of arguments, then
  # add SHARED or STATIC based on the value of BUILD_SHARED_LIBS.
  list(FIND cmake_args SHARED _cuda_found_SHARED)
  list(FIND cmake_args MODULE _cuda_found_MODULE)
  list(FIND cmake_args STATIC _cuda_found_STATIC)
  if( _cuda_found_SHARED GREATER -1 OR
      _cuda_found_MODULE GREATER -1 OR
      _cuda_found_STATIC GREATER -1)
    set(_cuda_build_shared_libs)
  else()
    if (BUILD_SHARED_LIBS)
      set(_cuda_build_shared_libs SHARED)
    else()
      set(_cuda_build_shared_libs STATIC)
    endif()
  endif()
  set(${shared_flag} ${_cuda_build_shared_libs} PARENT_SCOPE)
endfunction()
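
# Illustrative call (hypothetical variable name _kind): with no explicit
# SHARED/MODULE/STATIC keyword in the arguments, the result mirrors
# BUILD_SHARED_LIBS.
# set(BUILD_SHARED_LIBS ON)
# CUDA_BUILD_SHARED_LIBRARY(_kind foo.cu)  # _kind becomes SHARED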

##############################################################################
# Helper to avoid clashes of files with the same basename but different paths.
# This doesn't attempt to do exactly what CMake internals do, which is to only
# add this path when there is a conflict, since by the time a second collision
# in names is detected it's already too late to fix the first one. For
# consistency's sake the relative path will be added to all files.
function(CUDA_COMPUTE_BUILD_PATH path build_path)
  #message("CUDA_COMPUTE_BUILD_PATH([${path}] ${build_path})")
  # Only deal with CMake style paths from here on out
  file(TO_CMAKE_PATH "${path}" bpath)
  if (IS_ABSOLUTE "${bpath}")
    # Absolute paths are generally unnecessary, especially if something like
    # file(GLOB_RECURSE) is used to pick up the files.

    string(FIND "${bpath}" "${CMAKE_CURRENT_BINARY_DIR}" _binary_dir_pos)
    if (_binary_dir_pos EQUAL 0)
      file(RELATIVE_PATH bpath "${CMAKE_CURRENT_BINARY_DIR}" "${bpath}")
    else()
      file(RELATIVE_PATH bpath "${CMAKE_CURRENT_SOURCE_DIR}" "${bpath}")
    endif()
  endif()

  # This recipe is from cmLocalGenerator::CreateSafeUniqueObjectFileName in the
  # CMake source.

  # Remove leading /
  string(REGEX REPLACE "^[/]+" "" bpath "${bpath}")
  # Avoid absolute paths by removing ':'
  string(REPLACE ":" "_" bpath "${bpath}")
  # Avoid relative paths that go up the tree
  string(REPLACE "../" "__/" bpath "${bpath}")
  # Avoid spaces
  string(REPLACE " " "_" bpath "${bpath}")

  # Strip off the filename. I wait until here to do it, since removing the
  # basename can make a path that looked like path/../basename turn into
  # path/.. (notice the trailing slash).
  get_filename_component(bpath "${bpath}" PATH)

  set(${build_path} "${bpath}" PARENT_SCOPE)
  #message("${build_path} = ${bpath}")
endfunction()
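
# Illustrative result (hypothetical input): a relative source path keeps its
# sanitized directory part, for use under CMakeFiles/<target>.dir/.
# CUDA_COMPUTE_BUILD_PATH("src/kernels/reduce.cu" _bp)  # _bp = src/kernels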

##############################################################################
# This helper macro populates the following variables and sets up custom
# commands and targets to invoke the nvcc compiler to generate C or PTX source
# dependent upon the format parameter. The compiler is invoked once with -M
# to generate a dependency file and a second time with -cuda or -ptx to generate
# a .cpp or .ptx file.
# INPUT:
#   cuda_target - Target name
#   format - PTX, CUBIN, FATBIN or OBJ
#   FILE1 .. FILEN - The remaining arguments are the sources to be wrapped.
#   OPTIONS - Extra options to NVCC
# OUTPUT:
#   generated_files - List of generated files
##############################################################################
##############################################################################

macro(CUDA_WRAP_SRCS cuda_target format generated_files)

  # Put optional arguments in list.
  set(_argn_list "${ARGN}")
  # If one of the given optional arguments is "PHONY", make a note of it, then
  # remove it from the list.
  list(FIND _argn_list "PHONY" _phony_idx)
  if("${_phony_idx}" GREATER "-1")
    set(_target_is_phony true)
    list(REMOVE_AT _argn_list ${_phony_idx})
  else()
    set(_target_is_phony false)
  endif()

  # If CMake doesn't support separable compilation, complain.
  if(CUDA_SEPARABLE_COMPILATION AND CMAKE_VERSION VERSION_LESS "2.8.10.1")
    message(SEND_ERROR "CUDA_SEPARABLE_COMPILATION isn't supported for CMake versions less than 2.8.10.1")
  endif()

  # Set up all the command line flags here, so that they can be overridden on a per target basis.

  set(nvcc_flags "")

  # Emulation if the card isn't present.
  if (CUDA_BUILD_EMULATION)
    # Emulation.
    set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g)
  else()
    # Device mode. No flags necessary.
  endif()

  if(CUDA_HOST_COMPILATION_CPP)
    set(CUDA_C_OR_CXX CXX)
  else()
    message(WARNING "--host-compilation flag is deprecated in CUDA version >= 3.0. Removing --host-compilation C flag" )
    set(CUDA_C_OR_CXX C)
  endif()

  set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION})

  if(CUDA_64_BIT_DEVICE_CODE)
    set(nvcc_flags ${nvcc_flags} -m64)
  else()
    set(nvcc_flags ${nvcc_flags} -m32)
  endif()

  if(CUDA_TARGET_CPU_ARCH)
    set(nvcc_flags ${nvcc_flags} "--target-cpu-architecture=${CUDA_TARGET_CPU_ARCH}")
  endif()

  # This needs to be passed in at this stage, because VS needs to fill out the
  # various macros from within VS. Note that CCBIN is only used if
  # -ccbin or --compiler-bindir isn't used and CUDA_HOST_COMPILER matches
  # _CUDA_MSVC_HOST_COMPILER.
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    set(ccbin_flags -D "\"CCBIN:PATH=${_CUDA_MSVC_HOST_COMPILER}\"" )
  else()
    set(ccbin_flags)
  endif()

  # Figure out which configuration we will use and pass that in as an argument to
  # the script. We need to defer the decision until compilation time, because
  # for VS projects we won't know if we are making a debug or release build
  # until build time.
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    set( CUDA_build_configuration "$(ConfigurationName)" )
  else()
    set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}")
  endif()

  # Initialize our list of includes with the user ones followed by the CUDA system ones.
  set(CUDA_NVCC_INCLUDE_DIRS ${CUDA_NVCC_INCLUDE_DIRS_USER} "${CUDA_INCLUDE_DIRS}")
  if(_target_is_phony)
    # If the passed in target name isn't a real target (i.e., this is from a call to one of the
    # cuda_compile_* functions), we need to query directory properties to get include directories
    # and compile definitions.
    get_directory_property(_dir_include_dirs INCLUDE_DIRECTORIES)
    get_directory_property(_dir_compile_defs COMPILE_DEFINITIONS)

    list(APPEND CUDA_NVCC_INCLUDE_DIRS "${_dir_include_dirs}")
    set(CUDA_NVCC_COMPILE_DEFINITIONS "${_dir_compile_defs}")
  else()
    # Append the include directories for this target via generator expression, which is
    # expanded by the file(GENERATE) call below. This generator expression captures all
    # include dirs set by the user, whether via directory properties or target properties.
    list(APPEND CUDA_NVCC_INCLUDE_DIRS "$<TARGET_PROPERTY:${cuda_target},INCLUDE_DIRECTORIES>")

    # Do the same thing with compile definitions.
    set(CUDA_NVCC_COMPILE_DEFINITIONS "$<TARGET_PROPERTY:${cuda_target},COMPILE_DEFINITIONS>")
  endif()


  # Reset these variables.
  set(CUDA_WRAP_OPTION_NVCC_FLAGS)
  foreach(config ${CUDA_configuration_types})
    string(TOUPPER ${config} config_upper)
    set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper})
  endforeach()

  CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_cmake_options _cuda_wrap_options ${_argn_list})
  CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options})

  # Figure out if we are building a shared library. BUILD_SHARED_LIBS is
  # respected in CUDA_ADD_LIBRARY.
  set(_cuda_build_shared_libs FALSE)
  # SHARED, MODULE
  list(FIND _cuda_wrap_cmake_options SHARED _cuda_found_SHARED)
  list(FIND _cuda_wrap_cmake_options MODULE _cuda_found_MODULE)
  if(_cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1)
    set(_cuda_build_shared_libs TRUE)
  endif()
  # STATIC
  list(FIND _cuda_wrap_cmake_options STATIC _cuda_found_STATIC)
  if(_cuda_found_STATIC GREATER -1)
    set(_cuda_build_shared_libs FALSE)
  endif()

  # CUDA_HOST_FLAGS
  if(_cuda_build_shared_libs)
    # If we are setting up code for a shared library, then we need to add extra flags for
    # compiling objects for shared libraries.
    set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS})
  else()
    set(CUDA_HOST_SHARED_FLAGS)
  endif()

  macro(_filter_blocklisted_host_flags CUDA_FLAGS)
    string(REGEX REPLACE "[ \t]+" ";" ${CUDA_FLAGS} "${${CUDA_FLAGS}}")
    foreach(_blacklisted ${CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST})
      list(REMOVE_ITEM ${CUDA_FLAGS} "${_blacklisted}")
    endforeach()
    string(REPLACE ";" " " ${CUDA_FLAGS} "${${CUDA_FLAGS}}")
  endmacro()

  # Only add the CMAKE_{C,CXX}_FLAGS if we are propagating host flags. We
  # always need to set the SHARED_FLAGS, though.
  if(CUDA_PROPAGATE_HOST_FLAGS)
    set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS}")
    _filter_blocklisted_host_flags(_cuda_C_FLAGS)
    set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${_cuda_C_FLAGS} ${CUDA_HOST_SHARED_FLAGS})")
  else()
    set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${CUDA_HOST_SHARED_FLAGS})")
  endif()

  set(_cuda_nvcc_flags_config "# Build specific configuration flags")
  # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake
  foreach(config ${CUDA_configuration_types})
    string(TOUPPER ${config} config_upper)
    # CMAKE_FLAGS are strings and not lists. By not putting quotes around CMAKE_FLAGS
    # we convert the strings to lists (like we want).

    if(CUDA_PROPAGATE_HOST_FLAGS)
      # nvcc chokes on -g3 in versions previous to 3.0, so replace it with -g
      set(_cuda_fix_g3 FALSE)

      set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
      _filter_blocklisted_host_flags(_cuda_C_FLAGS)
      if(_cuda_fix_g3)
        string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${_cuda_C_FLAGS}")
      endif()

      string(APPEND _cuda_host_flags "\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})")
    endif()

    # Note that if we ever want CUDA_NVCC_FLAGS_<CONFIG> to be a string (instead of a list
    # like it is currently), we can remove the quotes around the
    # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_<CONFIG> variable.
    string(APPEND _cuda_nvcc_flags_config "\nset(CUDA_NVCC_FLAGS_${config_upper} ${CUDA_NVCC_FLAGS_${config_upper}} ;; ${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}})")
  endforeach()

  # Process the C++14 flag. If the host sets the flag, we need to add it to nvcc and
  # remove it from the host. This is because -Xcompiler -std=c++14 will choke nvcc (it uses
  # the C preprocessor). In order to get this to work correctly, we need to use nvcc's
  # specific c++14 flag.
  if( "${_cuda_host_flags}" MATCHES "-std=c\\+\\+14")
    # Add the c++14 flag to nvcc if it isn't already present. Note that we only look at
    # the main flag instead of the configuration specific flags.
    if( NOT "${CUDA_NVCC_FLAGS}" MATCHES "-std=c\\+\\+14" )
      list(APPEND nvcc_flags --std c++14)
    endif()
    string(REGEX REPLACE "[-]+std=c\\+\\+14" "" _cuda_host_flags "${_cuda_host_flags}")
  endif()

  if(_cuda_build_shared_libs)
    list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS")
  endif()

  # Reset the output variable.
  set(_cuda_wrap_generated_files "")

  # Iterate over the macro arguments and create custom
  # commands for all the .cu files.
  foreach(file ${_argn_list})
    # Ignore any file marked as HEADER_FILE_ONLY.
    get_source_file_property(_is_header ${file} HEADER_FILE_ONLY)
    # Allow per source file overrides of the format. Also allows compiling non-.cu files.
    get_source_file_property(_cuda_source_format ${file} CUDA_SOURCE_PROPERTY_FORMAT)
    if((${file} MATCHES "\\.cu$" OR _cuda_source_format) AND NOT _is_header)

      if(NOT _cuda_source_format)
        set(_cuda_source_format ${format})
      endif()
      # If the file isn't a .cu file, we need to tell nvcc to treat it as such.
      if(NOT file MATCHES "\\.cu$")
        set(cuda_language_flag -x=cu)
      else()
        set(cuda_language_flag)
      endif()

      if( ${_cuda_source_format} MATCHES "OBJ")
        set( cuda_compile_to_external_module OFF )
      else()
        set( cuda_compile_to_external_module ON )
        if( ${_cuda_source_format} MATCHES "PTX" )
          set( cuda_compile_to_external_module_type "ptx" )
        elseif( ${_cuda_source_format} MATCHES "CUBIN")
          set( cuda_compile_to_external_module_type "cubin" )
        elseif( ${_cuda_source_format} MATCHES "FATBIN")
          set( cuda_compile_to_external_module_type "fatbin" )
        else()
          message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS or set with CUDA_SOURCE_PROPERTY_FORMAT file property for file '${file}': '${_cuda_source_format}'. Use OBJ, PTX, CUBIN or FATBIN.")
        endif()
      endif()

      if(cuda_compile_to_external_module)
        # Don't use any of the host compilation flags for PTX targets.
        set(CUDA_HOST_FLAGS)
        set(CUDA_NVCC_FLAGS_CONFIG)
      else()
        set(CUDA_HOST_FLAGS ${_cuda_host_flags})
        set(CUDA_NVCC_FLAGS_CONFIG ${_cuda_nvcc_flags_config})
      endif()

      # Determine the output directory.
      cuda_compute_build_path("${file}" cuda_build_path)
      set(cuda_compile_intermediate_directory "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${cuda_build_path}")
      if(CUDA_GENERATED_OUTPUT_DIR)
        set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}")
      else()
        if ( cuda_compile_to_external_module )
          set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}")
        else()
          set(cuda_compile_output_dir "${cuda_compile_intermediate_directory}")
        endif()
      endif()

      # Add a custom target to generate a C or PTX file. ######################

      get_filename_component( basename ${file} NAME )
      if( cuda_compile_to_external_module )
        set(generated_file_path "${cuda_compile_output_dir}")
        set(generated_file_basename "${cuda_target}_generated_${basename}.${cuda_compile_to_external_module_type}")
        set(format_flag "-${cuda_compile_to_external_module_type}")
        file(MAKE_DIRECTORY "${cuda_compile_output_dir}")
      else()
        set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}")
        set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}")
        if(CUDA_SEPARABLE_COMPILATION)
          set(format_flag "-dc")
        else()
          set(format_flag "-c")
        endif()
      endif()

      # Set all of our file names. Make sure that whatever filenames that have
      # generated_file_path in them get passed in through as a command line
      # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time
      # instead of configure time.
      set(generated_file "${generated_file_path}/${generated_file_basename}")
      set(cmake_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.depend")
      set(NVCC_generated_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.NVCC-depend")
      set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt")
      set(custom_target_script_pregen "${cuda_compile_intermediate_directory}/${generated_file_basename}.cmake.pre-gen")
      set(custom_target_script "${cuda_compile_intermediate_directory}/${generated_file_basename}$<$<BOOL:$<CONFIG>>:.$<CONFIG>>.cmake")

      # Set up properties for obj files:
      if( NOT cuda_compile_to_external_module )
        set_source_files_properties("${generated_file}"
          PROPERTIES
          EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked.
          )
      endif()

      # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path.
      get_filename_component(file_path "${file}" PATH)
      if(IS_ABSOLUTE "${file_path}")
        set(source_file "${file}")
      else()
        set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}")
      endif()

      if( NOT cuda_compile_to_external_module AND CUDA_SEPARABLE_COMPILATION)
        list(APPEND ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS "${generated_file}")
      endif()

      # Bring in the dependencies. Creates a variable CUDA_NVCC_DEPEND #######
      cuda_include_nvcc_dependencies(${cmake_dependency_file})

      # Convenience string for output #########################################
      if(CUDA_BUILD_EMULATION)
        set(cuda_build_type "Emulation")
      else()
        set(cuda_build_type "Device")
      endif()

      # Build the NVCC made dependency file ###################################
      set(build_cubin OFF)
      if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN )
        if ( NOT cuda_compile_to_external_module )
          set ( build_cubin ON )
        endif()
      endif()

      # Configure the build script.
      configure_file("${CUDA_run_nvcc}" "${custom_target_script_pregen}" @ONLY)
      file(GENERATE
        OUTPUT "${custom_target_script}"
        INPUT "${custom_target_script_pregen}"
        )

      # If a user specifies the same cuda file as input more than once, bad
      # things can happen with dependencies. Here we check an option
      # to see if this is the behavior they want.
      if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE)
        set(main_dep MAIN_DEPENDENCY ${source_file})
      else()
        set(main_dep DEPENDS ${source_file})
      endif()

      if(CUDA_VERBOSE_BUILD)
        set(verbose_output ON)
      elseif(CMAKE_GENERATOR MATCHES "Makefiles")
        set(verbose_output "$(VERBOSE)")
      # This condition lets us also turn on verbose output when someone
      # specifies CMAKE_VERBOSE_MAKEFILE, even if the generator isn't
      # the Makefiles generator (this is important for Ninja users).
      elseif(CMAKE_VERBOSE_MAKEFILE)
        set(verbose_output ON)
      else()
        set(verbose_output OFF)
      endif()

      # Create the comment string.
      file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}")
      if(cuda_compile_to_external_module)
        set(cuda_build_comment_string "Building NVCC ${cuda_compile_to_external_module_type} file ${generated_file_relative_path}")
      else()
        set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}")
      endif()

      set(_verbatim VERBATIM)
      if(ccbin_flags MATCHES "\\$\\(VCInstallDir\\)")
        set(_verbatim "")
      endif()

      # Build the generated file and dependency file ##########################
      add_custom_command(
        OUTPUT ${generated_file}
        # These output files depend on the source_file and the contents of cmake_dependency_file
        ${main_dep}
        DEPENDS ${CUDA_NVCC_DEPEND}
        DEPENDS ${custom_target_script}
        # Make sure the output directory exists before trying to write to it.
        COMMAND ${CMAKE_COMMAND} -E make_directory "${generated_file_path}"
        COMMAND ${CMAKE_COMMAND} ARGS
          -D verbose:BOOL=${verbose_output}
          ${ccbin_flags}
          -D build_configuration:STRING=${CUDA_build_configuration}
          -D "generated_file:STRING=${generated_file}"
          -D "generated_cubin_file:STRING=${generated_cubin_file}"
          -P "${custom_target_script}"
        WORKING_DIRECTORY "${cuda_compile_intermediate_directory}"
        COMMENT "${cuda_build_comment_string}"
        ${_verbatim}
        )

      # Make sure the build system knows the file is generated.
      set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE)

      list(APPEND _cuda_wrap_generated_files ${generated_file})

      # Add the other files that we want cmake to clean on a cleanup ##########
      list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}")
      list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES)
      set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")

    endif()
  endforeach()

  # Set the return parameter.
  set(${generated_files} ${_cuda_wrap_generated_files})
endmacro()
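
# Illustrative call (hypothetical target and file names): wrap a .cu source
# into an nvcc object rule; the generated object paths come back in _gen.
# CUDA_WRAP_SRCS(mykernels OBJ _gen kernels.cu OPTIONS -lineinfo)
# # _gen now lists the nvcc-generated object file(s) to hand to add_library().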

function(_cuda_get_important_host_flags important_flags flag_string)
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    string(REGEX MATCHALL "/M[DT][d]?" flags "${flag_string}")
    list(APPEND ${important_flags} ${flags})
  else()
    string(REGEX MATCHALL "-fPIC" flags "${flag_string}")
    list(APPEND ${important_flags} ${flags})
  endif()
  set(${important_flags} ${${important_flags}} PARENT_SCOPE)
endfunction()

###############################################################################
###############################################################################
# Separable Compilation Link
###############################################################################
###############################################################################

# Compute the filename to be used by CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS
function(CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME output_file_var cuda_target object_files)
  if (object_files)
    set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION})
    set(output_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${CMAKE_CFG_INTDIR}/${cuda_target}_intermediate_link${generated_extension}")
  else()
    set(output_file)
  endif()

  set(${output_file_var} "${output_file}" PARENT_SCOPE)
endfunction()

# Set up the build rule for the separable compilation intermediate link file.
function(CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS output_file cuda_target options object_files)
  if (object_files)

    set_source_files_properties("${output_file}"
      PROPERTIES
      EXTERNAL_OBJECT TRUE # This is an object file not to be compiled, but only
                           # be linked.
      GENERATED TRUE       # This file is generated during the build
      )

    # For now we are ignoring all the configuration specific flags.
    set(nvcc_flags)
    CUDA_PARSE_NVCC_OPTIONS(nvcc_flags ${options})
    if(CUDA_64_BIT_DEVICE_CODE)
      list(APPEND nvcc_flags -m64)
    else()
      list(APPEND nvcc_flags -m32)
    endif()
    # If -ccbin or --compiler-bindir has been specified, don't do anything. Otherwise add it here.
    list( FIND nvcc_flags "-ccbin" ccbin_found0 )
    list( FIND nvcc_flags "--compiler-bindir" ccbin_found1 )
    if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER )
      # Match VERBATIM check below.
      if(CUDA_HOST_COMPILER MATCHES "\\$\\(VCInstallDir\\)")
        list(APPEND nvcc_flags -ccbin "\"${CUDA_HOST_COMPILER}\"")
      else()
        list(APPEND nvcc_flags -ccbin "${CUDA_HOST_COMPILER}")
      endif()
    endif()

    # Create a list of flags specified by CUDA_NVCC_FLAGS_${CONFIG} and CMAKE_${CUDA_C_OR_CXX}_FLAGS*
    set(config_specific_flags)
    set(flags)
    foreach(config ${CUDA_configuration_types})
      string(TOUPPER ${config} config_upper)
      # Add config specific flags
      foreach(f ${CUDA_NVCC_FLAGS_${config_upper}})
        list(APPEND config_specific_flags $<$<CONFIG:${config}>:${f}>)
      endforeach()
      set(important_host_flags)
      _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
      foreach(f ${important_host_flags})
        list(APPEND flags $<$<CONFIG:${config}>:-Xcompiler> $<$<CONFIG:${config}>:${f}>)
      endforeach()
    endforeach()
    # Add CMAKE_${CUDA_C_OR_CXX}_FLAGS
    set(important_host_flags)
    _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS}")
    foreach(f ${important_host_flags})
      list(APPEND flags -Xcompiler ${f})
    endforeach()

    # Add our general CUDA_NVCC_FLAGS with the configuration specific flags.
    set(nvcc_flags ${CUDA_NVCC_FLAGS} ${config_specific_flags} ${nvcc_flags})

    file(RELATIVE_PATH output_file_relative_path "${CMAKE_BINARY_DIR}" "${output_file}")

    # Some generators don't handle the multiple levels of custom command
    # dependencies correctly (obj1 depends on file1, obj2 depends on obj1), so
    # we work around that issue by compiling the intermediate link object as a
    # pre-link custom command in that situation.
    set(do_obj_build_rule TRUE)
    if (MSVC_VERSION GREATER 1599 AND MSVC_VERSION LESS 1800)
      # VS 2010 and 2012 have this problem.
      set(do_obj_build_rule FALSE)
    endif()

    set(_verbatim VERBATIM)
    if(nvcc_flags MATCHES "\\$\\(VCInstallDir\\)")
      set(_verbatim "")
    endif()

    if (do_obj_build_rule)
      add_custom_command(
        OUTPUT ${output_file}
        DEPENDS ${object_files}
        COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} -dlink ${object_files} -o ${output_file}
        ${flags}
        COMMENT "Building NVCC intermediate link file ${output_file_relative_path}"
        COMMAND_EXPAND_LISTS
        ${_verbatim}
        )
    else()
      get_filename_component(output_file_dir "${output_file}" DIRECTORY)
      add_custom_command(
        TARGET ${cuda_target}
        PRE_LINK
        COMMAND ${CMAKE_COMMAND} -E echo "Building NVCC intermediate link file ${output_file_relative_path}"
        COMMAND ${CMAKE_COMMAND} -E make_directory "${output_file_dir}"
        COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} ${flags} -dlink ${object_files} -o "${output_file}"
        COMMAND_EXPAND_LISTS
        ${_verbatim}
        )
    endif()
  endif()
endfunction()

###############################################################################
###############################################################################
# ADD LIBRARY
###############################################################################
###############################################################################
macro(CUDA_ADD_LIBRARY cuda_target)

  CUDA_ADD_CUDA_INCLUDE_ONCE()

  # Separate the sources from the options.
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  CUDA_BUILD_SHARED_LIBRARY(_cuda_shared_flag ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources}
    ${_cmake_options} ${_cuda_shared_flag}
    OPTIONS ${_options} )

  # Compute the file name of the intermediate link file used for separable
  # compilation.
  CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  # Add the library.
  add_library(${cuda_target} ${_cmake_options}
    ${_generated_files}
    ${_sources}
    ${link_file}
    )

  # Add a link phase for the separable compilation if it has been enabled. If
  # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS
  # variable will have been defined.
  CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD}
    ${CUDA_LIBRARIES}
    )

  if(CUDA_SEPARABLE_COMPILATION)
    target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD}
      ${CUDA_cudadevrt_LIBRARY}
      )
  endif()

  # We need to set the linker language based on what the expected generated file
  # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
  set_target_properties(${cuda_target}
    PROPERTIES
    LINKER_LANGUAGE ${CUDA_C_OR_CXX}
    )

endmacro()
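
# Usage sketch (hypothetical names): build a static library from CUDA sources;
# enable CUDA_SEPARABLE_COMPILATION beforehand if device linking is needed.
# cuda_add_library(mykernels STATIC kernels.cu OPTIONS -O3)
# target_link_libraries(my_app mykernels)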


###############################################################################
###############################################################################
# ADD EXECUTABLE
###############################################################################
###############################################################################
macro(CUDA_ADD_EXECUTABLE cuda_target)

  CUDA_ADD_CUDA_INCLUDE_ONCE()

  # Separate the sources from the options.
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} )

  # Compute the file name of the intermediate link file used for separable
  # compilation.
  CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  # Add the executable.
  add_executable(${cuda_target} ${_cmake_options}
    ${_generated_files}
    ${_sources}
    ${link_file}
    )

  # Add a link phase for the separable compilation if it has been enabled. If
  # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS
  # variable will have been defined.
  CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD}
    ${CUDA_LIBRARIES}
    )

  # We need to set the linker language based on what the expected generated file
  # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
  set_target_properties(${cuda_target}
    PROPERTIES
    LINKER_LANGUAGE ${CUDA_C_OR_CXX}
    )

endmacro()
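
# Usage sketch (hypothetical names): like add_executable, but .cu files are
# routed through the nvcc custom commands set up by CUDA_WRAP_SRCS.
# cuda_add_executable(bench main.cu OPTIONS -use_fast_math)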
|
| 1866 |
+
|
| 1867 |
+
###############################################################################
|
| 1868 |
+
###############################################################################
|
| 1869 |
+
# (Internal) helper for manually added cuda source files with specific targets
|
| 1870 |
+
###############################################################################
|
| 1871 |
+
###############################################################################
|
| 1872 |
+
macro(cuda_compile_base cuda_target format generated_files)
|
| 1873 |
+
# Update a counter in this directory, to keep phony target names unique.
|
| 1874 |
+
set(_cuda_target "${cuda_target}")
|
| 1875 |
+
get_property(_counter DIRECTORY PROPERTY _cuda_internal_phony_counter)
|
| 1876 |
+
if(_counter)
|
| 1877 |
+
math(EXPR _counter "${_counter} + 1")
|
| 1878 |
+
else()
|
| 1879 |
+
set(_counter 1)
|
| 1880 |
+
endif()
|
| 1881 |
+
string(APPEND _cuda_target "_${_counter}")
|
| 1882 |
+
set_property(DIRECTORY PROPERTY _cuda_internal_phony_counter ${_counter})
|
| 1883 |
+
|
| 1884 |
+
# Separate the sources from the options
|
| 1885 |
+
CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
|
| 1886 |
+
|
| 1887 |
+
# Create custom commands and targets for each file.
|
| 1888 |
+
CUDA_WRAP_SRCS( ${_cuda_target} ${format} _generated_files ${_sources}
|
| 1889 |
+
${_cmake_options} OPTIONS ${_options} PHONY)
|
| 1890 |
+
|
| 1891 |
+
set( ${generated_files} ${_generated_files})
|
| 1892 |
+
|
| 1893 |
+
endmacro()
|

###############################################################################
###############################################################################
# CUDA COMPILE
###############################################################################
###############################################################################
macro(CUDA_COMPILE generated_files)
  cuda_compile_base(cuda_compile OBJ ${generated_files} ${ARGN})
endmacro()

###############################################################################
###############################################################################
# CUDA COMPILE PTX
###############################################################################
###############################################################################
macro(CUDA_COMPILE_PTX generated_files)
  cuda_compile_base(cuda_compile_ptx PTX ${generated_files} ${ARGN})
endmacro()

###############################################################################
###############################################################################
# CUDA COMPILE FATBIN
###############################################################################
###############################################################################
macro(CUDA_COMPILE_FATBIN generated_files)
  cuda_compile_base(cuda_compile_fatbin FATBIN ${generated_files} ${ARGN})
endmacro()

###############################################################################
###############################################################################
# CUDA COMPILE CUBIN
###############################################################################
###############################################################################
macro(CUDA_COMPILE_CUBIN generated_files)
  cuda_compile_base(cuda_compile_cubin CUBIN ${generated_files} ${ARGN})
endmacro()
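# A hedged sketch of the cuda_compile_* family in use; the source file
# name and custom-target name are placeholders:
#
#   find_package(CUDA REQUIRED)
#   cuda_compile_ptx(generated_ptx_files kernels.cu)
#   add_custom_target(kernels_ptx ALL DEPENDS ${generated_ptx_files})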


###############################################################################
###############################################################################
# CUDA ADD CUFFT TO TARGET
###############################################################################
###############################################################################
macro(CUDA_ADD_CUFFT_TO_TARGET target)
  if (CUDA_BUILD_EMULATION)
    target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cufftemu_LIBRARY})
  else()
    target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cufft_LIBRARY})
  endif()
endmacro()

###############################################################################
###############################################################################
# CUDA ADD CUBLAS TO TARGET
###############################################################################
###############################################################################
macro(CUDA_ADD_CUBLAS_TO_TARGET target)
  if (CUDA_BUILD_EMULATION)
    target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cublasemu_LIBRARY})
  else()
    target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY} ${CUDA_cublasLt_LIBRARY})
  endif()
endmacro()
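# A hypothetical usage sketch; the target and source names are
# placeholders. Each macro picks the emulation or real library depending
# on CUDA_BUILD_EMULATION:
#
#   cuda_add_executable(blas_demo blas_demo.cu)
#   cuda_add_cufft_to_target(blas_demo)
#   cuda_add_cublas_to_target(blas_demo)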

###############################################################################
###############################################################################
# CUDA BUILD CLEAN TARGET
###############################################################################
###############################################################################
macro(CUDA_BUILD_CLEAN_TARGET)
  # Call this after you add all your CUDA targets, and you will get a
  # convenience target. You should also run "make clean" after building this
  # target to get the build system to generate all the code again.

  set(cuda_clean_target_name clean_cuda_depends)
  if (CMAKE_GENERATOR MATCHES "Visual Studio")
    string(TOUPPER ${cuda_clean_target_name} cuda_clean_target_name)
  endif()
  add_custom_target(${cuda_clean_target_name}
    COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES})

  # Clear out the variable, so the next time we configure it will be empty.
  # This is useful so that the files won't persist in the list after targets
  # have been removed.
  set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")
endmacro()
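# A hedged end-to-end sketch of the clean-target flow (the
# clean_cuda_depends name is the macro's own; under Visual Studio it is
# uppercased, per the code above; the library name is a placeholder):
#
#   cuda_add_library(mykernels kernels.cu)   # ...all CUDA targets first
#   cuda_build_clean_target()
#   # then, from a Makefile build tree:
#   #   make clean_cuda_depends && make clean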
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake
ADDED
@@ -0,0 +1,106 @@
#  James Bigler, NVIDIA Corp (nvidia.com - jbigler)
#  Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
#  Copyright (c) 2008 - 2009 NVIDIA Corporation.  All rights reserved.
#
#  Copyright (c) 2007-2009
#  Scientific Computing and Imaging Institute, University of Utah
#
#  This code is licensed under the MIT License.  See the FindCUDA.cmake script
#  for the text of the license.

# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#

#######################################################################
# This converts a file written in makefile syntax into one that can be included
# by CMake.

# Input variables
#
# verbose:BOOL=<>          OFF: Be as quiet as possible (default)
#                          ON : Extra output
#
# input_file:FILEPATH=<>   Path to dependency file in makefile format
#
# output_file:FILEPATH=<>  Path to file with dependencies in CMake readable variable
#

file(READ ${input_file} depend_text)

if (NOT "${depend_text}" STREQUAL "")

  # message("FOUND DEPENDS")

  string(REPLACE "\\ " " " depend_text ${depend_text})

  # This works for the nvcc -M generated dependency files.
  string(REGEX REPLACE "^.* : " "" depend_text ${depend_text})
  string(REGEX REPLACE "[ \\\\]*\n" ";" depend_text ${depend_text})

  set(dependency_list "")

  foreach(file ${depend_text})

    string(REGEX REPLACE "^ +" "" file ${file})

    # OK, now if we had a UNC path, nvcc has a tendency to only output the first '/'
    # instead of '//'.  Here we will test to see if the file exists; if it doesn't,
    # then try to prepend another '/' to the path and test again.  If it still
    # fails, remove the path.

    if(NOT EXISTS "${file}")
      if (EXISTS "/${file}")
        set(file "/${file}")
      else()
        if(verbose)
          message(WARNING " Removing non-existent dependency file: ${file}")
        endif()
        set(file "")
      endif()
    endif()

    # Make sure we check to see if we have a file, before asking if it is not a directory.
    # if(NOT IS_DIRECTORY "") will return TRUE.
    if(file AND NOT IS_DIRECTORY "${file}")
      # If softlinks start to matter, we should change this to REALPATH.  For now we need
      # to flatten paths, because nvcc can generate stuff like /bin/../include instead of
      # just /include.
      get_filename_component(file_absolute "${file}" ABSOLUTE)
      list(APPEND dependency_list "${file_absolute}")
    endif()

  endforeach()

else()
  # message("FOUND NO DEPENDS")
endif()

# Remove the duplicate entries and sort them.
list(REMOVE_DUPLICATES dependency_list)
list(SORT dependency_list)

foreach(file ${dependency_list})
  string(APPEND cuda_nvcc_depend " \"${file}\"\n")
endforeach()

file(WRITE ${output_file} "# Generated by: make2cmake.cmake\nSET(CUDA_NVCC_DEPEND\n ${cuda_nvcc_depend})\n\n")
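# A hedged sketch of how this script is driven in script mode; it mirrors
# the invocation run_nvcc.cmake performs, but the deps.mk / deps.cmake
# file names here are placeholders:
#
#   execute_process(
#     COMMAND "${CMAKE_COMMAND}"
#             -D "input_file:FILEPATH=${CMAKE_CURRENT_BINARY_DIR}/deps.mk"
#             -D "output_file:FILEPATH=${CMAKE_CURRENT_BINARY_DIR}/deps.cmake"
#             -D "verbose=ON"
#             -P "${CMAKE_CURRENT_LIST_DIR}/make2cmake.cmake"
#     RESULT_VARIABLE _result)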
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake
ADDED
@@ -0,0 +1,109 @@
#  James Bigler, NVIDIA Corp (nvidia.com - jbigler)
#  Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
#  Copyright (c) 2008 - 2009 NVIDIA Corporation.  All rights reserved.
#
#  Copyright (c) 2007-2009
#  Scientific Computing and Imaging Institute, University of Utah
#
#  This code is licensed under the MIT License.  See the FindCUDA.cmake script
#  for the text of the license.

# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#

#######################################################################
# Parses a .cubin file produced by nvcc and reports statistics about the file.


file(READ ${input_file} file_text)

if (NOT "${file_text}" STREQUAL "")

  string(REPLACE ";" "\\;" file_text ${file_text})
  string(REPLACE "\ncode" ";code" file_text ${file_text})

  list(LENGTH file_text len)

  foreach(line ${file_text})

    # Only look at "code { }" blocks.
    if(line MATCHES "^code")

      # Break into individual lines.
      string(REGEX REPLACE "\n" ";" line ${line})

      foreach(entry ${line})

        # Extract kernel names.
        if (${entry} MATCHES "[^g]name = ([^ ]+)")
          set(entry "${CMAKE_MATCH_1}")

          # Check to see if the kernel name starts with "_"
          set(skip FALSE)
          # if (${entry} MATCHES "^_")
            # Skip the rest of this block.
            # message("Skipping ${entry}")
            # set(skip TRUE)
          # else ()
            message("Kernel:    ${entry}")
          # endif ()

        endif()

        # Skip the rest of the block if necessary
        if(NOT skip)

          # Registers
          if (${entry} MATCHES "reg([ ]+)=([ ]+)([^ ]+)")
            set(entry "${CMAKE_MATCH_3}")
            message("Registers: ${entry}")
          endif()

          # Local memory
          if (${entry} MATCHES "lmem([ ]+)=([ ]+)([^ ]+)")
            set(entry "${CMAKE_MATCH_3}")
            message("Local:     ${entry}")
          endif()

          # Shared memory
          if (${entry} MATCHES "smem([ ]+)=([ ]+)([^ ]+)")
            set(entry "${CMAKE_MATCH_3}")
            message("Shared:    ${entry}")
          endif()

          if (${entry} MATCHES "^}")
            message("")
          endif()

        endif()


      endforeach()

    endif()

  endforeach()

else()
  # message("FOUND NO DEPENDS")
endif()
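# A hypothetical standalone invocation of this report script; the cubin
# name is a placeholder, and the parser appears to target the old
# text-format "code { ... }" cubins rather than modern ELF cubins:
#
#   execute_process(
#     COMMAND "${CMAKE_COMMAND}"
#             -D "input_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/kernel.cubin"
#             -P "${CMAKE_CURRENT_LIST_DIR}/parse_cubin.cmake")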
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake
ADDED
@@ -0,0 +1,303 @@
#  James Bigler, NVIDIA Corp (nvidia.com - jbigler)
#
#  Copyright (c) 2008 - 2009 NVIDIA Corporation.  All rights reserved.
#
#  This code is licensed under the MIT License.  See the FindCUDA.cmake script
#  for the text of the license.

# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.


##########################################################################
# This file runs the nvcc commands to produce the desired output file along with
# the dependency file needed by CMake to compute dependencies. In addition, the
# file checks the output of each command and, if the command fails, deletes the
# output files.

# Input variables
#
# verbose:BOOL=<>                OFF: Be as quiet as possible (default)
#                                ON : Describe each step
#
# build_configuration:STRING=<>  Typically one of Debug, MinSizeRel, Release, or
#                                RelWithDebInfo, but it should match one of the
#                                entries in CUDA_HOST_FLAGS. This is the build
#                                configuration used when compiling the code. If
#                                blank or unspecified, Debug is assumed, as this is
#                                what CMake does.
#
# generated_file:STRING=<>       File to generate. This argument must be passed in.
#
# generated_cubin_file:STRING=<> File to generate. This argument must be passed
#                                in if build_cubin is true.

cmake_policy(PUSH)
cmake_policy(SET CMP0007 NEW)
cmake_policy(SET CMP0010 NEW)
if(NOT generated_file)
  message(FATAL_ERROR "You must specify generated_file on the command line")
endif()

# Set these up as variables to make reading the generated file easier
set(CMAKE_COMMAND "@CMAKE_COMMAND@") # path
set(source_file "@source_file@") # path
set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") # path
set(cmake_dependency_file "@cmake_dependency_file@") # path
set(CUDA_make2cmake "@CUDA_make2cmake@") # path
set(CUDA_parse_cubin "@CUDA_parse_cubin@") # path
set(build_cubin @build_cubin@) # bool
set(CUDA_HOST_COMPILER "@CUDA_HOST_COMPILER@") # path
# We won't actually use these variables for now, but we need to set them in
# order to force this file to be run again if they change.
set(generated_file_path "@generated_file_path@") # path
set(generated_file_internal "@generated_file@") # path
set(generated_cubin_file_internal "@generated_cubin_file@") # path

set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") # path
set(CUDA_NVCC_FLAGS @CUDA_NVCC_FLAGS@ ;; @CUDA_WRAP_OPTION_NVCC_FLAGS@) # list
@CUDA_NVCC_FLAGS_CONFIG@
set(nvcc_flags @nvcc_flags@) # list
set(CUDA_NVCC_INCLUDE_DIRS [==[@CUDA_NVCC_INCLUDE_DIRS@]==]) # list (needs to be in lua quotes to address backslashes)
string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}")
set(CUDA_NVCC_COMPILE_DEFINITIONS [==[@CUDA_NVCC_COMPILE_DEFINITIONS@]==]) # list (needs to be in lua quotes see #16510 ).
set(format_flag "@format_flag@") # string
set(cuda_language_flag @cuda_language_flag@) # list

# Clean up list of include directories and add -I flags
list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS)
set(CUDA_NVCC_INCLUDE_ARGS)
foreach(dir ${CUDA_NVCC_INCLUDE_DIRS})
  # Extra quotes are added around each flag to help nvcc parse out flags with spaces.
  list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}")
endforeach()

# Clean up list of compile definitions, add -D flags, and append to nvcc_flags
list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS)
foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS})
  list(APPEND nvcc_flags "-D${def}")
endforeach()

if(build_cubin AND NOT generated_cubin_file)
  message(FATAL_ERROR "You must specify generated_cubin_file on the command line")
endif()

# This is the list of host compilation flags. Either C or CXX should already
# have been chosen by FindCUDA.cmake.
@CUDA_HOST_FLAGS@

# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler
set(nvcc_host_compiler_flags "")
# If we weren't given a build_configuration, use Debug.
if(NOT build_configuration)
  set(build_configuration Debug)
endif()
string(TOUPPER "${build_configuration}" build_configuration)
#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}")
foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}})
  # Extra quotes are added around each flag to help nvcc parse out flags with spaces.
  string(APPEND nvcc_host_compiler_flags ",\"${flag}\"")
endforeach()
if (nvcc_host_compiler_flags)
  set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags})
endif()
#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"")
# Add the build specific configuration flags
list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}})

# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority
list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 )
list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 )
if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER )
  if (CUDA_HOST_COMPILER STREQUAL "@_CUDA_MSVC_HOST_COMPILER@" AND DEFINED CCBIN)
    set(CCBIN -ccbin "${CCBIN}")
  else()
    set(CCBIN -ccbin "${CUDA_HOST_COMPILER}")
  endif()
endif()

# cuda_execute_process - Executes a command with optional command echo and status message.
#
#   status      - Status message to print if verbose is true
#   command     - COMMAND argument from the usual execute_process argument structure
#   ARGN        - Remaining arguments are the command with arguments
#
#   CUDA_result - return value from running the command
#
# Make this a macro instead of a function, so that things like RESULT_VARIABLE
# and other return variables are present after executing the process.
macro(cuda_execute_process status command)
  set(_command ${command})
  if(NOT "x${_command}" STREQUAL "xCOMMAND")
    message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})")
  endif()
  if(verbose)
    execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status})
    # Now we need to build up our command string. We are accounting for quotes
    # and spaces; anything else is left up to the user to fix if they want to
    # copy and paste a runnable command line.
    set(cuda_execute_process_string)
    foreach(arg ${ARGN})
      # If there are quotes, escape them, so they come through.
      string(REPLACE "\"" "\\\"" arg ${arg})
      # Args with spaces need quotes around them to get them to be parsed as a single argument.
      if(arg MATCHES " ")
        list(APPEND cuda_execute_process_string "\"${arg}\"")
      else()
        list(APPEND cuda_execute_process_string ${arg})
      endif()
    endforeach()
    # Echo the command
    execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string})
  endif()
  # Run the command
  execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result )
endmacro()

# Delete the target file
cuda_execute_process(
  "Removing ${generated_file}"
  COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
  )

# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag
# for dependency generation and hope for the best.
set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}")
set(CUDA_VERSION @CUDA_VERSION@)

# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This
# can cause incorrect dependencies when #including files based on this macro, which is
# defined in the generating passes of the nvcc invocation. We will go ahead and manually
# define this for now until a future version fixes this bug.
set(CUDACC_DEFINE -D__CUDACC__)

# Generate the dependency file
cuda_execute_process(
  "Generating dependency file: ${NVCC_generated_dependency_file}"
  COMMAND "${CUDA_NVCC_EXECUTABLE}"
  -M
  ${CUDACC_DEFINE}
  "${source_file}"
  -o "${NVCC_generated_dependency_file}"
  ${CCBIN}
  ${nvcc_flags}
  ${nvcc_host_compiler_flags}
  ${depends_CUDA_NVCC_FLAGS}
  -DNVCC
  ${CUDA_NVCC_INCLUDE_ARGS}
  )

if(CUDA_result)
  message(FATAL_ERROR "Error generating ${generated_file}")
endif()

# Generate the cmake readable dependency file to a temp file. Don't put the
# quotes just around the filenames for the input_file and output_file variables.
# CMake will pass the quotes through and not be able to find the file.
cuda_execute_process(
  "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp"
  COMMAND "${CMAKE_COMMAND}"
  -D "input_file:FILEPATH=${NVCC_generated_dependency_file}"
  -D "output_file:FILEPATH=${cmake_dependency_file}.tmp"
  -D "verbose=${verbose}"
  -P "${CUDA_make2cmake}"
  )

if(CUDA_result)
  message(FATAL_ERROR "Error generating ${generated_file}")
endif()

# Copy the file if it is different
cuda_execute_process(
  "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}"
  COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}"
  )

if(CUDA_result)
  message(FATAL_ERROR "Error generating ${generated_file}")
endif()

# Delete the temporary file
cuda_execute_process(
  "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}"
  COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}"
  )

if(CUDA_result)
  message(FATAL_ERROR "Error generating ${generated_file}")
endif()

# Generate the code
cuda_execute_process(
  "Generating ${generated_file}"
  COMMAND "${CUDA_NVCC_EXECUTABLE}"
  "${source_file}"
  ${cuda_language_flag}
  ${format_flag} -o "${generated_file}"
  ${CCBIN}
  ${nvcc_flags}
  ${nvcc_host_compiler_flags}
  ${CUDA_NVCC_FLAGS}
  -DNVCC
  ${CUDA_NVCC_INCLUDE_ARGS}
  )

if(CUDA_result)
  # Since nvcc can sometimes leave half-done files, make sure that we delete the output file.
  cuda_execute_process(
    "Removing ${generated_file}"
    COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
    )
  message(FATAL_ERROR "Error generating file ${generated_file}")
else()
  if(verbose)
    message("Generated ${generated_file} successfully.")
  endif()
endif()

# Cubin resource report commands.
if( build_cubin )
  # Run with -cubin to produce resource usage report.
  cuda_execute_process(
    "Generating ${generated_cubin_file}"
    COMMAND "${CUDA_NVCC_EXECUTABLE}"
    "${source_file}"
    ${CUDA_NVCC_FLAGS}
    ${nvcc_flags}
    ${CCBIN}
    ${nvcc_host_compiler_flags}
    -DNVCC
    -cubin
    -o "${generated_cubin_file}"
    ${CUDA_NVCC_INCLUDE_ARGS}
    )

  # Execute the parser script.
  cuda_execute_process(
    "Executing the parser script"
    COMMAND "${CMAKE_COMMAND}"
    -D "input_file:STRING=${generated_cubin_file}"
    -P "${CUDA_parse_cubin}"
    )

endif()

cmake_policy(POP)
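# A hedged sketch of how this template is typically used: the @...@
# placeholders are baked in (presumably via configure_file() from
# FindCUDA.cmake), and a custom command then runs the configured copy in
# script mode. The file names below are placeholders:
#
#   configure_file(run_nvcc.cmake "${CMAKE_BINARY_DIR}/run_nvcc_gen.cmake" @ONLY)
#   add_custom_command(
#     OUTPUT "${generated_file}"
#     COMMAND "${CMAKE_COMMAND}"
#             -D "verbose=ON"
#             -D "build_configuration=Release"
#             -D "generated_file=${generated_file}"
#             -P "${CMAKE_BINARY_DIR}/run_nvcc_gen.cmake"
#     DEPENDS "${source_file}")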
llava_next/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
ADDED
@@ -0,0 +1,273 @@
# Synopsis:
#   CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures])
#   -- Selects GPU arch flags for nvcc based on target_CUDA_architectures
#      target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...)
#       - "Auto" detects local machine GPU compute arch at runtime.
#       - "Common" and "All" cover common and entire subsets of architectures
#      ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX
#      NAME: Kepler Maxwell Kepler+Tegra Kepler+Tesla Maxwell+Tegra Pascal Volta Turing Ampere
#      NUM: Any number. Only those pairs are currently accepted by NVCC though:
#            3.5 3.7 5.0 5.2 5.3 6.0 6.2 7.0 7.2 7.5 8.0
#      Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable}
#      Additionally, sets ${out_variable}_readable to the resulting numeric list
#      Example:
#       CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell)
#        LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
#
#      More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA
#

if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language
  if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA"
      AND CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)")
    set(CUDA_VERSION "${CMAKE_MATCH_1}")
  endif()
endif()

# See: https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list

# This list will be used for CUDA_ARCH_NAME = All option
set(CUDA_KNOWN_GPU_ARCHITECTURES "Kepler" "Maxwell")

# This list will be used for CUDA_ARCH_NAME = Common option (enabled by default)
set(CUDA_COMMON_GPU_ARCHITECTURES "3.5" "5.0")

# This list is used to filter CUDA archs when autodetecting
set(CUDA_ALL_GPU_ARCHITECTURES "3.5" "5.0")

if(CUDA_VERSION VERSION_GREATER "10.5")
  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ampere")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.0")

  if(CUDA_VERSION VERSION_LESS "11.1")
    set(CUDA_LIMIT_GPU_ARCHITECTURE "8.0")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0+PTX")
  endif()
endif()

if(NOT CUDA_VERSION VERSION_LESS "11.1")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.6")
  set(CUDA_LIMIT_GPU_ARCHITECUTRE "8.6")

  if(CUDA_VERSION VERSION_LESS "11.8")
    set(CUDA_LIMIT_GPU_ARCHITECTURE "8.9")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6+PTX")
  endif()
endif()

if(NOT CUDA_VERSION VERSION_LESS "11.8")
  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ada")
  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Hopper")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.9")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0")

  if(CUDA_VERSION VERSION_LESS "12.0")
    set(CUDA_LIMIT_GPU_ARCHITECTURE "9.0")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9+PTX")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0+PTX")
  endif()
endif()

################################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
#   CUDA_DETECT_INSTALLED_GPUS(OUT_VARIABLE)
#
function(CUDA_DETECT_INSTALLED_GPUS OUT_VARIABLE)
  if(NOT CUDA_GPU_DETECT_OUTPUT)
    if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language
      set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cu")
    else()
      set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cpp")
    endif()

    file(WRITE ${file} ""
      "#include <cuda_runtime.h>\n"
      "#include <cstdio>\n"
      "int main()\n"
      "{\n"
      "  int count = 0;\n"
      "  if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
      "  if (count == 0) return -1;\n"
      "  for (int device = 0; device < count; ++device)\n"
      "  {\n"
      "    cudaDeviceProp prop;\n"
      "    if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
      "      std::printf(\"%d.%d \", prop.major, prop.minor);\n"
      "  }\n"
      "  return 0;\n"
      "}\n")

    if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language
      try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file}
              RUN_OUTPUT_VARIABLE compute_capabilities)
    else()
      try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file}
              CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CUDA_INCLUDE_DIRS}"
              LINK_LIBRARIES ${CUDA_LIBRARIES}
              RUN_OUTPUT_VARIABLE compute_capabilities)
    endif()

    # Filter unrelated content out of the output.
    string(REGEX MATCHALL "[0-9]+\\.[0-9]+" compute_capabilities "${compute_capabilities}")

    if(run_result EQUAL 0)
      string(REPLACE "2.1" "2.1(2.0)" compute_capabilities "${compute_capabilities}")
      set(CUDA_GPU_DETECT_OUTPUT ${compute_capabilities}
          CACHE INTERNAL "Returned GPU architectures from detect_gpus tool" FORCE)
    endif()
  endif()

  if(NOT CUDA_GPU_DETECT_OUTPUT)
    message(STATUS "Automatic GPU detection failed. Building for common architectures.")
    set(${OUT_VARIABLE} ${CUDA_COMMON_GPU_ARCHITECTURES} PARENT_SCOPE)
  else()
    # Filter based on CUDA version supported archs
    set(CUDA_GPU_DETECT_OUTPUT_FILTERED "")
    separate_arguments(CUDA_GPU_DETECT_OUTPUT)
    foreach(ITEM IN ITEMS ${CUDA_GPU_DETECT_OUTPUT})
      if(CUDA_LIMIT_GPU_ARCHITECTURE AND (ITEM VERSION_GREATER CUDA_LIMIT_GPU_ARCHITECTURE OR
          ITEM VERSION_EQUAL CUDA_LIMIT_GPU_ARCHITECTURE))
        list(GET CUDA_COMMON_GPU_ARCHITECTURES -1 NEWITEM)
        string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${NEWITEM}")
      else()
        string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${ITEM}")
      endif()
    endforeach()

    set(${OUT_VARIABLE} ${CUDA_GPU_DETECT_OUTPUT_FILTERED} PARENT_SCOPE)
  endif()
endfunction()


################################################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA architectures from parameter list
# Usage:
#   SELECT_NVCC_ARCH_FLAGS(out_variable [list of CUDA compute archs])
function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable)
  set(CUDA_ARCH_LIST "${ARGN}")

  if("X${CUDA_ARCH_LIST}" STREQUAL "X" )
    set(CUDA_ARCH_LIST "Auto")
  endif()

  set(cuda_arch_bin)
  set(cuda_arch_ptx)

  if("${CUDA_ARCH_LIST}" STREQUAL "All")
    set(CUDA_ARCH_LIST ${CUDA_KNOWN_GPU_ARCHITECTURES})
  elseif("${CUDA_ARCH_LIST}" STREQUAL "Common")
    set(CUDA_ARCH_LIST ${CUDA_COMMON_GPU_ARCHITECTURES})
  elseif("${CUDA_ARCH_LIST}" STREQUAL "Auto")
    CUDA_DETECT_INSTALLED_GPUS(CUDA_ARCH_LIST)
    message(STATUS "Autodetected CUDA architecture(s): ${CUDA_ARCH_LIST}")
  endif()

  # Now process the list and look for names
  string(REGEX REPLACE "[ \t]+" ";" CUDA_ARCH_LIST "${CUDA_ARCH_LIST}")
  list(REMOVE_DUPLICATES CUDA_ARCH_LIST)
  foreach(arch_name ${CUDA_ARCH_LIST})
    set(arch_bin)
    set(arch_ptx)
    set(add_ptx FALSE)
    # Check to see if we are compiling PTX
    if(arch_name MATCHES "(.*)\\+PTX$")
      set(add_ptx TRUE)
      set(arch_name ${CMAKE_MATCH_1})
    endif()
    if(arch_name MATCHES "^([0-9]\\.[0-9](\\([0-9]\\.[0-9]\\))?)$")
      set(arch_bin ${CMAKE_MATCH_1})
      set(arch_ptx ${arch_bin})
    else()
      # Look for it in our list of known architectures
      if(${arch_name} STREQUAL "Kepler+Tesla")
        set(arch_bin 3.7)
      elseif(${arch_name} STREQUAL "Kepler")
        set(arch_bin 3.5)
        set(arch_ptx 3.5)
      elseif(${arch_name} STREQUAL "Maxwell+Tegra")
        set(arch_bin 5.3)
      elseif(${arch_name} STREQUAL "Maxwell")
        set(arch_bin 5.0 5.2)
        set(arch_ptx 5.2)
      elseif(${arch_name} STREQUAL "Pascal")
        set(arch_bin 6.0 6.1)
        set(arch_ptx 6.1)
      elseif(${arch_name} STREQUAL "Volta+Tegra")
        set(arch_bin 7.2)
      elseif(${arch_name} STREQUAL "Volta")
        set(arch_bin 7.0 7.0)
        set(arch_ptx 7.0)
      elseif(${arch_name} STREQUAL "Turing")
        set(arch_bin 7.5)
        set(arch_ptx 7.5)
      elseif(${arch_name} STREQUAL "Ampere+Tegra")
        set(arch_bin 8.7)
      elseif(${arch_name} STREQUAL "Ampere")
        set(arch_bin 8.0 8.6)
        set(arch_ptx 8.0 8.6)
      elseif(${arch_name} STREQUAL "Ada")
        set(arch_bin 8.9)
        set(arch_ptx 8.9)
      elseif(${arch_name} STREQUAL "Hopper")
        set(arch_bin 9.0)
        set(arch_ptx 9.0)
      else()
        message(SEND_ERROR "Unknown CUDA Architecture Name ${arch_name} in CUDA_SELECT_NVCC_ARCH_FLAGS")
      endif()
    endif()
    if(NOT arch_bin)
      message(SEND_ERROR "arch_bin wasn't set for some reason")
    endif()
    list(APPEND cuda_arch_bin ${arch_bin})
    if(add_ptx)
      if (NOT arch_ptx)
        set(arch_ptx ${arch_bin})
      endif()
      list(APPEND cuda_arch_ptx ${arch_ptx})
    endif()
  endforeach()

  # remove dots and convert to lists
  string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}")
  string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX MATCHALL "[0-9]+"   cuda_arch_ptx "${cuda_arch_ptx}")

  if(cuda_arch_bin)
    list(REMOVE_DUPLICATES cuda_arch_bin)
  endif()
  if(cuda_arch_ptx)
    list(REMOVE_DUPLICATES cuda_arch_ptx)
  endif()

  set(nvcc_flags "")
  set(nvcc_archs_readable "")

  # Tell NVCC to add binaries for the specified GPUs
  foreach(arch ${cuda_arch_bin})
    if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
      # User explicitly specified ARCH for the concrete CODE
      list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
      list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
    else()
      # User didn't explicitly specify ARCH for the concrete CODE, so we assume ARCH=CODE
      list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
      list(APPEND nvcc_archs_readable sm_${arch})
    endif()
  endforeach()

  # Tell NVCC to add PTX intermediate code for the specified architectures
  foreach(arch ${cuda_arch_ptx})
    list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch})
    list(APPEND nvcc_archs_readable compute_${arch})
  endforeach()

  string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
  set(${out_variable}          ${nvcc_flags}          PARENT_SCOPE)
  set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE)
endfunction()
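# A hedged usage sketch combining the two functions above; ARCH_FLAGS is
# the module's own example variable, and the include path is a placeholder:
#
#   include(select_compute_arch.cmake)
#   cuda_select_nvcc_arch_flags(ARCH_FLAGS Auto)
#   list(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
#   message(STATUS "nvcc arch flags: ${ARCH_FLAGS_readable}")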