file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
lit/extern/otk-pyoptix/optix/pybind11/tools/make_changelog.py | Python | #!/usr/bin/env python3
import re
import ghapi.all
from rich import print
from rich.syntax import Syntax
ENTRY = re.compile(
r"""
Suggested \s changelog \s entry:
.*
```rst
\s*
(.*?)
\s*
```
""",
re.DOTALL | re.VERBOSE,
)
print()
api = ghapi.all.GhApi(owner="pybind", repo="pybind11")
issues_pages = ghapi.page.paged(
api.issues.list_for_repo, labels="needs changelog", state="closed"
)
issues = (issue for page in issues_pages for issue in page)
missing = []
for issue in issues:
changelog = ENTRY.findall(issue.body or "")
if not changelog or not changelog[0]:
missing.append(issue)
else:
(msg,) = changelog
if not msg.startswith("* "):
msg = "* " + msg
if not msg.endswith("."):
msg += "."
msg += f"\n `#{issue.number} <{issue.html_url}>`_"
print(Syntax(msg, "rst", theme="ansi_light", word_wrap=True))
print()
if missing:
print()
print("[blue]" + "-" * 30)
print()
for issue in missing:
print(f"[red bold]Missing:[/red bold][red] {issue.title}")
print(f"[red] {issue.html_url}\n")
print("[bold]Template:\n")
msg = "## Suggested changelog entry:\n\n```rst\n\n```"
print(Syntax(msg, "md", theme="ansi_light"))
print()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tools/pybind11Common.cmake | CMake | #[======================================================[.rst
Adds the following targets::
pybind11::pybind11 - link to headers and pybind11
pybind11::module - Adds module links
pybind11::embed - Adds embed links
pybind11::lto - Link time optimizations (only if CMAKE_INTERPROCEDURAL_OPTIMIZATION is not set)
pybind11::thin_lto - Link time optimizations (only if CMAKE_INTERPROCEDURAL_OPTIMIZATION is not set)
pybind11::python_link_helper - Adds link to Python libraries
pybind11::windows_extras - MSVC bigobj and mp for building multithreaded
pybind11::opt_size - avoid optimizations that increase code size
Adds the following functions::
pybind11_strip(target) - strip target after building on linux/macOS
pybind11_find_import(module) - See if a module is installed.
#]======================================================]
# CMake 3.10 has an include_guard command, but we can't use that yet
# include_guard(global) (pre-CMake 3.10)
if(TARGET pybind11::pybind11)
return()
endif()
# If we are in subdirectory mode, all IMPORTED targets must be GLOBAL. If we
# are in CONFIG mode, they should be "normal" targets instead.
# In CMake 3.11+ you can promote a target to global after you create it,
# which might be simpler than this check.
get_property(
is_config
TARGET pybind11::headers
PROPERTY IMPORTED)
if(NOT is_config)
set(optional_global GLOBAL)
endif()
# If not run in Python mode, we still would like this to at least
# include pybind11's include directory:
set(pybind11_INCLUDE_DIRS
"${pybind11_INCLUDE_DIR}"
CACHE INTERNAL "Include directory for pybind11 (Python not requested)")
# --------------------- Shared targets ----------------------------
# Build an interface library target:
add_library(pybind11::pybind11 IMPORTED INTERFACE ${optional_global})
set_property(
TARGET pybind11::pybind11
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES pybind11::headers)
# Build a module target:
add_library(pybind11::module IMPORTED INTERFACE ${optional_global})
set_property(
TARGET pybind11::module
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES pybind11::pybind11)
# Build an embed library target:
add_library(pybind11::embed IMPORTED INTERFACE ${optional_global})
set_property(
TARGET pybind11::embed
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES pybind11::pybind11)
# --------------------------- link helper ---------------------------
add_library(pybind11::python_link_helper IMPORTED INTERFACE ${optional_global})
if(CMAKE_VERSION VERSION_LESS 3.13)
# In CMake 3.11+, you can set INTERFACE properties via the normal methods, and
# this would be simpler.
set_property(
TARGET pybind11::python_link_helper
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES "$<$<PLATFORM_ID:Darwin>:-undefined dynamic_lookup>")
else()
# link_options was added in 3.13+
# This is safer, because you are ensured the deduplication pass in CMake will not consider
# these separate and remove one but not the other.
set_property(
TARGET pybind11::python_link_helper
APPEND
PROPERTY INTERFACE_LINK_OPTIONS "$<$<PLATFORM_ID:Darwin>:LINKER:-undefined,dynamic_lookup>")
endif()
# ------------------------ Windows extras -------------------------
add_library(pybind11::windows_extras IMPORTED INTERFACE ${optional_global})
if(MSVC) # That's also clang-cl
# /bigobj is needed for bigger binding projects due to the limit to 64k
# addressable sections
set_property(
TARGET pybind11::windows_extras
APPEND
PROPERTY INTERFACE_COMPILE_OPTIONS $<$<COMPILE_LANGUAGE:CXX>:/bigobj>)
# /MP enables multithreaded builds (relevant when there are many files) for MSVC
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # no Clang no Intel
if(CMAKE_VERSION VERSION_LESS 3.11)
set_property(
TARGET pybind11::windows_extras
APPEND
PROPERTY INTERFACE_COMPILE_OPTIONS $<$<NOT:$<CONFIG:Debug>>:/MP>)
else()
# Only set these options for C++ files. This is important so that, for
# instance, projects that include other types of source files like CUDA
# .cu files don't get these options propagated to nvcc since that would
# cause the build to fail.
set_property(
TARGET pybind11::windows_extras
APPEND
PROPERTY INTERFACE_COMPILE_OPTIONS
$<$<NOT:$<CONFIG:Debug>>:$<$<COMPILE_LANGUAGE:CXX>:/MP>>)
endif()
endif()
endif()
# ----------------------- Optimize binary size --------------------------
add_library(pybind11::opt_size IMPORTED INTERFACE ${optional_global})
if(MSVC)
set(PYBIND11_OPT_SIZE /Os)
else()
set(PYBIND11_OPT_SIZE -Os)
endif()
set_property(
TARGET pybind11::opt_size
APPEND
PROPERTY INTERFACE_COMPILE_OPTIONS $<$<CONFIG:Release>:${PYBIND11_OPT_SIZE}>
$<$<CONFIG:MinSizeRel>:${PYBIND11_OPT_SIZE}>
$<$<CONFIG:RelWithDebInfo>:${PYBIND11_OPT_SIZE}>)
# ----------------------- Legacy option --------------------------
# Warn or error if old variable name used
if(PYBIND11_CPP_STANDARD)
string(REGEX MATCH [[..$]] VAL "${PYBIND11_CPP_STANDARD}")
if(CMAKE_CXX_STANDARD)
if(NOT CMAKE_CXX_STANDARD STREQUAL VAL)
message(WARNING "CMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} does not match "
"PYBIND11_CPP_STANDARD=${PYBIND11_CPP_STANDARD}, "
"please remove PYBIND11_CPP_STANDARD from your cache")
endif()
else()
set(supported_standards 11 14 17 20)
if("${VAL}" IN_LIST supported_standards)
message(WARNING "USE -DCMAKE_CXX_STANDARD=${VAL} instead of PYBIND11_CPP_STANDARD")
set(CMAKE_CXX_STANDARD
${VAL}
CACHE STRING "From PYBIND11_CPP_STANDARD")
else()
message(FATAL_ERROR "PYBIND11_CPP_STANDARD should be replaced with CMAKE_CXX_STANDARD "
"(last two chars: ${VAL} not understood as a valid CXX std)")
endif()
endif()
endif()
# --------------------- Python specifics -------------------------
# CMake 3.27 removes the classic FindPythonInterp if CMP0148 is NEW
if(CMAKE_VERSION VERSION_LESS "3.27")
set(_pybind11_missing_old_python "OLD")
else()
cmake_policy(GET CMP0148 _pybind11_missing_old_python)
endif()
# Check to see which Python mode we are in, new, old, or no python
if(PYBIND11_NOPYTHON)
set(_pybind11_nopython ON)
elseif(
_pybind11_missing_old_python STREQUAL "NEW"
OR PYBIND11_FINDPYTHON
OR Python_FOUND
OR Python2_FOUND
OR Python3_FOUND)
# New mode
include("${CMAKE_CURRENT_LIST_DIR}/pybind11NewTools.cmake")
else()
# Classic mode
include("${CMAKE_CURRENT_LIST_DIR}/pybind11Tools.cmake")
endif()
# --------------------- pybind11_find_import -------------------------------
if(NOT _pybind11_nopython)
# Check to see if modules are importable. Use REQUIRED to force an error if
# one of the modules is not found. <package_name>_FOUND will be set if the
# package was found (underscores replace dashes if present). QUIET will hide
# the found message, and VERSION will require a minimum version. A successful
# find will cache the result.
function(pybind11_find_import PYPI_NAME)
# CMake variables need underscores (PyPI doesn't care)
string(REPLACE "-" "_" NORM_PYPI_NAME "${PYPI_NAME}")
# Return if found previously
if(${NORM_PYPI_NAME}_FOUND)
return()
endif()
set(options "REQUIRED;QUIET")
set(oneValueArgs "VERSION")
cmake_parse_arguments(ARG "${options}" "${oneValueArgs}" "" ${ARGN})
if(ARG_REQUIRED)
set(status_level FATAL_ERROR)
else()
set(status_level WARNING)
endif()
execute_process(
COMMAND
${${_Python}_EXECUTABLE} -c
"from pkg_resources import get_distribution; print(get_distribution('${PYPI_NAME}').version)"
RESULT_VARIABLE RESULT_PRESENT
OUTPUT_VARIABLE PKG_VERSION
ERROR_QUIET)
string(STRIP "${PKG_VERSION}" PKG_VERSION)
# If a result is present, this failed
if(RESULT_PRESENT)
set(${NORM_PYPI_NAME}_FOUND
${NORM_PYPI_NAME}-NOTFOUND
CACHE INTERNAL "")
# Always warn or error
message(
${status_level}
"Missing: ${PYPI_NAME} ${ARG_VERSION}\nTry: ${${_Python}_EXECUTABLE} -m pip install ${PYPI_NAME}"
)
else()
if(ARG_VERSION AND PKG_VERSION VERSION_LESS ARG_VERSION)
message(
${status_level}
"Version incorrect: ${PYPI_NAME} ${PKG_VERSION} found, ${ARG_VERSION} required - try upgrading"
)
else()
set(${NORM_PYPI_NAME}_FOUND
YES
CACHE INTERNAL "")
set(${NORM_PYPI_NAME}_VERSION
${PKG_VERSION}
CACHE INTERNAL "")
endif()
if(NOT ARG_QUIET)
message(STATUS "Found ${PYPI_NAME} ${PKG_VERSION}")
endif()
endif()
if(NOT ARG_VERSION OR (NOT PKG_VERSION VERSION_LESS ARG_VERSION))
# We have successfully found a good version, cache to avoid calling again.
endif()
endfunction()
endif()
# --------------------- LTO -------------------------------
include(CheckCXXCompilerFlag)
# Checks whether the given CXX/linker flags can compile and link a cxx file.
# cxxflags and linkerflags are lists of flags to use. The result variable is a
# unique variable name for each set of flags: the compilation result will be
# cached base on the result variable. If the flags work, sets them in
# cxxflags_out/linkerflags_out internal cache variables (in addition to
# ${result}).
function(_pybind11_return_if_cxx_and_linker_flags_work result cxxflags linkerflags cxxflags_out
linkerflags_out)
set(CMAKE_REQUIRED_LIBRARIES ${linkerflags})
check_cxx_compiler_flag("${cxxflags}" ${result})
if(${result})
set(${cxxflags_out}
"${cxxflags}"
PARENT_SCOPE)
set(${linkerflags_out}
"${linkerflags}"
PARENT_SCOPE)
endif()
endfunction()
function(_pybind11_generate_lto target prefer_thin_lto)
if(MINGW)
message(STATUS "${target} disabled (problems with undefined symbols for MinGW for now)")
return()
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang")
set(cxx_append "")
set(linker_append "")
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT APPLE)
# Clang Gold plugin does not support -Os; append -O3 to MinSizeRel builds to override it
set(linker_append ";$<$<CONFIG:MinSizeRel>:-O3>")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND NOT MINGW)
set(cxx_append ";-fno-fat-lto-objects")
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64le" OR CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
set(NO_FLTO_ARCH TRUE)
else()
set(NO_FLTO_ARCH FALSE)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang"
AND prefer_thin_lto
AND NOT NO_FLTO_ARCH)
_pybind11_return_if_cxx_and_linker_flags_work(
HAS_FLTO_THIN "-flto=thin${cxx_append}" "-flto=thin${linker_append}"
PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
endif()
if(NOT HAS_FLTO_THIN AND NOT NO_FLTO_ARCH)
_pybind11_return_if_cxx_and_linker_flags_work(
HAS_FLTO "-flto${cxx_append}" "-flto${linker_append}" PYBIND11_LTO_CXX_FLAGS
PYBIND11_LTO_LINKER_FLAGS)
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM")
# IntelLLVM equivalent to LTO is called IPO; also IntelLLVM is WIN32/UNIX
# WARNING/HELP WANTED: This block of code is currently not covered by pybind11 GitHub Actions!
if(WIN32)
_pybind11_return_if_cxx_and_linker_flags_work(
HAS_INTEL_IPO "-Qipo" "-Qipo" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
else()
_pybind11_return_if_cxx_and_linker_flags_work(
HAS_INTEL_IPO "-ipo" "-ipo" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
# Intel equivalent to LTO is called IPO
_pybind11_return_if_cxx_and_linker_flags_work(HAS_INTEL_IPO "-ipo" "-ipo"
PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
elseif(MSVC)
# cmake only interprets libraries as linker flags when they start with a - (otherwise it
# converts /LTCG to \LTCG as if it was a Windows path). Luckily MSVC supports passing flags
# with - instead of /, even if it is a bit non-standard:
_pybind11_return_if_cxx_and_linker_flags_work(HAS_MSVC_GL_LTCG "/GL" "-LTCG"
PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
endif()
# Enable LTO flags if found, except for Debug builds
if(PYBIND11_LTO_CXX_FLAGS)
# CONFIG takes multiple values in CMake 3.19+, until then we have to use OR
set(is_debug "$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>")
set(not_debug "$<NOT:${is_debug}>")
set(cxx_lang "$<COMPILE_LANGUAGE:CXX>")
if(MSVC AND CMAKE_VERSION VERSION_LESS 3.11)
set(genex "${not_debug}")
else()
set(genex "$<AND:${not_debug},${cxx_lang}>")
endif()
set_property(
TARGET ${target}
APPEND
PROPERTY INTERFACE_COMPILE_OPTIONS "$<${genex}:${PYBIND11_LTO_CXX_FLAGS}>")
if(CMAKE_PROJECT_NAME STREQUAL "pybind11")
message(STATUS "${target} enabled")
endif()
else()
if(CMAKE_PROJECT_NAME STREQUAL "pybind11")
message(STATUS "${target} disabled (not supported by the compiler and/or linker)")
endif()
endif()
if(PYBIND11_LTO_LINKER_FLAGS)
if(CMAKE_VERSION VERSION_LESS 3.11)
set_property(
TARGET ${target}
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES "$<${not_debug}:${PYBIND11_LTO_LINKER_FLAGS}>")
else()
set_property(
TARGET ${target}
APPEND
PROPERTY INTERFACE_LINK_OPTIONS "$<${not_debug}:${PYBIND11_LTO_LINKER_FLAGS}>")
endif()
endif()
endfunction()
if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
add_library(pybind11::lto IMPORTED INTERFACE ${optional_global})
_pybind11_generate_lto(pybind11::lto FALSE)
add_library(pybind11::thin_lto IMPORTED INTERFACE ${optional_global})
_pybind11_generate_lto(pybind11::thin_lto TRUE)
endif()
# ---------------------- pybind11_strip -----------------------------
function(pybind11_strip target_name)
# Strip unnecessary sections of the binary on Linux/macOS
if(CMAKE_STRIP)
if(APPLE)
set(x_opt -x)
endif()
add_custom_command(
TARGET ${target_name}
POST_BUILD
COMMAND ${CMAKE_STRIP} ${x_opt} $<TARGET_FILE:${target_name}>)
endif()
endfunction()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tools/pybind11NewTools.cmake | CMake | # tools/pybind11NewTools.cmake -- Build system for the pybind11 modules
#
# Copyright (c) 2020 Wenzel Jakob <wenzel@inf.ethz.ch> and Henry Schreiner
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
if(CMAKE_VERSION VERSION_LESS 3.12)
message(FATAL_ERROR "You cannot use the new FindPython module with CMake < 3.12")
endif()
include_guard(DIRECTORY)
get_property(
is_config
TARGET pybind11::headers
PROPERTY IMPORTED)
if(pybind11_FIND_QUIETLY)
set(_pybind11_quiet QUIET)
else()
set(_pybind11_quiet "")
endif()
if(NOT Python_FOUND AND NOT Python3_FOUND)
if(NOT DEFINED Python_FIND_IMPLEMENTATIONS)
set(Python_FIND_IMPLEMENTATIONS CPython PyPy)
endif()
# GitHub Actions like activation
if(NOT DEFINED Python_ROOT_DIR AND DEFINED ENV{pythonLocation})
set(Python_ROOT_DIR "$ENV{pythonLocation}")
endif()
find_package(Python 3.6 REQUIRED COMPONENTS Interpreter Development ${_pybind11_quiet})
# If we are in submodule mode, export the Python targets to global targets.
# If this behavior is not desired, FindPython _before_ pybind11.
if(NOT is_config)
set_property(TARGET Python::Python PROPERTY IMPORTED_GLOBAL TRUE)
set_property(TARGET Python::Interpreter PROPERTY IMPORTED_GLOBAL TRUE)
if(TARGET Python::Module)
set_property(TARGET Python::Module PROPERTY IMPORTED_GLOBAL TRUE)
endif()
endif()
endif()
if(Python_FOUND)
set(_Python
Python
CACHE INTERNAL "" FORCE)
elseif(Python3_FOUND)
set(_Python
Python3
CACHE INTERNAL "" FORCE)
endif()
if(PYBIND11_MASTER_PROJECT)
if(${_Python}_INTERPRETER_ID MATCHES "PyPy")
message(STATUS "PyPy ${${_Python}_PyPy_VERSION} (Py ${${_Python}_VERSION})")
else()
message(STATUS "${_Python} ${${_Python}_VERSION}")
endif()
endif()
# If a user finds Python, they may forget to include the Interpreter component
# and the following two steps require it. It is highly recommended by CMake
# when finding development libraries anyway, so we will require it.
if(NOT DEFINED ${_Python}_EXECUTABLE)
message(
FATAL_ERROR
"${_Python} was found without the Interpreter component. Pybind11 requires this component.")
endif()
if(NOT ${_Python}_EXECUTABLE STREQUAL PYBIND11_PYTHON_EXECUTABLE_LAST)
# Detect changes to the Python version/binary in subsequent CMake runs, and refresh config if needed
unset(PYTHON_IS_DEBUG CACHE)
unset(PYTHON_MODULE_EXTENSION CACHE)
set(PYBIND11_PYTHON_EXECUTABLE_LAST
"${${_Python}_EXECUTABLE}"
CACHE INTERNAL "Python executable during the last CMake run")
endif()
if(NOT DEFINED PYTHON_IS_DEBUG)
# Debug check - see https://stackoverflow.com/questions/646518/python-how-to-detect-debug-Interpreter
execute_process(
COMMAND "${${_Python}_EXECUTABLE}" "-c"
"import sys; sys.exit(hasattr(sys, 'gettotalrefcount'))"
RESULT_VARIABLE _PYTHON_IS_DEBUG)
set(PYTHON_IS_DEBUG
"${_PYTHON_IS_DEBUG}"
CACHE INTERNAL "Python debug status")
endif()
# Get the suffix - SO is deprecated, should use EXT_SUFFIX, but this is
# required for PyPy3 (as of 7.3.1)
if(NOT DEFINED PYTHON_MODULE_EXTENSION)
execute_process(
COMMAND
"${${_Python}_EXECUTABLE}" "-c"
"import sys, importlib; s = importlib.import_module('distutils.sysconfig' if sys.version_info < (3, 10) else 'sysconfig'); print(s.get_config_var('EXT_SUFFIX') or s.get_config_var('SO'))"
OUTPUT_VARIABLE _PYTHON_MODULE_EXTENSION
ERROR_VARIABLE _PYTHON_MODULE_EXTENSION_ERR
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(_PYTHON_MODULE_EXTENSION STREQUAL "")
message(
FATAL_ERROR "pybind11 could not query the module file extension, likely the 'distutils'"
"package is not installed. Full error message:\n${_PYTHON_MODULE_EXTENSION_ERR}")
endif()
# This needs to be available for the pybind11_extension function
set(PYTHON_MODULE_EXTENSION
"${_PYTHON_MODULE_EXTENSION}"
CACHE INTERNAL "")
endif()
# Python debug libraries expose slightly different objects before 3.8
# https://docs.python.org/3.6/c-api/intro.html#debugging-builds
# https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
if(PYTHON_IS_DEBUG)
set_property(
TARGET pybind11::pybind11
APPEND
PROPERTY INTERFACE_COMPILE_DEFINITIONS Py_DEBUG)
endif()
# Check on every access - since Python can change - do nothing in that case.
if(DEFINED ${_Python}_INCLUDE_DIRS)
# Only add Python for build - must be added during the import for config
# since it has to be re-discovered.
#
# This needs to be a target to be included after the local pybind11
# directory, just in case there there is an installed pybind11 sitting
# next to Python's includes. It also ensures Python is a SYSTEM library.
add_library(pybind11::python_headers INTERFACE IMPORTED)
set_property(
TARGET pybind11::python_headers PROPERTY INTERFACE_INCLUDE_DIRECTORIES
"$<BUILD_INTERFACE:${${_Python}_INCLUDE_DIRS}>")
set_property(
TARGET pybind11::pybind11
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_headers)
set(pybind11_INCLUDE_DIRS
"${pybind11_INCLUDE_DIR}" "${${_Python}_INCLUDE_DIRS}"
CACHE INTERNAL "Directories where pybind11 and possibly Python headers are located")
endif()
# In CMake 3.18+, you can find these separately, so include an if
if(TARGET ${_Python}::Python)
set_property(
TARGET pybind11::embed
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::Python)
endif()
# CMake 3.15+ has this
if(TARGET ${_Python}::Module)
set_property(
TARGET pybind11::module
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::Module)
else()
set_property(
TARGET pybind11::module
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_link_helper)
endif()
# WITHOUT_SOABI and WITH_SOABI will disable the custom extension handling used by pybind11.
# WITH_SOABI is passed on to python_add_library.
function(pybind11_add_module target_name)
cmake_parse_arguments(PARSE_ARGV 1 ARG
"STATIC;SHARED;MODULE;THIN_LTO;OPT_SIZE;NO_EXTRAS;WITHOUT_SOABI" "" "")
if(ARG_STATIC)
set(lib_type STATIC)
elseif(ARG_SHARED)
set(lib_type SHARED)
else()
set(lib_type MODULE)
endif()
if("${_Python}" STREQUAL "Python")
python_add_library(${target_name} ${lib_type} ${ARG_UNPARSED_ARGUMENTS})
elseif("${_Python}" STREQUAL "Python3")
python3_add_library(${target_name} ${lib_type} ${ARG_UNPARSED_ARGUMENTS})
else()
message(FATAL_ERROR "Cannot detect FindPython version: ${_Python}")
endif()
target_link_libraries(${target_name} PRIVATE pybind11::headers)
if(lib_type STREQUAL "MODULE")
target_link_libraries(${target_name} PRIVATE pybind11::module)
else()
target_link_libraries(${target_name} PRIVATE pybind11::embed)
endif()
if(MSVC)
target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
endif()
# -fvisibility=hidden is required to allow multiple modules compiled against
# different pybind versions to work properly, and for some features (e.g.
# py::module_local). We force it on everything inside the `pybind11`
# namespace; also turning it on for a pybind module compilation here avoids
# potential warnings or issues from having mixed hidden/non-hidden types.
if(NOT DEFINED CMAKE_CXX_VISIBILITY_PRESET)
set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden")
endif()
if(NOT DEFINED CMAKE_CUDA_VISIBILITY_PRESET)
set_target_properties(${target_name} PROPERTIES CUDA_VISIBILITY_PRESET "hidden")
endif()
# If we don't pass a WITH_SOABI or WITHOUT_SOABI, use our own default handling of extensions
if(NOT ARG_WITHOUT_SOABI AND NOT "WITH_SOABI" IN_LIST ARG_UNPARSED_ARGUMENTS)
pybind11_extension(${target_name})
endif()
if(ARG_NO_EXTRAS)
return()
endif()
if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
if(ARG_THIN_LTO)
target_link_libraries(${target_name} PRIVATE pybind11::thin_lto)
else()
target_link_libraries(${target_name} PRIVATE pybind11::lto)
endif()
endif()
# Use case-insensitive comparison to match the result of $<CONFIG:cfgs>
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if(NOT MSVC AND NOT "${uppercase_CMAKE_BUILD_TYPE}" MATCHES DEBUG|RELWITHDEBINFO)
# Strip unnecessary sections of the binary on Linux/macOS
pybind11_strip(${target_name})
endif()
if(MSVC)
target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
endif()
if(ARG_OPT_SIZE)
target_link_libraries(${target_name} PRIVATE pybind11::opt_size)
endif()
endfunction()
function(pybind11_extension name)
# The extension is precomputed
set_target_properties(${name} PROPERTIES PREFIX "" SUFFIX "${PYTHON_MODULE_EXTENSION}")
endfunction()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tools/pybind11Tools.cmake | CMake | # tools/pybind11Tools.cmake -- Build system for the pybind11 modules
#
# Copyright (c) 2020 Wenzel Jakob <wenzel.jakob@epfl.ch>
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# include_guard(global) (pre-CMake 3.10)
if(TARGET pybind11::python_headers)
return()
endif()
# Built-in in CMake 3.5+
include(CMakeParseArguments)
if(pybind11_FIND_QUIETLY)
set(_pybind11_quiet QUIET)
else()
set(_pybind11_quiet "")
endif()
# If this is the first run, PYTHON_VERSION can stand in for PYBIND11_PYTHON_VERSION
if(NOT DEFINED PYBIND11_PYTHON_VERSION AND DEFINED PYTHON_VERSION)
message(WARNING "Set PYBIND11_PYTHON_VERSION to search for a specific version, not "
"PYTHON_VERSION (which is an output). Assuming that is what you "
"meant to do and continuing anyway.")
set(PYBIND11_PYTHON_VERSION
"${PYTHON_VERSION}"
CACHE STRING "Python version to use for compiling modules")
unset(PYTHON_VERSION)
unset(PYTHON_VERSION CACHE)
elseif(DEFINED PYBIND11_PYTHON_VERSION)
# If this is set as a normal variable, promote it
set(PYBIND11_PYTHON_VERSION
"${PYBIND11_PYTHON_VERSION}"
CACHE STRING "Python version to use for compiling modules")
else()
# Make an empty cache variable.
set(PYBIND11_PYTHON_VERSION
""
CACHE STRING "Python version to use for compiling modules")
endif()
# A user can set versions manually too
set(Python_ADDITIONAL_VERSIONS
"3.11;3.10;3.9;3.8;3.7;3.6"
CACHE INTERNAL "")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}")
find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} MODULE REQUIRED ${_pybind11_quiet})
list(REMOVE_AT CMAKE_MODULE_PATH -1)
# Makes a normal variable a cached variable
macro(_PYBIND11_PROMOTE_TO_CACHE NAME)
set(_tmp_ptc "${${NAME}}")
# CMake 3.21 complains if a cached variable is shadowed by a normal one
unset(${NAME})
set(${NAME}
"${_tmp_ptc}"
CACHE INTERNAL "")
endmacro()
# Cache variables so pybind11_add_module can be used in parent projects
_pybind11_promote_to_cache(PYTHON_INCLUDE_DIRS)
_pybind11_promote_to_cache(PYTHON_LIBRARIES)
_pybind11_promote_to_cache(PYTHON_MODULE_PREFIX)
_pybind11_promote_to_cache(PYTHON_MODULE_EXTENSION)
_pybind11_promote_to_cache(PYTHON_VERSION_MAJOR)
_pybind11_promote_to_cache(PYTHON_VERSION_MINOR)
_pybind11_promote_to_cache(PYTHON_VERSION)
_pybind11_promote_to_cache(PYTHON_IS_DEBUG)
if(PYBIND11_MASTER_PROJECT)
if(PYTHON_MODULE_EXTENSION MATCHES "pypy")
if(NOT DEFINED PYPY_VERSION)
execute_process(
COMMAND ${PYTHON_EXECUTABLE} -c
[=[import sys; sys.stdout.write(".".join(map(str, sys.pypy_version_info[:3])))]=]
OUTPUT_VARIABLE pypy_version)
set(PYPY_VERSION
${pypy_version}
CACHE INTERNAL "")
endif()
message(STATUS "PYPY ${PYPY_VERSION} (Py ${PYTHON_VERSION})")
else()
message(STATUS "PYTHON ${PYTHON_VERSION}")
endif()
endif()
# Only add Python for build - must be added during the import for config since
# it has to be re-discovered.
#
# This needs to be an target to it is included after the local pybind11
# directory, just in case there are multiple versions of pybind11, we want the
# one we expect.
add_library(pybind11::python_headers INTERFACE IMPORTED)
set_property(TARGET pybind11::python_headers PROPERTY INTERFACE_INCLUDE_DIRECTORIES
"$<BUILD_INTERFACE:${PYTHON_INCLUDE_DIRS}>")
set_property(
TARGET pybind11::pybind11
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_headers)
set(pybind11_INCLUDE_DIRS
"${pybind11_INCLUDE_DIR}" "${PYTHON_INCLUDE_DIRS}"
CACHE INTERNAL "Directories where pybind11 and possibly Python headers are located")
# Python debug libraries expose slightly different objects before 3.8
# https://docs.python.org/3.6/c-api/intro.html#debugging-builds
# https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
if(PYTHON_IS_DEBUG)
set_property(
TARGET pybind11::pybind11
APPEND
PROPERTY INTERFACE_COMPILE_DEFINITIONS Py_DEBUG)
endif()
# The <3.11 code here does not support release/debug builds at the same time, like on vcpkg
if(CMAKE_VERSION VERSION_LESS 3.11)
set_property(
TARGET pybind11::module
APPEND
PROPERTY
INTERFACE_LINK_LIBRARIES
pybind11::python_link_helper
"$<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:Cygwin>>:$<BUILD_INTERFACE:${PYTHON_LIBRARIES}>>"
)
set_property(
TARGET pybind11::embed
APPEND
PROPERTY INTERFACE_LINK_LIBRARIES pybind11::pybind11 $<BUILD_INTERFACE:${PYTHON_LIBRARIES}>)
else()
# The IMPORTED INTERFACE library here is to ensure that "debug" and "release" get processed outside
# of a generator expression - https://gitlab.kitware.com/cmake/cmake/-/issues/18424, as they are
# target_link_library keywords rather than real libraries.
add_library(pybind11::_ClassicPythonLibraries IMPORTED INTERFACE)
target_link_libraries(pybind11::_ClassicPythonLibraries INTERFACE ${PYTHON_LIBRARIES})
target_link_libraries(
pybind11::module
INTERFACE
pybind11::python_link_helper
"$<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:Cygwin>>:pybind11::_ClassicPythonLibraries>")
target_link_libraries(pybind11::embed INTERFACE pybind11::pybind11
pybind11::_ClassicPythonLibraries)
endif()
function(pybind11_extension name)
# The prefix and extension are provided by FindPythonLibsNew.cmake
set_target_properties(${name} PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
SUFFIX "${PYTHON_MODULE_EXTENSION}")
endfunction()
# Build a Python extension module:
# pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]
# [NO_EXTRAS] [THIN_LTO] [OPT_SIZE] source1 [source2 ...])
#
function(pybind11_add_module target_name)
set(options "MODULE;SHARED;EXCLUDE_FROM_ALL;NO_EXTRAS;SYSTEM;THIN_LTO;OPT_SIZE")
cmake_parse_arguments(ARG "${options}" "" "" ${ARGN})
if(ARG_MODULE AND ARG_SHARED)
message(FATAL_ERROR "Can't be both MODULE and SHARED")
elseif(ARG_SHARED)
set(lib_type SHARED)
else()
set(lib_type MODULE)
endif()
if(ARG_EXCLUDE_FROM_ALL)
set(exclude_from_all EXCLUDE_FROM_ALL)
else()
set(exclude_from_all "")
endif()
add_library(${target_name} ${lib_type} ${exclude_from_all} ${ARG_UNPARSED_ARGUMENTS})
target_link_libraries(${target_name} PRIVATE pybind11::module)
if(ARG_SYSTEM)
message(
STATUS
"Warning: this does not have an effect - use NO_SYSTEM_FROM_IMPORTED if using imported targets"
)
endif()
pybind11_extension(${target_name})
# -fvisibility=hidden is required to allow multiple modules compiled against
# different pybind versions to work properly, and for some features (e.g.
# py::module_local). We force it on everything inside the `pybind11`
# namespace; also turning it on for a pybind module compilation here avoids
# potential warnings or issues from having mixed hidden/non-hidden types.
if(NOT DEFINED CMAKE_CXX_VISIBILITY_PRESET)
set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden")
endif()
if(NOT DEFINED CMAKE_CUDA_VISIBILITY_PRESET)
set_target_properties(${target_name} PROPERTIES CUDA_VISIBILITY_PRESET "hidden")
endif()
if(ARG_NO_EXTRAS)
return()
endif()
if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
if(ARG_THIN_LTO)
target_link_libraries(${target_name} PRIVATE pybind11::thin_lto)
else()
target_link_libraries(${target_name} PRIVATE pybind11::lto)
endif()
endif()
# Use case-insensitive comparison to match the result of $<CONFIG:cfgs>
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if(NOT MSVC AND NOT "${uppercase_CMAKE_BUILD_TYPE}" MATCHES DEBUG|RELWITHDEBINFO)
pybind11_strip(${target_name})
endif()
if(MSVC)
target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
endif()
if(ARG_OPT_SIZE)
target_link_libraries(${target_name} PRIVATE pybind11::opt_size)
endif()
endfunction()
# Provide general way to call common Python commands in "common" file.
set(_Python
PYTHON
CACHE INTERNAL "" FORCE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/setup.py | Python | # Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """build_ext command that delegates extension compilation to CMake.

    ``run`` verifies that a usable ``cmake`` binary is on PATH, then builds
    each extension via :meth:`build_extension`, which configures and compiles
    the CMake project in ``self.build_temp`` and directs the resulting shared
    library into the directory setuptools expects the module to live in.
    """

    def run(self):
        # Probe for cmake up front so a missing tool produces a clear error
        # instead of a confusing failure mid-build.
        try:
            out = subprocess.check_output(["cmake", "--version"])
        except OSError:
            raise RuntimeError(
                "CMake must be installed to build the following extensions: "
                + ", ".join(e.name for e in self.extensions)
            )

        if platform.system() == "Windows":
            # The "-A x64" and "/m" flags used below require CMake >= 3.1 on
            # Windows. Compare numeric version tuples directly rather than
            # using distutils.version.LooseVersion: distutils was deprecated
            # by PEP 632 and removed entirely in Python 3.12, which made the
            # original check raise at import/run time on modern interpreters.
            version_str = re.search(r"version\s*([\d.]+)", out.decode()).group(1)
            version = tuple(int(p) for p in version_str.split(".") if p)
            if version < (3, 1, 0):
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")

        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        # Directory setuptools expects the finished module to land in.
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))

        # required for auto-detection of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep

        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
            "-DPYTHON_EXECUTABLE=" + sys.executable,
        ]

        cfg = "Debug" if self.debug else "Release"
        build_args = ["--config", cfg]

        if platform.system() == "Windows":
            # Multi-config generators (Visual Studio) pick the output
            # directory per configuration; "/m" enables MSBuild parallelism.
            cmake_args += [
                "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
            ]
            if sys.maxsize > 2**32:
                cmake_args += ["-A", "x64"]
            build_args += ["--", "/m"]
        else:
            cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
            build_args += ["--", "-j2"]

        # Allow extra CMake arguments to be injected from the environment.
        if "PYOPTIX_CMAKE_ARGS" in os.environ:
            cmake_args += [os.environ["PYOPTIX_CMAKE_ARGS"]]

        # the following is only needed for 7.0 compiles, because the optix
        # device header of that first version included stddef.h.
        if "PYOPTIX_STDDEF_DIR" in os.environ:
            cmake_args += [
                "-DOptiX_STDDEF_DIR={}".format(os.environ["PYOPTIX_STDDEF_DIR"])
            ]

        env = os.environ.copy()
        # Bake the package version into the compiled module.
        env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get("CXXFLAGS", ""), self.distribution.get_version()
        )
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        print(
            "CMAKE CMD: <<<{}>>>".format(
                " ".join(["cmake", ext.sourcedir] + cmake_args)
            )
        )
        subprocess.check_call(
            ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env
        )
        subprocess.check_call(
            ["cmake", "--build", "."] + build_args, cwd=self.build_temp
        )
# Register the "optix" extension module. The CMakeExtension carries no
# sources; the CMakeBuild command above delegates the whole compilation to
# CMake. zip_safe=False because the module is a native shared library.
setup(
    name="optix",
    version="0.0.1",
    author="Keith Morley",
    author_email="kmorley@nvidia.com",
    description="Python bindings for NVIDIA OptiX",
    long_description="",
    ext_modules=[CMakeExtension("optix")],
    cmdclass=dict(build_ext=CMakeBuild),
    zip_safe=False,
)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/CompilerInfo.cmake | CMake |
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# Sets some variables depending on which compiler you are using
#
# USING_GNU_C : gcc is being used for C compiler
# USING_GNU_CXX : g++ is being used for C++ compiler
# USING_CLANG_C : gcc is being used for C compiler
# USING_CLANG_CXX : g++ is being used for C++ compiler
# USING_ICC : icc is being used for C compiler
# USING_ICPC : icpc is being used for C++ compiler
# USING_WINDOWS_CL : Visual Studio's compiler
# USING_WINDOWS_ICL : Intel's Windows compiler
# Classify the C compiler into one of the USING_* flags declared above.
# Assume it is recognized until the chain below falls through.
set(USING_KNOWN_C_COMPILER TRUE)
if(CMAKE_COMPILER_IS_GNUCC)
  set(USING_GNU_C TRUE)
elseif( CMAKE_C_COMPILER_ID STREQUAL "Intel" )
  set(USING_ICC TRUE)
elseif( CMAKE_C_COMPILER_ID STREQUAL "Clang" )
  set(USING_CLANG_C TRUE)
elseif( MSVC OR "x${CMAKE_C_COMPILER_ID}" STREQUAL "xMSVC" )
  # The "x" prefix guards the STREQUAL against an empty compiler id.
  set(USING_WINDOWS_CL TRUE)
else()
  set(USING_KNOWN_C_COMPILER FALSE)
endif()
# Classify the C++ compiler into one of the USING_* flags declared above.
set(USING_KNOWN_CXX_COMPILER TRUE)
if(CMAKE_COMPILER_IS_GNUCXX)
  set(USING_GNU_CXX TRUE)
elseif( CMAKE_CXX_COMPILER_ID STREQUAL "Intel" )
  set(USING_ICPC TRUE)
elseif( CMAKE_CXX_COMPILER_ID STREQUAL "Clang" )
  set(USING_CLANG_CXX TRUE)
elseif( MSVC OR "x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC" )
  # Fix: test the C++ compiler id here; the original tested
  # CMAKE_C_COMPILER_ID, so a non-MSVC CXX compiler paired with an MSVC C
  # compiler was silently treated as a matched toolchain (and vice versa
  # the warning below could fire spuriously).
  if( NOT USING_WINDOWS_CL )
    message( WARNING "Mixing WinCL C++ compiler with non-matching C compiler" )
  endif()
else()
  set(USING_KNOWN_CXX_COMPILER FALSE)
endif()
# Capture the gcc version string for later use.
# NOTE(review): the output of -dumpversion includes a trailing newline;
# downstream comparisons should account for that — confirm before relying
# on an exact match.
if(USING_GNU_C)
  execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion
    OUTPUT_VARIABLE GCC_VERSION)
endif()

# Using unknown compilers: warn once per fresh configure
# (FIRST_TIME_MESSAGE is a helper macro defined elsewhere in this SDK).
if(NOT USING_KNOWN_C_COMPILER)
  FIRST_TIME_MESSAGE("Specified C compiler ${CMAKE_C_COMPILER} is not recognized (gcc, icc).  Using CMake defaults.")
endif()

if(NOT USING_KNOWN_CXX_COMPILER)
  FIRST_TIME_MESSAGE("Specified CXX compiler ${CMAKE_CXX_COMPILER} is not recognized (g++, icpc).  Using CMake defaults.")
endif()
if(USING_WINDOWS_CL)
  # _USE_MATH_DEFINES exposes M_PI and friends from <math.h>/<cmath>,
  # which MSVC hides by default.
  add_definitions(-D_USE_MATH_DEFINES)
  # NOMINMAX stops <windows.h> from #defining min/max macros that break
  # std::min/std::max.
  add_definitions(-DNOMINMAX)
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/ConfigCompilerFlags.cmake | CMake |
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# This will create a set of default compiler flags based on the system
# and compiler supplied.
##############################################################
## Compiler libraries
##############################################################
# Some compilers in non default locations have libraries they need in
# order to run properly. You could change your LD_LIBRARY_PATH or you
# could add the path to the compiler libraries to the rpath of the library or executable.
# This helps with that.
# Cache the (optional) path to the compiler's own runtime libraries and, when
# it exists, bake it into the rpath of every executable/module/shared library
# so the matching runtime is found without touching LD_LIBRARY_PATH.
set(COMPILER_LIBRARY_PATH "${COMPILER_LIBRARY_PATH}" CACHE PATH "Path to compiler libraries" FORCE)
mark_as_advanced(COMPILER_LIBRARY_PATH)

if(EXISTS "${COMPILER_LIBRARY_PATH}")
  set(rpath_arg "-Wl,-rpath,\"${COMPILER_LIBRARY_PATH}\"")
  # TODO(bigler): remove the old path if there is one
  force_add_flags(CMAKE_EXE_LINKER_FLAGS ${rpath_arg})
  force_add_flags(CMAKE_MODULE_LINKER_FLAGS ${rpath_arg})
  force_add_flags(CMAKE_SHARED_LINKER_FLAGS ${rpath_arg})
elseif(COMPILER_LIBRARY_PATH)
  # Set but pointing nowhere: fail loudly instead of producing a broken rpath.
  message(FATAL_ERROR "COMPILER_LIBRARY_PATH is set, but the path does not exist:\n${COMPILER_LIBRARY_PATH}")
endif()
##############################################################
## Helper macros
##############################################################
# set_flags(FLAG NEW_VALUE)
# If the variable named by NEW_VALUE is non-empty, seed the cache variable
# FLAG with its contents via first_time_set (a helper defined elsewhere in
# this SDK that only writes on the first configure, preserving user edits).
macro(set_flags FLAG NEW_VALUE)
  if(${NEW_VALUE})
    # first_time_message("Setting compiler flags:")
    # first_time_message("${NEW_VALUE} = ${${NEW_VALUE}}")
    first_time_set(${FLAG} "${${NEW_VALUE}}" STRING "Default compiler flags" )
  endif()
endmacro()
# Appends ${new} to the string of flags in ${flag}_INIT, then uses that variable to set
# ${flags} via the set_flags macro. Note that ${flag}_INIT isn't modified outside of the
# function's scope.
# append_and_set(flag new)
# Append ${new} to a function-local copy of ${flag}_INIT, then push the
# result into the ${flag} cache variable via set_flags. Because this is a
# function (own variable scope), ${flag}_INIT is unchanged for the caller.
function(append_and_set flag new)
  APPEND_TO_STRING(${flag}_INIT "${new}")
  set_flags(${flag} ${flag}_INIT)
endfunction()
##############################################################
## System independent
##############################################################
# Initialize these parameters
# Working strings that are accumulated below and finally pushed into the
# CMAKE_*_FLAGS* cache variables at the bottom of this file.
SET(C_FLAGS "")
SET(C_FLAGS_DEBUG "")
SET(C_FLAGS_RELEASE "")

SET(CXX_FLAGS "")
SET(CXX_FLAGS_DEBUG "")
SET(CXX_FLAGS_RELEASE "")

# Per-toolchain optimization strings; a single space is "empty but defined".
SET(INTEL_OPT " ")
SET(GCC_OPT " ")
SET(CL_OPT " ")

# Set some defaults.  CMake provides some defaults in the INIT
# versions of the variables.
APPEND_TO_STRING(C_FLAGS "${CMAKE_C_FLAGS_INIT}")
#
APPEND_TO_STRING(CXX_FLAGS "${CMAKE_CXX_FLAGS_INIT}")

# We want to enable aggressive warnings for everyone to avoid unexpected build
# farm failures. This can only be configured from the command line or from the
# caller's CMakeLists.txt.
if(NOT DEFINED OPTIX_USE_AGGRESSIVE_WARNINGS)
  set(OPTIX_USE_AGGRESSIVE_WARNINGS ON)
endif()
#############################################################
# Set the default warning levels for each compiler. Where the compiler runs on
# multiple architectures, the flags are architecture independent.
#############################################################
# Build the gcc/clang warning strings. When aggressive warnings are off the
# variables are set to a single space ("defined but empty") so later
# APPEND_TO_STRING calls still receive an argument.
if(OPTIX_USE_AGGRESSIVE_WARNINGS)
  if (USING_CLANG_CXX)
    # Extra warning suppression that clang warns about
    set(clang_warnings "-Wno-unused-function -Wno-unused-private-field -Wno-unused-const-variable -Wno-deprecated-declarations -Wno-missing-braces")
    include(CheckCXXCompilerFlag)
    CHECK_CXX_COMPILER_FLAG(-Wno-inconsistent-missing-override CXX_ACCEPTS_NO_INCONSISTENT_MISSING_OVERRIDE)
    if(CXX_ACCEPTS_NO_INCONSISTENT_MISSING_OVERRIDE)
      # clang 8.0 warns on missing override decorators
      set(clang_warnings "${clang_warnings} -Wno-inconsistent-missing-override")
    endif()
  endif()
  # Needed for corelib's use of deprecated sysctl.h
  include(CheckCXXCompilerFlag)
  CHECK_CXX_COMPILER_FLAG(-Wno-cpp OPTIX_CXX_ACCEPTS_NO_CPP)
  if(OPTIX_CXX_ACCEPTS_NO_CPP)
    set(OPTIX_NO_CPP -Wno-cpp)
  endif()
  SET(CXX_WARNING_FLAGS "-Wall -Wsign-compare -Wno-multichar ${clang_warnings} ${OPTIX_NO_CPP}")
  # C additionally enforces old-school C hygiene.
  SET(C_WARNING_FLAGS "${CXX_WARNING_FLAGS} -Wstrict-prototypes -Wdeclaration-after-statement")
  if(WARNINGS_AS_ERRORS)
    APPEND_TO_STRING(C_WARNING_FLAGS "-Werror")
    APPEND_TO_STRING(CXX_WARNING_FLAGS "-Werror")
  endif()
else()
  set(C_WARNING_FLAGS " ")
  set(CXX_WARNING_FLAGS " ")
endif()

# Shared gcc/clang configuration-specific flags, applied just below.
SET(DEBUG_FLAGS "-O0 -g3")
# We might consider adding -ffast-math.
SET(RELEASE_FLAGS "-O3 -DNDEBUG -g3 -funroll-loops")
# Apply the warning/debug/release flag sets to the gcc- and clang-family
# compilers (CMake command and macro names are case-insensitive).
if(USING_GNU_C OR USING_CLANG_C)
  append_to_string(C_FLAGS ${C_WARNING_FLAGS})
  append_to_string(C_FLAGS_DEBUG ${DEBUG_FLAGS})
  append_to_string(C_FLAGS_RELEASE ${RELEASE_FLAGS})
endif()

if(USING_GNU_CXX OR USING_CLANG_CXX)
  append_to_string(CXX_FLAGS ${CXX_WARNING_FLAGS})
  append_to_string(CXX_FLAGS_DEBUG "${DEBUG_FLAGS}")
  append_to_string(CXX_FLAGS_RELEASE ${RELEASE_FLAGS})

  # Record -Wno-unused-result in OPTIX_NO_UNUSED_RESULT when supported.
  include(CheckCXXCompilerFlag)
  check_cxx_compiler_flag(-Wno-unused-result OPTIX_CXX_ACCEPTS_NO_UNUSED_RESULT)
  if(OPTIX_CXX_ACCEPTS_NO_UNUSED_RESULT)
    set(OPTIX_NO_UNUSED_RESULT -Wno-unused-result)
  endif()
endif()
########################
# Windows flags
# /W3 - more warnings
# /WX - warnings as errors
#
# Disable these warnings:
# /wd4355 - 'this' used in initializer list
# /wd4996 - strncpy and other functions are unsafe
# /wd4800 - forcing value to bool 'true' or 'false' (performance warning)
#
# Turn on warnings for level /W3 (/w3XXXX):
#  /w34101 - unreferenced local variable
# /w34189 - local variable is initialized but not referenced
# /w34018 - 'expression' : signed/unsigned mismatch
# /w34389 - 'operator' : signed/unsigned mismatch
# MSVC warning configuration; the specific /wd and /w3 codes are documented
# in the comment block above.
if( WIN32 )
  set( WARNING_FLAGS "/W3" )
  if( WARNINGS_AS_ERRORS )
    set( WARNING_FLAGS "${WARNING_FLAGS} /WX" )
  endif()
  if(OPTIX_USE_AGGRESSIVE_WARNINGS)
    set(WARNING_FLAGS "${WARNING_FLAGS} /wd4355 /wd4996 /wd4800 /w34101 /w34189 /w34018 /w34389")
  else()
    set(WARNING_FLAGS "${WARNING_FLAGS} /wd4355 /wd4996")
  endif()
  # MSVC debug info is driven by the /Zi style flags elsewhere, so the
  # gcc-style DEBUG_FLAGS string is cleared here.
  SET(DEBUG_FLAGS "")
endif()
# Add /MP to get file-level compilation parallelism
SET(PARALLEL_COMPILE_FLAGS /MP)
IF (USING_WINDOWS_CL)
  APPEND_TO_STRING(C_FLAGS "${PARALLEL_COMPILE_FLAGS} ${WARNING_FLAGS}")
  APPEND_TO_STRING(CXX_FLAGS "${PARALLEL_COMPILE_FLAGS} ${WARNING_FLAGS}")
  # /Ox - Full Optimization (should supersede the /O2 optimization
  # /Ot - Favor fast code over small code
  # /GL (not used) - Enable link time code generation
  # /arch:SSE - Enable SSE instructions (only for 32 bit builds)
  # /fp:fast - Use Fast floating point model
  string(REPLACE "/O2" "/Ox" CMAKE_C_FLAGS_RELEASE_INIT "${CMAKE_C_FLAGS_RELEASE_INIT}")
  string(REPLACE "/O2" "/Ox" CMAKE_CXX_FLAGS_RELEASE_INIT "${CMAKE_CXX_FLAGS_RELEASE_INIT}")
  set(CL_OPT "/Ot /fp:fast")
  # 32-bit builds need /arch:SSE explicitly; x64 implies SSE2.
  if (CMAKE_SIZEOF_VOID_P EQUAL 4)
    APPEND_TO_STRING(CL_OPT "/arch:SSE")
  endif()
  append_and_set(CMAKE_C_FLAGS_RELEASE "${CL_OPT}")
  append_and_set(CMAKE_CXX_FLAGS_RELEASE "${CL_OPT}")

  # Turn these on if you turn /GL on
  # append_and_set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/ltcg")
  # append_and_set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "/ltcg")
  # append_and_set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "/ltcg")
ENDIF(USING_WINDOWS_CL)
# Intel compiler on Windows: silence CRT deprecation warnings and compile C
# as C99. The flag string is now set INSIDE the guard — the original set it
# unconditionally, clobbering WARNING_FLAGS for every other toolchain even
# though only the Intel-Windows branch consumes it here. (WARNING_FLAGS is
# not read again after this point in the file, so scoping it is safe.)
IF (USING_WINDOWS_ICL)
  SET(WARNING_FLAGS "/D_CRT_SECURE_NO_DEPRECATE=1 /Qstd=c99")
  # These are the warnings
  APPEND_TO_STRING(C_FLAGS ${WARNING_FLAGS})
  APPEND_TO_STRING(CXX_FLAGS ${WARNING_FLAGS})
ENDIF(USING_WINDOWS_ICL)
##############################################################
## Check for SSE 4.1 support
##############################################################
# Probe for SSE 4.1 support and record the result in sse_support.h so the
# C/C++ sources can branch on it at compile time.
if(USING_GNU_C OR USING_CLANG_C)
  include(CheckCCompilerFlag)
  CHECK_C_COMPILER_FLAG(-msse4.1 SSE_41_AVAILABLE)
elseif(USING_WINDOWS_CL)
  # MSVC does not take -msse4.1; SSE 4.1 intrinsics are always available.
  set(SSE_41_AVAILABLE 1)
else()
  message(WARNING "Unknown Compiler.  Disabling SSE 4.1 support")
  set(SSE_41_AVAILABLE 0)
endif()

# Derive this list file's directory (predates the builtin
# CMAKE_CURRENT_LIST_DIR variable) and generate the header in the build tree.
get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
configure_file("${CMAKE_CURRENT_LIST_DIR}/sse_support.h.in" "${CMAKE_BINARY_DIR}/include/sse_support.h")
##############################################################
## Apple
##############################################################
# macOS-specific compiler configuration.
IF(APPLE)
  if (USING_CLANG_CXX)
    # We have to use libc++ because the libstdc++ does not support std::move
    # https://cplusplusmusings.wordpress.com/2012/07/05/clang-and-standard-libraries-on-mac-os-x/
    APPEND_TO_STRING(CXX_FLAGS "-stdlib=libc++")
    APPEND_TO_STRING(CXX_FLAGS "-mmacosx-version-min=10.8")
  endif()

  # Candidate -march values, tried in order by the probe further below.
  APPEND_TO_STRING(GCC_ARCH "nocona")
  APPEND_TO_STRING(GCC_ARCH "prescott")
  APPEND_TO_STRING(GCC_OPT "-msse -msse2 -msse3")
  if( USING_GNU_C OR USING_GNU_CXX)
    APPEND_TO_STRING(GCC_OPT "-mfpmath=sse")
  endif()
ENDIF(APPLE)
##############################################################
## X86
##############################################################
# On apple machines CMAKE_SYSTEM_PROCESSOR return i386.
# On apple machines CMAKE_SYSTEM_PROCESSOR return i386.
# Builds per-CPU candidate lists: GCC_ARCH collects -march values (tried in
# order by the probe below), INTEL_OPT collects icc flags.
IF (CMAKE_SYSTEM_PROCESSOR MATCHES "i686" OR
    CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")

  APPEND_TO_STRING(GCC_OPT "-msse -msse2 -msse3 -mfpmath=sse")

  # mtune options
  # LinuxCPUInfo (SDK module) sets VENDOR_ID, CPU_FAMILY, MODEL, FLAGS from
  # /proc/cpuinfo.
  INCLUDE(LinuxCPUInfo)

  # AMD
  IF(VENDOR_ID MATCHES "AuthenticAMD")
    APPEND_TO_STRING(GCC_ARCH "opteron") # supports 64 bit instructions
    APPEND_TO_STRING(GCC_ARCH "athlon-xp") # no support for 64 bit instructions
    APPEND_TO_STRING(INTEL_OPT "-xW -unroll4")
  ENDIF(VENDOR_ID MATCHES "AuthenticAMD")

  # Intel
  IF(VENDOR_ID MATCHES "GenuineIntel")
    IF(CPU_FAMILY EQUAL 6)
      IF(MODEL EQUAL 15) # (F)
        # This is likely a Core 2
        # APPEND_TO_STRING(GCC_ARCH "kentsfield") # QX6700
        APPEND_TO_STRING(GCC_ARCH "nocona")
        APPEND_TO_STRING(GCC_ARCH "prescott")
        # -xT Intel(R) Core(TM)2 Duo processors, Intel(R) Core(TM)2 Quad
        # processors, and Intel(R) Xeon(R) processors with SSSE3
        APPEND_TO_STRING(INTEL_OPT "-xT -unroll4")
      ENDIF(MODEL EQUAL 15)
      IF(MODEL EQUAL 14) # (E)
        # This is likely a Core Single or Core Duo. This doesn't
        # support EM64T.
        APPEND_TO_STRING(GCC_ARCH "prescott")
      ENDIF(MODEL EQUAL 14)
      IF(MODEL LESS 14) #(0-D)
        # This is likely a Pentium3, Pentium M. Some pentium 3s don't
        # support sse2, in that case fall back to the i686 code.
        APPEND_TO_STRING(GCC_ARCH "pentium-m")
        APPEND_TO_STRING(INTEL_OPT "-xB")
      ENDIF(MODEL LESS 14)
    ENDIF(CPU_FAMILY EQUAL 6)

    IF(CPU_FAMILY EQUAL 15)
      # These are your Pentium 4 and friends
      IF(FLAGS MATCHES "em64t")
        APPEND_TO_STRING(GCC_ARCH "nocona")
        APPEND_TO_STRING(GCC_ARCH "prescott")
      ENDIF(FLAGS MATCHES "em64t")
      APPEND_TO_STRING(GCC_ARCH "pentium4")
      APPEND_TO_STRING(INTEL_OPT "-xP -unroll4 -msse3")
    ENDIF(CPU_FAMILY EQUAL 15)
  ENDIF(VENDOR_ID MATCHES "GenuineIntel")

  # Last-resort fallback architecture.
  APPEND_TO_STRING(GCC_ARCH "i686")

  ###########################################################
  # Some x86_64 specific stuff
  IF (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
    APPEND_TO_STRING(INTEL_OPT "")
  ENDIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
  ###########################################################

ENDIF (CMAKE_SYSTEM_PROCESSOR MATCHES "i686" OR
       CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
##############################################################
## Configure Architecture
##############################################################
# Cycle through the GCC_ARCH args and see which one will pass first.
# Guard this evaluation with PASSED_FIRST_CONFIGURE, to make sure it
# is only done the first time.
# Cycle through the candidate -march values in GCC_ARCH and keep the first
# one that both compiles and runs on this machine. Guarded by
# PASSED_FIRST_CONFIGURE so the (slow) TRY_RUN probing happens only on the
# first configure.
#
# Fix: the guard previously read "A OR B OR C OR D AND NOT E". CMake's AND
# binds tighter than OR, so NOT PASSED_FIRST_CONFIGURE only applied to the
# USING_CLANG_CXX clause and gcc users re-ran the probe on every configure.
# Parenthesizing applies the guard to all four compiler flags.
IF((USING_GNU_C OR USING_GNU_CXX OR USING_CLANG_C OR USING_CLANG_CXX) AND NOT PASSED_FIRST_CONFIGURE)
  SEPARATE_ARGUMENTS(GCC_ARCH)

  # Change the extension based of if we are using both gcc and g++.
  IF(USING_GNU_C OR USING_CLANG_C)
    SET(EXTENSION "c")
  ELSE()
    SET(EXTENSION "cc")
  ENDIF()
  SET(COMPILE_TEST_SOURCE ${CMAKE_BINARY_DIR}/test/compile-test.${EXTENSION})
  CONFIGURE_FILE("${CMAKE_CURRENT_LIST_DIR}/testmain.c"
    ${COMPILE_TEST_SOURCE} IMMEDIATE COPYONLY)

  FOREACH(ARCH ${GCC_ARCH})
    IF(NOT GOOD_ARCH)
      SET(ARCH_FLAG "-march=${ARCH} -mtune=${ARCH}")
      SET(COMPILER_ARGS "${ARCH_FLAG} ${C_FLAGS_RELEASE} ${C_FLAGS} ${GCC_OPT}")
      TRY_RUN(RUN_RESULT_VAR COMPILE_RESULT_VAR
        ${CMAKE_BINARY_DIR}/test ${COMPILE_TEST_SOURCE}
        CMAKE_FLAGS
        -DCOMPILE_DEFINITIONS:STRING=${COMPILER_ARGS}
        OUTPUT_VARIABLE OUTPUT
        )
      # Exit code 0 means the probe binary both built and ran.
      IF(RUN_RESULT_VAR EQUAL 0)
        SET(GOOD_ARCH ${ARCH})
      ENDIF(RUN_RESULT_VAR EQUAL 0)
    ENDIF(NOT GOOD_ARCH)
  ENDFOREACH(ARCH)
  IF(GOOD_ARCH)
    PREPEND_TO_STRING(GCC_OPT "-march=${GOOD_ARCH} -mtune=${GOOD_ARCH}")
  ENDIF(GOOD_ARCH)
ENDIF()
# MESSAGE("CMAKE_SYSTEM_PROCESSOR = ${CMAKE_SYSTEM_PROCESSOR}")
# MESSAGE("APPLE = ${APPLE}")
# MESSAGE("LINUX = ${LINUX}")
##############################################################
## Set the defaults
##############################################################
# MESSAGE("CMAKE_C_COMPILER = ${CMAKE_C_COMPILER}")
# MESSAGE("CMAKE_CXX_COMPILER = ${CMAKE_CXX_COMPILER}")
# MESSAGE("USING_GNU_C = ${USING_GNU_C}")
# MESSAGE("USING_GNU_CXX = ${USING_GNU_CXX}")
# MESSAGE("USING_ICC = ${USING_ICC}")
# MESSAGE("USING_ICPC = ${USING_ICPC}")
# MESSAGE("CMAKE version = ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}.${CMAKE_PATCH_VERSION}")
# MESSAGE("CMAKE_SYSTEM = ${CMAKE_SYSTEM}")
# MESSAGE("CMAKE_SYSTEM_PROCESSOR = ${CMAKE_SYSTEM_PROCESSOR}")
# ADD_COMPILER_FLAG(COMPILER FLAGS NEW_FLAG)
# When the variable named by COMPILER is true, prepend the contents of the
# variable named by NEW_FLAG onto the string variable named by FLAGS.
macro(ADD_COMPILER_FLAG COMPILER FLAGS NEW_FLAG)
  if(${COMPILER})
    PREPEND_TO_STRING(${FLAGS} ${${NEW_FLAG}})
  endif()
endmacro()
# Prepend the per-toolchain optimization strings onto the release flags.
ADD_COMPILER_FLAG(USING_ICC C_FLAGS_RELEASE INTEL_OPT)
ADD_COMPILER_FLAG(USING_ICPC CXX_FLAGS_RELEASE INTEL_OPT)
ADD_COMPILER_FLAG(USING_GNU_C C_FLAGS_RELEASE GCC_OPT)
ADD_COMPILER_FLAG(USING_GNU_CXX CXX_FLAGS_RELEASE GCC_OPT)
ADD_COMPILER_FLAG(USING_CLANG_C C_FLAGS_RELEASE GCC_OPT)
ADD_COMPILER_FLAG(USING_CLANG_CXX CXX_FLAGS_RELEASE GCC_OPT)

# Position-independent code for all Unix builds (shared library targets).
IF(UNIX)
  APPEND_TO_STRING(C_FLAGS "-fPIC")
  APPEND_TO_STRING(CXX_FLAGS "-fPIC")
ENDIF(UNIX)

# Finally push the accumulated strings into the CMAKE_* cache variables
# (first configure only — see set_flags/first_time_set above).
set_flags(CMAKE_C_FLAGS C_FLAGS)
set_flags(CMAKE_C_FLAGS_DEBUG C_FLAGS_DEBUG)
set_flags(CMAKE_C_FLAGS_RELEASE C_FLAGS_RELEASE)

set_flags(CMAKE_CXX_FLAGS CXX_FLAGS)
set_flags(CMAKE_CXX_FLAGS_DEBUG CXX_FLAGS_DEBUG)
set_flags(CMAKE_CXX_FLAGS_RELEASE CXX_FLAGS_RELEASE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/CopyDLL.cmake | CMake |
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# This script copies one of two supplied dlls into the build directory based on the build configuration.
# build_configuration - Should be passed in via:
# if(CMAKE_GENERATOR MATCHES "Visual Studio")
# set( build_configuration "$(ConfigurationName)" )
# else()
# set( build_configuration "${CMAKE_BUILD_TYPE}")
# endif()
#
# -D build_configuration:STRING=${build_configuration}
# output_directory - should be passed in via the following. If not supplied the current output directory is used.
#
# -D "output_directory:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${CMAKE_CFG_INTDIR}"
# source_dll - should be the release version or the single version if you don't have a debug version
#
# -D "source_dll:FILE=${path_to_source_dll}"
# source_debug_dll - should be the debug version of the dll (optional)
#
# -D "source_debug_dll:FILE=${path_to_source_debug_dll}"
# Validate the -D arguments documented above; build_configuration and
# source_dll are mandatory, the rest have sensible defaults.
if(NOT DEFINED build_configuration)
  message(FATAL_ERROR "build_configuration not specified")
endif()

if(NOT DEFINED output_directory)
  # Default to the directory cmake -P was invoked from.
  set(output_directory ".")
endif()

if(NOT DEFINED source_dll)
  message(FATAL_ERROR "source_dll not specified")
endif()

if(NOT DEFINED source_debug_dll)
  # With no dedicated debug dll, use the release dll for every configuration.
  set(source_debug_dll "${source_dll}")
endif()
# Compute the file name
# Pick the dll matching the build configuration, then copy it into the
# output directory under its own file name.
if(build_configuration STREQUAL Debug)
  set(source "${source_debug_dll}")
else()
  set(source "${source_dll}")
endif()

get_filename_component(filename "${source}" NAME)
# Fix: the destination previously interpolated a garbled token instead of
# the file name computed above, so the copy landed at a literal bogus path
# and "filename" was never used.
set(dest "${output_directory}/${filename}")
message(STATUS "Copying if different ${source} to ${dest}")
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different "${source}" "${dest}"
  RESULT_VARIABLE result
  )
if(result)
  message(FATAL_ERROR "Error copying dll")
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindCUDA.cmake | CMake | #.rst:
# FindCUDA
# --------
#
# Tools for building CUDA C files: libraries and build dependencies.
#
# This script locates the NVIDIA CUDA C tools. It should work on linux,
# windows, and mac and should be reasonably up to date with CUDA C
# releases.
#
# This script makes use of the standard find_package arguments of
# <VERSION>, REQUIRED and QUIET. CUDA_FOUND will report if an
# acceptable version of CUDA was found.
#
# The script will prompt the user to specify CUDA_TOOLKIT_ROOT_DIR if
# the prefix cannot be determined by the location of nvcc in the system
# path and REQUIRED is specified to find_package(). To use a different
# installed version of the toolkit set the environment variable
# CUDA_BIN_PATH before running cmake (e.g.
# CUDA_BIN_PATH=/usr/local/cuda1.0 instead of the default
# /usr/local/cuda) or set CUDA_TOOLKIT_ROOT_DIR after configuring. If
# you change the value of CUDA_TOOLKIT_ROOT_DIR, various components that
# depend on the path will be relocated.
#
# It might be necessary to set CUDA_TOOLKIT_ROOT_DIR manually on certain
# platforms, or to use a cuda runtime not installed in the default
# location. In newer versions of the toolkit the cuda library is
# included with the graphics driver- be sure that the driver version
# matches what is needed by the cuda runtime version.
#
# The following variables affect the behavior of the macros in the
# script (in alphabetical order).  Note that any of these flags can be
# changed multiple times in the same directory before calling
# CUDA_ADD_EXECUTABLE, CUDA_ADD_LIBRARY, CUDA_COMPILE, CUDA_COMPILE_PTX,
# CUDA_COMPILE_FATBIN, CUDA_COMPILE_CUBIN or CUDA_WRAP_SRCS::
#
# CUDA_64_BIT_DEVICE_CODE (Default matches host bit size)
# -- Set to ON to compile for 64 bit device code, OFF for 32 bit device code.
# Note that making this different from the host code when generating object
# or C files from CUDA code just won't work, because size_t gets defined by
# nvcc in the generated source. If you compile to PTX and then load the
# file yourself, you can mix bit sizes between device and host.
#
# CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE (Default ON)
# -- Set to ON if you want the custom build rule to be attached to the source
# file in Visual Studio. Turn OFF if you add the same cuda file to multiple
# targets.
#
# This allows the user to build the target from the CUDA file; however, bad
# things can happen if the CUDA source file is added to multiple targets.
# When performing parallel builds it is possible for the custom build
# command to be run more than once and in parallel causing cryptic build
# errors. VS runs the rules for every source file in the target, and a
# source can have only one rule no matter how many projects it is added to.
# When the rule is run from multiple targets race conditions can occur on
# the generated file. Eventually everything will get built, but if the user
# is unaware of this behavior, there may be confusion. It would be nice if
# this script could detect the reuse of source files across multiple targets
# and turn the option off for the user, but no good solution could be found.
#
# CUDA_BUILD_CUBIN (Default OFF)
# -- Set to ON to enable and extra compilation pass with the -cubin option in
# Device mode. The output is parsed and register, shared memory usage is
# printed during build.
#
# CUDA_BUILD_EMULATION (Default OFF for device mode)
# -- Set to ON for Emulation mode. -D_DEVICEEMU is defined for CUDA C files
# when CUDA_BUILD_EMULATION is TRUE.
#
# CUDA_ENABLE_BATCHING (Default OFF)
# -- Set to ON to enable batch compilation of CUDA source files. This only has
# effect on Visual Studio targets
#
# CUDA_GENERATED_OUTPUT_DIR (Default CMAKE_CURRENT_BINARY_DIR)
# -- Set to the path you wish to have the generated files placed. If it is
# blank output files will be placed in CMAKE_CURRENT_BINARY_DIR.
# Intermediate files will always be placed in
# CMAKE_CURRENT_BINARY_DIR/CMakeFiles.
#
# CUDA_GENERATE_DEPENDENCIES_DURING_CONFIGURE (Default ON for VS,
# OFF otherwise)
# -- Instead of waiting until build time compute dependencies, do it during
# configure time. Note that dependencies are still generated during
# build, so that if they change the build system can be updated. This
# mainly removes the need for configuring once after the first build to
#      mainly removes the need for configuring once after the first build to
#      load the dependencies into the build system.
#
# CUDA_CHECK_DEPENDENCIES_DURING_COMPILE (Default ON for VS,
# OFF otherwise)
# -- During build, the file level dependencies are checked. If all
# dependencies are older than the generated file, the generated file isn't
# compiled but touched (time stamp updated) so the driving build system
# thinks it has been compiled.
#
# CUDA_HOST_COMPILATION_CPP (Default ON)
# -- Set to OFF for C compilation of host code.
#
# CUDA_HOST_COMPILER (Default CMAKE_C_COMPILER, $(VCInstallDir)/bin for VS)
# -- Set the host compiler to be used by nvcc. Ignored if -ccbin or
# --compiler-bindir is already present in the CUDA_NVCC_FLAGS or
# CUDA_NVCC_FLAGS_<CONFIG> variables. For Visual Studio targets
# $(VCInstallDir)/bin is a special value that expands out to the path when
# the command is run from within VS.
#
# CUDA_NVCC_FLAGS
# CUDA_NVCC_FLAGS_<CONFIG>
# -- Additional NVCC command line arguments. NOTE: multiple arguments must be
# semi-colon delimited (e.g. --compiler-options;-Wall)
#
# CUDA_PROPAGATE_HOST_FLAGS (Default ON)
# -- Set to ON to propagate CMAKE_{C,CXX}_FLAGS and their configuration
# dependent counterparts (e.g. CMAKE_C_FLAGS_DEBUG) automatically to the
# host compiler through nvcc's -Xcompiler flag. This helps make the
# generated host code match the rest of the system better. Sometimes
# certain flags give nvcc problems, and this will help you turn the flag
# propagation off. This does not affect the flags supplied directly to nvcc
# via CUDA_NVCC_FLAGS or through the OPTION flags specified through
# CUDA_ADD_LIBRARY, CUDA_ADD_EXECUTABLE, or CUDA_WRAP_SRCS. Flags used for
# shared library compilation are not affected by this flag.
#
# CUDA_SEPARABLE_COMPILATION (Default OFF)
# -- If set this will enable separable compilation for all CUDA runtime object
# files. If used outside of CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY
# (e.g. calling CUDA_WRAP_SRCS directly),
# CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME and
# CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS should be called.
#
# CUDA_SOURCE_PROPERTY_FORMAT
# -- If this source file property is set, it can override the format specified
# to CUDA_WRAP_SRCS (OBJ, PTX, CUBIN, or FATBIN). If an input source file
# is not a .cu file, setting this file will cause it to be treated as a .cu
# file. See documentation for set_source_files_properties on how to set
# this property.
#
# CUDA_USE_STATIC_CUDA_RUNTIME (Default ON)
# -- When enabled the static version of the CUDA runtime library will be used
# in CUDA_LIBRARIES. If the version of CUDA configured doesn't support
# this option, then it will be silently disabled.
#
# CUDA_VERBOSE_BUILD (Default OFF)
# -- Set to ON to see all the commands used when building the CUDA file. When
# using a Makefile generator the value defaults to VERBOSE (run make
# VERBOSE=1 to see output), although setting CUDA_VERBOSE_BUILD to ON will
# always print the output.
#
# The script creates the following macros (in alphabetical order)::
#
# CUDA_ADD_CUFFT_TO_TARGET( cuda_target )
# -- Adds the cufft library to the target (can be any target). Handles whether
# you are in emulation mode or not.
#
# CUDA_ADD_CUBLAS_TO_TARGET( cuda_target )
# -- Adds the cublas library to the target (can be any target). Handles
# whether you are in emulation mode or not.
#
# CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ...
# [WIN32] [MACOSX_BUNDLE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
# -- Creates an executable "cuda_target" which is made up of the files
# specified. All of the non CUDA C files are compiled using the standard
# build rules specified by CMAKE and the cuda files are compiled to object
# files using nvcc and the host compiler. In addition CUDA_INCLUDE_DIRS is
# added automatically to include_directories(). Some standard CMake target
# calls can be used on the target after calling this macro
# (e.g. set_target_properties and target_link_libraries), but setting
# properties that adjust compilation flags will not affect code compiled by
# nvcc. Such flags should be modified before calling CUDA_ADD_EXECUTABLE,
# CUDA_ADD_LIBRARY or CUDA_WRAP_SRCS.
#
# CUDA_ADD_LIBRARY( cuda_target file0 file1 ...
# [STATIC | SHARED | MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
# -- Same as CUDA_ADD_EXECUTABLE except that a library is created.
#
# CUDA_BUILD_CLEAN_TARGET()
#    -- Creates a convenience target that deletes all the dependency files
# generated. You should make clean after running this target to ensure the
# dependency files get regenerated.
#
# CUDA_COMPILE( generated_files file0 file1 ... [STATIC | SHARED | MODULE]
# [OPTIONS ...] )
# -- Returns a list of generated files from the input source files to be used
# with ADD_LIBRARY or ADD_EXECUTABLE.
#
# CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] )
# -- Returns a list of PTX files generated from the input source files.
#
# CUDA_COMPILE_FATBIN( generated_files file0 file1 ... [OPTIONS ...] )
# -- Returns a list of FATBIN files generated from the input source files.
#
# CUDA_COMPILE_CUBIN( generated_files file0 file1 ... [OPTIONS ...] )
# -- Returns a list of CUBIN files generated from the input source files.
#
# CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME( output_file_var
# cuda_target
# object_files )
# -- Compute the name of the intermediate link file used for separable
# compilation. This file name is typically passed into
# CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS. output_file_var is produced
# based on cuda_target the list of objects files that need separable
# compilation as specified by object_files. If the object_files list is
# empty, then output_file_var will be empty. This function is called
# automatically for CUDA_ADD_LIBRARY and CUDA_ADD_EXECUTABLE. Note that
# this is a function and not a macro.
#
# CUDA_INCLUDE_DIRECTORIES( path0 path1 ... )
# -- Sets the directories that should be passed to nvcc
# (e.g. nvcc -Ipath0 -Ipath1 ... ). These paths usually contain other .cu
# files.
#
#
# CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS( output_file_var cuda_target
# nvcc_flags object_files)
# -- Generates the link object required by separable compilation from the given
# object files. This is called automatically for CUDA_ADD_EXECUTABLE and
# CUDA_ADD_LIBRARY, but can be called manually when using CUDA_WRAP_SRCS
# directly. When called from CUDA_ADD_LIBRARY or CUDA_ADD_EXECUTABLE the
# nvcc_flags passed in are the same as the flags passed in via the OPTIONS
# argument. The only nvcc flag added automatically is the bitness flag as
# specified by CUDA_64_BIT_DEVICE_CODE. Note that this is a function
# instead of a macro.
#
# CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures])
# -- Selects GPU arch flags for nvcc based on target_CUDA_architectures
# target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...)
# - "Auto" detects local machine GPU compute arch at runtime.
# - "Common" and "All" cover common and entire subsets of architectures
# ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX
# NAME: Fermi Kepler Maxwell Kepler+Tegra Kepler+Tesla Maxwell+Tegra Pascal
# NUM: Any number. Only those pairs are currently accepted by NVCC though:
# 2.0 2.1 3.0 3.2 3.5 3.7 5.0 5.2 5.3 6.0 6.2
# Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable}
# Additionally, sets ${out_variable}_readable to the resulting numeric list
# Example:
# CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell)
# LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
#
# More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA
# Note that this is a function instead of a macro.
#
# CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ...
# [STATIC | SHARED | MODULE] [OPTIONS ...] )
# -- This is where all the magic happens. CUDA_ADD_EXECUTABLE,
# CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this
# function under the hood.
#
# Given the list of files (file0 file1 ... fileN) this macro generates
# custom commands that generate either PTX or linkable objects (use "PTX" or
# "OBJ" for the format argument to switch). Files that don't end with .cu
# or have the HEADER_FILE_ONLY property are ignored.
#
# The arguments passed in after OPTIONS are extra command line options to
# give to nvcc. You can also specify per configuration options by
# specifying the name of the configuration followed by the options. General
# options must precede configuration specific options. Not all
# configurations need to be specified, only the ones provided will be used.
#
# OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag"
# DEBUG -g
# RELEASE --use_fast_math
# RELWITHDEBINFO --use_fast_math;-g
# MINSIZEREL --use_fast_math
#
# For certain configurations (namely VS generating object files with
# CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will
# be produced for the given cuda file. This is because when you add the
# cuda file to Visual Studio it knows that this file produces an object file
# and will link in the resulting object file automatically.
#
# This script will also generate a separate cmake script that is used at
# build time to invoke nvcc. This is for several reasons.
#
# 1. nvcc can return negative numbers as return values which confuses
# Visual Studio into thinking that the command succeeded. The script now
# checks the error codes and produces errors when there was a problem.
#
# 2. nvcc has been known to not delete incomplete results when it
# encounters problems. This confuses build systems into thinking the
# target was generated when in fact an unusable file exists. The script
# now deletes the output files if there was an error.
#
# 3. By putting all the options that affect the build into a file and then
# make the build rule dependent on the file, the output files will be
# regenerated when the options change.
#
# This script also looks at optional arguments STATIC, SHARED, or MODULE to
# determine when to target the object compilation for a shared library.
# BUILD_SHARED_LIBS is ignored in CUDA_WRAP_SRCS, but it is respected in
# CUDA_ADD_LIBRARY. On some systems special flags are added for building
# objects intended for shared libraries. A preprocessor macro,
# <target_name>_EXPORTS is defined when a shared library compilation is
# detected.
#
# Flags passed into add_definitions with -D or /D are passed along to nvcc.
#
#
#
# The script defines the following variables::
#
# CUDA_VERSION_MAJOR -- The major version of cuda as reported by nvcc.
# CUDA_VERSION_MINOR -- The minor version.
# CUDA_VERSION
# CUDA_VERSION_STRING -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR
# CUDA_HAS_FP16 -- Whether a short float (float16,fp16) is supported.
#
# CUDA_TOOLKIT_ROOT_DIR -- Path to the CUDA Toolkit (defined if not set).
# CUDA_SDK_ROOT_DIR -- Path to the CUDA SDK. Use this to find files in the
# SDK. This script will not directly support finding
# specific libraries or headers, as that isn't
# supported by NVIDIA. If you want to change
# libraries when the path changes see the
# FindCUDA.cmake script for an example of how to clear
# these variables. There are also examples of how to
# use the CUDA_SDK_ROOT_DIR to locate headers or
# libraries, if you so choose (at your own risk).
# CUDA_INCLUDE_DIRS -- Include directory for cuda headers. Added automatically
# for CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY.
# CUDA_LIBRARIES -- Cuda RT library.
# CUDA_CUDA_LIBRARY -- Cuda driver API library.
# CUDA_CUFFT_LIBRARIES -- Device or emulation library for the Cuda FFT
# implementation (alternative to:
# CUDA_ADD_CUFFT_TO_TARGET macro)
# CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS
# implementation (alternative to:
# CUDA_ADD_CUBLAS_TO_TARGET macro).
# CUDA_cudart_static_LIBRARY -- Statically linkable cuda runtime library.
# Only available for CUDA version 5.5+
# CUDA_cudadevrt_LIBRARY -- Device runtime library.
# Required for separable compilation.
# CUDA_cupti_LIBRARY -- CUDA Profiling Tools Interface library.
# Only available for CUDA version 4.0+.
# CUDA_curand_LIBRARY -- CUDA Random Number Generation library.
# Only available for CUDA version 3.2+.
# CUDA_cusolver_LIBRARY -- CUDA Direct Solver library.
# Only available for CUDA version 7.0+.
# CUDA_cusparse_LIBRARY -- CUDA Sparse Matrix library.
# Only available for CUDA version 3.2+.
# CUDA_npp_LIBRARY -- NVIDIA Performance Primitives lib.
# Only available for CUDA version 4.0+.
# CUDA_nppc_LIBRARY -- NVIDIA Performance Primitives lib (core).
# Only available for CUDA version 5.5+.
# CUDA_nppi_LIBRARY -- NVIDIA Performance Primitives lib (image processing).
# Only available for CUDA version 5.5+.
# CUDA_npps_LIBRARY -- NVIDIA Performance Primitives lib (signal processing).
# Only available for CUDA version 5.5+.
# CUDA_nvcuvenc_LIBRARY -- CUDA Video Encoder library.
# Only available for CUDA version 3.2+.
# Windows only.
# CUDA_nvcuvid_LIBRARY -- CUDA Video Decoder library.
# Only available for CUDA version 3.2+.
# Windows only.
#
# James Bigler, NVIDIA Corp (nvidia.com - jbigler)
# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# Copyright (c) 2007-2009
# Scientific Computing and Imaging Institute, University of Utah
#
# This code is licensed under the MIT License. See the FindCUDA.cmake script
# for the text of the license.
# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
# FindCUDA.cmake
# This macro helps us find the location of helper files we will need the full path to
# Locate a helper file shipped alongside this module under FindCUDA/ and cache
# its absolute path.
#
# Arguments:
#   _name      - base name of the helper file (without extension)
#   _extension - file extension (e.g. "cmake")
# Result:
#   CUDA_<_name> - INTERNAL cache variable holding the helper's full path.
# Failure:
#   FATAL_ERROR when find_package(CUDA REQUIRED) was used; STATUS message
#   otherwise (unless QUIET).
macro(CUDA_FIND_HELPER_FILE _name _extension)
  set(_full_name "${_name}.${_extension}")
  # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being
  # processed.  Using this variable, we can pull out the current path, and
  # provide a way to get access to the other files we need local to here.
  # NOTE(review): this overwrites the CMake built-in CMAKE_CURRENT_LIST_DIR
  # variable (this module predates it); harmless here since the computed value
  # matches the built-in, but worth confirming before refactoring.
  get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
  set(CUDA_${_name} "${CMAKE_CURRENT_LIST_DIR}/FindCUDA/${_full_name}")
  if(NOT EXISTS "${CUDA_${_name}}")
    set(error_message "${_full_name} not found in ${CMAKE_CURRENT_LIST_DIR}/FindCUDA")
    if(CUDA_FIND_REQUIRED)
      message(FATAL_ERROR "${error_message}")
    else()
      if(NOT CUDA_FIND_QUIETLY)
        message(STATUS "${error_message}")
      endif()
    endif()
  endif()
  # Set this variable as internal, so the user isn't bugged with it.
  set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE)
endmacro()
#####################################################################
## CUDA_INCLUDE_NVCC_DEPENDENCIES
##
# So we want to try and include the dependency file if it exists. If
# it doesn't exist then we need to create an empty one, so we can
# include it.
# If it does exist, then we need to check to see if all the files it
# depends on exist. If they don't then we should clear the dependency
# file and regenerate it later. This covers the case where a header
# file has disappeared or moved.
# Load <dependency_file> (a CMake-syntax dependency list generated by this
# module) and decide whether it must be regenerated.  Being a macro, the
# results land in the caller's scope:
#   CUDA_NVCC_DEPEND            - dependency list read from the file (or just
#                                 the dependency file itself when regenerating)
#   CUDA_NVCC_DEPEND_REGENERATE - TRUE when the file is missing/empty or lists
#                                 files that no longer exist (header deleted or
#                                 moved)
macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file)
  set(CUDA_NVCC_DEPEND)
  set(CUDA_NVCC_DEPEND_REGENERATE FALSE)
  # Include the dependency file.  Create it first if it doesn't exist.  The
  # INCLUDE puts a dependency that will force CMake to rerun and bring in the
  # new info when it changes.  DO NOT REMOVE THIS (as I did and spent a few
  # hours figuring out why it didn't work).
  if(NOT EXISTS ${dependency_file})
    file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n")
  endif()
  # Always include this file to force CMake to run again next
  # invocation and rebuild the dependencies.
  #message("including dependency_file = ${dependency_file}")
  include(${dependency_file})
  # Now we need to verify the existence of all the included files
  # here.  If they aren't there we need to just blank this variable and
  # make the file regenerate again.
#   if(DEFINED CUDA_NVCC_DEPEND)
#     message("CUDA_NVCC_DEPEND set")
#   else()
#     message("CUDA_NVCC_DEPEND NOT set")
#   endif()
  if(CUDA_NVCC_DEPEND)
    #message("CUDA_NVCC_DEPEND found")
    foreach(f ${CUDA_NVCC_DEPEND})
      # message("searching for ${f}")
      if(NOT EXISTS ${f})
        # A recorded dependency vanished: force regeneration.
        #message("file ${f} not found")
        set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
      endif()
    endforeach()
  else()
    #message("CUDA_NVCC_DEPEND false")
    # No dependencies, so regenerate the file.
    set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
  endif()
  #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}")
  # No incoming dependencies, so we need to generate them.  Make the
  # output depend on the dependency file itself, which should cause the
  # rule to re-run.
  if(CUDA_NVCC_DEPEND_REGENERATE)
    set(CUDA_NVCC_DEPEND ${dependency_file})
    #message("Generating an empty dependency_file: ${dependency_file}")
    file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n")
  endif()
endmacro()
###############################################################################
###############################################################################
# Setup variables' defaults
###############################################################################
###############################################################################
# Allow the user to specify if the device code is supposed to be 32 or 64 bit.
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON)
else()
set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF)
endif()
option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT})
# Attach the build rule to the source file in VS. This option
option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file. Enable only when the CUDA source file is added to at most one target." ON)
# Prints out extra information about the cuda file during compilation
option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." OFF)
# Set whether we are using emulation or device mode.
option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF)
# Enable batch builds
option(CUDA_ENABLE_BATCHING "Compile CUDA source files in parallel" OFF)
if(CUDA_ENABLE_BATCHING)
find_package(PythonInterp)
if(NOT PYTHONINTERP_FOUND)
message(SEND_ERROR "CUDA_ENABLE_BATCHING is enabled, but python wasn't found. Disabling")
set(CUDA_ENABLE_BATCHING OFF CACHE PATH "Compile CUDA source files in parallel" FORCE)
endif()
endif()
# Where to put the generated output.
set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files. If blank it will default to the CMAKE_CURRENT_BINARY_DIR")
# Visual Studio hooks its build rules up at configure time, so configure-time
# dependency generation/checking defaults ON there and OFF elsewhere.
if(CMAKE_GENERATOR MATCHES "Visual Studio")
  set(_cuda_dependencies_default ON)
else()
  set(_cuda_dependencies_default OFF)
endif()
option(CUDA_GENERATE_DEPENDENCIES_DURING_CONFIGURE "Generate dependencies during configure time instead of only during build time." ${_cuda_dependencies_default})
option(CUDA_CHECK_DEPENDENCIES_DURING_COMPILE "Checks the dependencies during compilation and suppresses generation if the dependencies have been met." ${_cuda_dependencies_default})
# Parse HOST_COMPILATION mode.
option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON)
# Extra user settable flags
set(CUDA_NVCC_FLAGS "" CACHE STRING "Semi-colon delimit multiple arguments.")
# Pick the host-side compiler nvcc will invoke via -ccbin.
if(CMAKE_GENERATOR MATCHES "Visual Studio")
  # Use MSBuild macros so the compiler tracks the active VS toolset at build
  # time; the $(VCToolsVersion) layout exists for MSVC >= 1910 (VS 2017+),
  # older toolsets keep the compiler directly under $(VCInstallDir)bin.
  set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)Tools/MSVC/$(VCToolsVersion)/bin/Host$(Platform)/$(PlatformTarget)")
  if(MSVC_VERSION LESS 1910)
    set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)bin")
  endif()
  set(CUDA_HOST_COMPILER "${_CUDA_MSVC_HOST_COMPILER}" CACHE FILEPATH "Host side compiler used by NVCC")
else()
  if(APPLE
      AND "${CMAKE_C_COMPILER_ID}" MATCHES "Clang"
      AND "${CMAKE_C_COMPILER}" MATCHES "/cc$")
    # Using cc which is symlink to clang may let NVCC think it is GCC and issue
    # unhandled -dumpspecs option to clang.  Also in case neither
    # CMAKE_C_COMPILER is defined (project does not use C language) nor
    # CUDA_HOST_COMPILER is specified manually we should skip -ccbin and let
    # nvcc use its own default C compiler.
    # Only care about this on APPLE with clang to avoid
    # following symlinks to things like ccache
    if(DEFINED CMAKE_C_COMPILER AND NOT DEFINED CUDA_HOST_COMPILER)
      get_filename_component(c_compiler_realpath "${CMAKE_C_COMPILER}" REALPATH)
      # if the real path does not end up being clang then
      # go back to using CMAKE_C_COMPILER
      if(NOT "${c_compiler_realpath}" MATCHES "/clang$")
        set(c_compiler_realpath "${CMAKE_C_COMPILER}")
      endif()
    else()
      # Empty value => nvcc is invoked without -ccbin and picks its own default.
      set(c_compiler_realpath "")
    endif()
    set(CUDA_HOST_COMPILER "${c_compiler_realpath}" CACHE FILEPATH "Host side compiler used by NVCC")
  else()
    set(CUDA_HOST_COMPILER "${CMAKE_C_COMPILER}"
      CACHE FILEPATH "Host side compiler used by NVCC")
  endif()
endif()
# Set up CUDA_VC_VARS_ALL_BAT if it hasn't been specified.
# set CUDA_VC_VARS_ALL_BAT explicitly to avoid any attempts to locate it via this algorithm.
# Batching shells out to the MSVC build environment, which is why this is only
# needed when CUDA_ENABLE_BATCHING is on.
if(MSVC AND NOT CUDA_VC_VARS_ALL_BAT AND CUDA_ENABLE_BATCHING)
  # Start from the directory containing the C++ compiler and walk up to where
  # each VS version keeps vcvarsall.bat.
  get_filename_component(_cuda_dependency_ccbin_dir "${CMAKE_CXX_COMPILER}" DIRECTORY)
  #message(STATUS "_cuda_dependency_ccbin_dir = ${_cuda_dependency_ccbin_dir}")
  # In VS 6-12 (1200-1800) the versions were 6 off. Starting in VS 14 (1900) it's only 5.
  if(MSVC_VERSION VERSION_LESS 1900)
    math(EXPR vs_major_version "${MSVC_VERSION} / 100 - 6")
    find_file( CUDA_VC_VARS_ALL_BAT vcvarsall.bat PATHS "${_cuda_dependency_ccbin_dir}/../.." NO_DEFAULT_PATH )
  elseif(MSVC_VERSION VERSION_EQUAL 1900)
    # Visual Studio 2015
    set(vs_major_version "15")
    find_file( CUDA_VC_VARS_ALL_BAT vcvarsall.bat PATHS "${_cuda_dependency_ccbin_dir}/../.." NO_DEFAULT_PATH )
  elseif(MSVC_VERSION VERSION_LESS 1920)
    # Visual Studio 2017
    set(vs_major_version "15")
    find_file( CUDA_VC_VARS_ALL_BAT vcvarsall.bat PATHS "${_cuda_dependency_ccbin_dir}/../../../../../../Auxiliary/Build" NO_DEFAULT_PATH )
  else()
    # Visual Studio 2019
    set(vs_major_version "16")
    find_file( CUDA_VC_VARS_ALL_BAT vcvarsall.bat PATHS "${_cuda_dependency_ccbin_dir}/../../../../../../Auxiliary/Build" NO_DEFAULT_PATH )
  endif()
  if( NOT CUDA_VC_VARS_ALL_BAT )
    # See if we can get VS install location from the registry. Registry searches can only
    # be accomplished via a CACHE variable, unfortunately.
    get_filename_component(_cuda_vs_dir_tmp "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\${vs_major_version}.0\\Setup\\VS;ProductDir]" REALPATH CACHE)
    if( _cuda_vs_dir_tmp )
      set( CUDA_VS_DIR ${_cuda_vs_dir_tmp} )
      # Drop the throwaway cache entry created for the registry lookup.
      unset( _cuda_vs_dir_tmp CACHE )
    endif()
    find_file( CUDA_VC_VARS_ALL_BAT vcvarsall.bat PATHS ${CUDA_VS_DIR}/VC/bin ${CUDA_VS_DIR}/VC/Auxiliary/Build NO_DEFAULT_PATH )
  endif()
  if( NOT CUDA_VC_VARS_ALL_BAT )
    message(FATAL_ERROR "Cannot find path to vcvarsall.bat. Looked in ${CUDA_VS_DIR}/VC/bin ${CUDA_VS_DIR}/VC/Auxiliary/Build")
  endif()
  #message("CUDA_VS_DIR = ${CUDA_VS_DIR}, CUDA_VC_VARS_ALL_BAT = ${CUDA_VC_VARS_ALL_BAT}")
endif()
# Propagate the host flags to the host compiler via -Xcompiler
# BUG FIX: the cache help text read "Propage ... via -Xcompile"; corrected the
# typos ("Propagate", "-Xcompiler" -- the actual nvcc option name).
option(CUDA_PROPAGATE_HOST_FLAGS "Propagate C/CXX_FLAGS and friends to the host compiler via -Xcompiler" ON)
# Enable CUDA_SEPARABLE_COMPILATION
option(CUDA_SEPARABLE_COMPILATION "Compile CUDA objects with separable compilation enabled. Requires CUDA 5.0+" OFF)
# Specifies whether the commands used when compiling the .cu file will be printed out.
option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file. With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF)
# Hide the knobs above from the basic cmake-gui/ccmake view.
mark_as_advanced(
  CUDA_64_BIT_DEVICE_CODE
  CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE
  CUDA_GENERATED_OUTPUT_DIR
  CUDA_HOST_COMPILATION_CPP
  CUDA_NVCC_FLAGS
  CUDA_PROPAGATE_HOST_FLAGS
  CUDA_BUILD_CUBIN
  CUDA_BUILD_EMULATION
  CUDA_VERBOSE_BUILD
  CUDA_SEPARABLE_COMPILATION
  )
# Makefile and similar generators don't define CMAKE_CONFIGURATION_TYPES, so we
# need to add another entry for the CMAKE_BUILD_TYPE.  We also need to add the
# standard set of 4 build types (Debug, MinSizeRel, Release, and RelWithDebInfo)
# for completeness.  We need run this loop in order to accommodate the addition
# of extra configuration types.  Duplicate entries will be removed by
# REMOVE_DUPLICATES.
set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo)
list(REMOVE_DUPLICATES CUDA_configuration_types)
# Expose a per-configuration NVCC flags cache variable for each configuration
# (e.g. CUDA_NVCC_FLAGS_DEBUG), hidden in the advanced view.
foreach(config ${CUDA_configuration_types})
    string(TOUPPER ${config} config_upper)
    set(CUDA_NVCC_FLAGS_${config_upper} "" CACHE STRING "Semi-colon delimit multiple arguments.")
    mark_as_advanced(CUDA_NVCC_FLAGS_${config_upper})
endforeach()
###############################################################################
###############################################################################
# Locate CUDA, Set Build Type, etc.
###############################################################################
###############################################################################
# Clear every cached CUDA include/library/executable result so the toolkit is
# re-detected from scratch on the next configure pass.  Invoked below when
# CUDA_TOOLKIT_ROOT_DIR or CUDA_TOOLKIT_TARGET_DIR changes.
macro(cuda_unset_include_and_libraries)
  unset(CUDA_TOOLKIT_INCLUDE CACHE)
  unset(CUDA_CUDART_LIBRARY CACHE)
  unset(CUDA_CUDA_LIBRARY CACHE)
  # Make sure you run this before you unset CUDA_VERSION.
  if(CUDA_VERSION VERSION_EQUAL "3.0")
    # This only existed in the 3.0 version of the CUDA toolkit
    unset(CUDA_CUDARTEMU_LIBRARY CACHE)
  endif()
  # BUG FIX: CUDA_NVASM_EXECUTABLE and CUDA_FATBINARY_EXECUTABLE are cache
  # entries created by find_program(); the previous `if(DEFINED ...) unset(...)`
  # only removed a same-named normal variable and left the cache entry behind,
  # so these programs were never re-detected after the toolkit directory
  # changed.  unset(... CACHE) matches every other entry cleared here and is a
  # no-op when the entry does not exist, so no DEFINED guard is needed.
  unset(CUDA_NVASM_EXECUTABLE CACHE)
  unset(CUDA_FATBINARY_EXECUTABLE CACHE)
  unset(CUDA_cudart_static_LIBRARY CACHE)
  unset(CUDA_cudadevrt_LIBRARY CACHE)
  unset(CUDA_cublas_LIBRARY CACHE)
  unset(CUDA_cublas_device_LIBRARY CACHE)
  unset(CUDA_cublasemu_LIBRARY CACHE)
  unset(CUDA_cufft_LIBRARY CACHE)
  unset(CUDA_cufftemu_LIBRARY CACHE)
  unset(CUDA_cupti_LIBRARY CACHE)
  unset(CUDA_curand_LIBRARY CACHE)
  unset(CUDA_cusolver_LIBRARY CACHE)
  unset(CUDA_cusparse_LIBRARY CACHE)
  unset(CUDA_npp_LIBRARY CACHE)
  unset(CUDA_nppc_LIBRARY CACHE)
  unset(CUDA_nppi_LIBRARY CACHE)
  unset(CUDA_npps_LIBRARY CACHE)
  unset(CUDA_nvcuvenc_LIBRARY CACHE)
  unset(CUDA_nvcuvid_LIBRARY CACHE)
  unset(CUDA_nvrtc_LIBRARY CACHE)
  unset(CUDA_USE_STATIC_CUDA_RUNTIME CACHE)
  unset(CUDA_GPU_DETECT_OUTPUT CACHE)
endmacro()
# Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed,
# if they have then clear the cache variables, so that will be detected again.
# NOTE(review): the *_INTERNAL mirror variables are presumably cached after a
# successful detection later in this module (outside this chunk) -- verify.
if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")
  unset(CUDA_TOOLKIT_TARGET_DIR CACHE)
  unset(CUDA_NVCC_EXECUTABLE CACHE)
  cuda_unset_include_and_libraries()
  unset(CUDA_VERSION CACHE)
endif()
# A changed target dir invalidates the libraries/headers but not nvcc itself.
if(NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}")
  cuda_unset_include_and_libraries()
endif()
#
# End of unset()
#
#
# Start looking for things
#
# Search for the cuda distribution.  The environment variables are consulted
# first (NO_DEFAULT_PATH) so an explicitly selected toolkit beats whatever
# happens to be installed in the system locations.
if(NOT CUDA_TOOLKIT_ROOT_DIR AND NOT CMAKE_CROSSCOMPILING)
  # Search in the CUDA_BIN_PATH first.
  find_path(CUDA_TOOLKIT_ROOT_DIR
    NAMES nvcc nvcc.exe
    PATHS
      ENV CUDA_TOOLKIT_ROOT
      ENV CUDA_PATH
      ENV CUDA_BIN_PATH
    PATH_SUFFIXES bin bin64
    DOC "Toolkit location."
    NO_DEFAULT_PATH
    )
  # Now search default paths
  find_path(CUDA_TOOLKIT_ROOT_DIR
    NAMES nvcc nvcc.exe
    PATHS /opt/cuda/bin
          /usr/local/bin
          /usr/local/cuda/bin
    DOC "Toolkit location."
    )
  if (CUDA_TOOLKIT_ROOT_DIR)
    # We found nvcc's bin directory; strip the trailing bin/bin64 component so
    # the variable points at the toolkit root itself.
    string(REGEX REPLACE "[/\\\\]?bin[64]*[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR})
    # We need to force this back into the cache.
    set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." FORCE)
    set(CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR})
  endif()
  if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
    if(CUDA_FIND_REQUIRED)
      message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR")
    elseif(NOT CUDA_FIND_QUIETLY)
      message("CUDA_TOOLKIT_ROOT_DIR not found or specified")
    endif()
  endif ()
endif ()
# When cross compiling, the toolkit lives under targets/<triple> and programs
# must be located with find_host_program (provided by the toolchain file).
if(CMAKE_CROSSCOMPILING)
  SET (CUDA_TOOLKIT_ROOT $ENV{CUDA_TOOLKIT_ROOT})
  # Map the target processor/ABI to the toolkit's target directory name.
  if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a")
    # Support for NVPACK
    set (CUDA_TOOLKIT_TARGET_NAME "armv7-linux-androideabi")
  elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
    # Support for arm cross compilation
    set(CUDA_TOOLKIT_TARGET_NAME "armv7-linux-gnueabihf")
  elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
    # Support for aarch64 cross compilation
    if (ANDROID_ARCH_NAME STREQUAL "arm64")
      set(CUDA_TOOLKIT_TARGET_NAME "aarch64-linux-androideabi")
    else()
      set(CUDA_TOOLKIT_TARGET_NAME "aarch64-linux")
    endif (ANDROID_ARCH_NAME STREQUAL "arm64")
  endif()
  if (EXISTS "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}")
    set(CUDA_TOOLKIT_TARGET_DIR "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}" CACHE PATH "CUDA Toolkit target location.")
    SET (CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT})
    mark_as_advanced(CUDA_TOOLKIT_TARGET_DIR)
  endif()
  # add known CUDA target root path to the set of directories we search for programs, libraries and headers
  set( CMAKE_FIND_ROOT_PATH "${CUDA_TOOLKIT_TARGET_DIR};${CMAKE_FIND_ROOT_PATH}")
  macro( cuda_find_host_program )
    find_host_program( ${ARGN} )
  endmacro()
else()
  # for non-cross-compile, find_host_program == find_program and CUDA_TOOLKIT_TARGET_DIR == CUDA_TOOLKIT_ROOT_DIR
  macro( cuda_find_host_program )
    find_program( ${ARGN} )
  endmacro()
  SET (CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR})
endif()
# CUDA_NVCC_EXECUTABLE
# Each program below is searched twice: first restricted to the selected
# toolkit (NO_DEFAULT_PATH) so a toolkit-local copy wins, then with the
# default search paths as a fallback.
cuda_find_host_program(CUDA_NVCC_EXECUTABLE
  NAMES nvcc
  PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
  ENV CUDA_PATH
  ENV CUDA_BIN_PATH
  PATH_SUFFIXES bin bin64
  NO_DEFAULT_PATH
  )
# Search default search paths, after we search our own set of paths.
cuda_find_host_program(CUDA_NVCC_EXECUTABLE nvcc)
mark_as_advanced(CUDA_NVCC_EXECUTABLE)
# CUDA_NVASM_EXECUTABLE (internal NVIDIA assembler; may not exist in all
# toolkits, in which case the variable stays NOTFOUND).
cuda_find_host_program(CUDA_NVASM_EXECUTABLE
  NAMES nvasm_internal
  PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
  ENV CUDA_PATH
  ENV CUDA_BIN_PATH
  PATH_SUFFIXES bin bin64
  NO_DEFAULT_PATH
  )
# Search default search paths, after we search our own set of paths.
cuda_find_host_program(CUDA_NVASM_EXECUTABLE nvasm_internal)
mark_as_advanced(CUDA_NVASM_EXECUTABLE)
# Find fatbinary from CUDA
cuda_find_host_program(CUDA_FATBINARY_EXECUTABLE
  NAMES fatbinary
  PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
  ENV CUDA_PATH
  ENV CUDA_BIN_PATH
  PATH_SUFFIXES bin bin64
  NO_DEFAULT_PATH
  )
# Search default search paths, after we search our own set of paths.
cuda_find_host_program(CUDA_FATBINARY_EXECUTABLE fatbinary)
mark_as_advanced(CUDA_FATBINARY_EXECUTABLE)
# Determine the toolkit version.  When nvcc is available and no cached value
# exists, parse "release X.Y" out of `nvcc --version`; otherwise derive the
# major/minor parts from the cached CUDA_VERSION string.
if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION)
  # Compute the version.
  execute_process (COMMAND ${CUDA_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT)
  string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT})
  string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT})
  set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.")
  mark_as_advanced(CUDA_VERSION)
else()
  # Need to set these based off of the cached value
  string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR "${CUDA_VERSION}")
  string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR "${CUDA_VERSION}")
endif()
# Always set this convenience variable
set(CUDA_VERSION_STRING "${CUDA_VERSION}")
# Here we need to determine if the version we found is acceptable.  We will
# assume that is unless CUDA_FIND_VERSION_EXACT or CUDA_FIND_VERSION is
# specified.  The presence of either of these options checks the version
# string and signals if the version is acceptable or not.
set(_cuda_version_acceptable TRUE)
# EXACT requires a precise match of the requested version.
if(CUDA_FIND_VERSION_EXACT AND NOT CUDA_VERSION VERSION_EQUAL CUDA_FIND_VERSION)
  set(_cuda_version_acceptable FALSE)
endif()
# Otherwise the requested version is a minimum.
if(CUDA_FIND_VERSION AND CUDA_VERSION VERSION_LESS CUDA_FIND_VERSION)
  set(_cuda_version_acceptable FALSE)
endif()
#
if(NOT _cuda_version_acceptable)
  set(_cuda_error_message "Requested CUDA version ${CUDA_FIND_VERSION}, but found unacceptable version ${CUDA_VERSION}")
  if(CUDA_FIND_REQUIRED)
    # BUG FIX: a find_package(CUDA x.y REQUIRED) must not continue configuring
    # when only an unacceptable version is available; previously this branch
    # printed the same non-fatal message as the optional case below.
    message(FATAL_ERROR "${_cuda_error_message}")
  elseif(NOT CUDA_FIND_QUIETLY)
    message("${_cuda_error_message}")
  endif()
endif()
# CUDA_TOOLKIT_INCLUDE
# Toolkit-restricted search first, then default paths as a fallback.
find_path(CUDA_TOOLKIT_INCLUDE
  device_functions.h # Header included in toolkit
  PATHS ${CUDA_TOOLKIT_TARGET_DIR}
  ENV CUDA_PATH
  ENV CUDA_INC_PATH
  PATH_SUFFIXES include
  NO_DEFAULT_PATH
  )
# Search default search paths, after we search our own set of paths.
find_path(CUDA_TOOLKIT_INCLUDE device_functions.h)
mark_as_advanced(CUDA_TOOLKIT_INCLUDE)
# fp16 (half precision) support shipped with CUDA 7.5; detect it either via
# the version number or by the presence of the cuda_fp16.h header.
if (CUDA_VERSION VERSION_GREATER "7.0" OR EXISTS "${CUDA_TOOLKIT_INCLUDE}/cuda_fp16.h")
  set(CUDA_HAS_FP16 TRUE)
else()
  set(CUDA_HAS_FP16 FALSE)
endif()
# Set the user list of include dir to nothing to initialize it.
set (CUDA_NVCC_INCLUDE_ARGS_USER "")
set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE})
# Find a CUDA library, preferring the toolkit's own directories over any
# system-default locations.
#
# Arguments:
#   _var      - result cache variable for the library path
#   _names    - library name(s) to search for
#   _doc      - docstring for the cache entry
#   _path_ext - extra path prefix inside the toolkit (e.g. "extras/CUPTI/")
macro(cuda_find_library_local_first_with_path_ext _var _names _doc _path_ext )
  if(CMAKE_SIZEOF_VOID_P EQUAL 8)
    # CUDA 3.2+ on Windows moved the library directories, so we need the new
    # and old paths.
    set(_cuda_64bit_lib_dir "${_path_ext}lib/x64" "${_path_ext}lib64" "${_path_ext}libx64" )
  endif()
  # CUDA 3.2+ on Windows moved the library directories, so we need to new
  # (lib/Win32) and the old path (lib).
  find_library(${_var}
    NAMES ${_names}
    PATHS "${CUDA_TOOLKIT_TARGET_DIR}"
    ENV CUDA_PATH
    ENV CUDA_LIB_PATH
    PATH_SUFFIXES ${_cuda_64bit_lib_dir} "${_path_ext}lib/Win32" "${_path_ext}lib" "${_path_ext}libWin32"
    DOC ${_doc}
    NO_DEFAULT_PATH
    )
  if (NOT CMAKE_CROSSCOMPILING)
    # Search default search paths, after we search our own set of paths.
    find_library(${_var}
      NAMES ${_names}
      PATHS "/usr/lib/nvidia-current"
      DOC ${_doc}
      )
  endif()
endmacro()
# Wrapper around cuda_find_library_local_first_with_path_ext() with an empty
# path prefix (i.e. search the toolkit's standard lib directories).
macro(cuda_find_library_local_first _var _names _doc)
  cuda_find_library_local_first_with_path_ext( "${_var}" "${_names}" "${_doc}" "" )
endmacro()
# Backwards-compatible alias kept for callers that predate the cuda_ prefix.
# (The stray fourth "" argument previously passed along was unused -- the
# callee declares exactly three parameters -- so it is simply forwarded no
# longer; behavior is unchanged.)
macro(find_library_local_first _var _names _doc )
  cuda_find_library_local_first( "${_var}" "${_names}" "${_doc}" )
endmacro()
# CUDA_LIBRARIES
# Locate the CUDA runtime library variants.
cuda_find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library")
if(CUDA_VERSION VERSION_EQUAL "3.0")
  # The cudartemu library only existed for the 3.0 version of CUDA.
  cuda_find_library_local_first(CUDA_CUDARTEMU_LIBRARY cudartemu "\"cudartemu\" library")
  mark_as_advanced(
    CUDA_CUDARTEMU_LIBRARY
    )
endif()
# The static runtime library first shipped with CUDA 5.5.
if(NOT CUDA_VERSION VERSION_LESS "5.5")
  cuda_find_library_local_first(CUDA_cudart_static_LIBRARY cudart_static "static CUDA runtime library")
  mark_as_advanced(CUDA_cudart_static_LIBRARY)
endif()
# CUDA_CUDART_LIBRARY_VAR names the variable that holds the runtime library
# actually selected (static when available and enabled, dynamic otherwise).
if(CUDA_cudart_static_LIBRARY)
  # If static cudart available, use it by default, but provide a user-visible option to disable it.
  option(CUDA_USE_STATIC_CUDA_RUNTIME "Use the static version of the CUDA runtime library if available" ON)
  set(CUDA_CUDART_LIBRARY_VAR CUDA_cudart_static_LIBRARY)
else()
  # If not available, silently disable the option.
  set(CUDA_USE_STATIC_CUDA_RUNTIME OFF CACHE INTERNAL "")
  set(CUDA_CUDART_LIBRARY_VAR CUDA_CUDART_LIBRARY)
endif()
# The device runtime (needed for separable compilation) appeared in CUDA 5.0.
if(NOT CUDA_VERSION VERSION_LESS "5.0")
  cuda_find_library_local_first(CUDA_cudadevrt_LIBRARY cudadevrt "\"cudadevrt\" library")
  mark_as_advanced(CUDA_cudadevrt_LIBRARY)
endif()
# The static runtime needs extra host libraries (pthreads, and on Linux also
# librt/libdl) that the dynamic runtime pulls in by itself.
if(CUDA_USE_STATIC_CUDA_RUNTIME)
  if(UNIX)
    # Check for the dependent libraries.  Here we look for pthreads.
    # Save the caller's CMAKE_THREAD_PREFER_PTHREAD so it can be restored
    # after the find_package(Threads) call below.
    if (DEFINED CMAKE_THREAD_PREFER_PTHREAD)
      set(_cuda_cmake_thread_prefer_pthread ${CMAKE_THREAD_PREFER_PTHREAD})
    endif()
    set(CMAKE_THREAD_PREFER_PTHREAD 1)
    # Many of the FindXYZ CMake comes with makes use of try_compile with int main(){return 0;}
    # as the source file.  Unfortunately this causes a warning with -Wstrict-prototypes and
    # -Werror causes the try_compile to fail.  We will just temporarily disable other flags
    # when doing the find_package command here.
    set(_cuda_cmake_c_flags ${CMAKE_C_FLAGS})
    set(CMAKE_C_FLAGS "-fPIC")
    find_package(Threads REQUIRED)
    set(CMAKE_C_FLAGS ${_cuda_cmake_c_flags})
    # Restore (or clear) the caller's CMAKE_THREAD_PREFER_PTHREAD setting.
    if (DEFINED _cuda_cmake_thread_prefer_pthread)
      set(CMAKE_THREAD_PREFER_PTHREAD ${_cuda_cmake_thread_prefer_pthread})
      unset(_cuda_cmake_thread_prefer_pthread)
    else()
      unset(CMAKE_THREAD_PREFER_PTHREAD)
    endif()
    if (NOT APPLE)
      #On Linux, you must link against librt when using the static cuda runtime.
      find_library(CUDA_rt_LIBRARY rt)
      # CMake has a CMAKE_DL_LIBS, but I'm not sure what version of CMake provides this
      find_library(CUDA_dl_LIBRARY dl)
      if (NOT CUDA_rt_LIBRARY)
        message(WARNING "Expecting to find librt for libcudart_static, but didn't find it.")
      endif()
      if (NOT CUDA_dl_LIBRARY)
        message(WARNING "Expecting to find libdl for libcudart_static, but didn't find it.")
      endif()
    endif()
  endif()
endif()
# CUPTI library showed up in cuda toolkit 4.0
if(NOT CUDA_VERSION VERSION_LESS "4.0")
cuda_find_library_local_first_with_path_ext(CUDA_cupti_LIBRARY cupti "\"cupti\" library" "extras/CUPTI/")
mark_as_advanced(CUDA_cupti_LIBRARY)
endif()
# Set the CUDA_LIBRARIES variable.  This is the set of stuff to link against
# if you are using the CUDA runtime.  For the dynamic version of the runtime,
# most of the dependencies are brought in, but for the static version there
# are additional libraries and linker commands needed.
# Initialize to empty
set(CUDA_LIBRARIES)

if(APPLE)
  # We need to add the path to cudart to the linker using rpath, since the library name
  # for the cuda libraries is prepended with @rpath.  We need to add the path to the
  # toolkit before we add the path to the driver below, so that we find our toolkit's
  # code first.
  if(CUDA_BUILD_EMULATION AND CUDA_CUDARTEMU_LIBRARY)
    get_filename_component(_cuda_path_to_cudart "${CUDA_CUDARTEMU_LIBRARY}" PATH)
  else()
    get_filename_component(_cuda_path_to_cudart "${CUDA_CUDART_LIBRARY}" PATH)
  endif()
  if(_cuda_path_to_cudart)
    list(APPEND CUDA_LIBRARIES -Wl,-rpath "-Wl,${_cuda_path_to_cudart}")
  endif()
endif()

# If we are using emulation mode and we found the cudartemu library then use
# that one instead of cudart.
if(CUDA_BUILD_EMULATION AND CUDA_CUDARTEMU_LIBRARY)
  list(APPEND CUDA_LIBRARIES ${CUDA_CUDARTEMU_LIBRARY})
elseif(CUDA_USE_STATIC_CUDA_RUNTIME AND CUDA_cudart_static_LIBRARY)
  # The static runtime additionally needs threads, plus librt/libdl when they
  # were found above (Linux).
  list(APPEND CUDA_LIBRARIES ${CUDA_cudart_static_LIBRARY} ${CMAKE_THREAD_LIBS_INIT})
  if (CUDA_rt_LIBRARY)
    list(APPEND CUDA_LIBRARIES ${CUDA_rt_LIBRARY})
  endif()
  if (CUDA_dl_LIBRARY)
    list(APPEND CUDA_LIBRARIES ${CUDA_dl_LIBRARY})
  endif()
  if(APPLE)
    # We need to add the default path to the driver (libcuda.dylib) as an rpath, so that
    # the static cuda runtime can find it at runtime.
    list(APPEND CUDA_LIBRARIES -Wl,-rpath,/usr/local/cuda/lib)
  endif()
else()
  # Default: link the dynamic CUDA runtime.
  list(APPEND CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY})
endif()

# 1.1 toolkit on linux doesn't appear to have a separate library on
# some platforms.
cuda_find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).")

mark_as_advanced(
  CUDA_CUDA_LIBRARY
  CUDA_CUDART_LIBRARY
  )
#######################
# Look for some of the toolkit helper libraries
# Locate toolkit library <_cuda_lib> next to the rest of the toolkit, caching
# the result in CUDA_<_cuda_lib>_LIBRARY and hiding it from the basic cache
# view.  Used below for cufft, cublas, npp, nvrtc, etc.
macro(find_cuda_helper_libs _cuda_lib)
  cuda_find_library_local_first(CUDA_${_cuda_lib}_LIBRARY ${_cuda_lib} "\"${_cuda_lib}\" library")
  mark_as_advanced(CUDA_${_cuda_lib}_LIBRARY)
endmacro()
#######################
# Disable emulation for v3.1 onward
if(CUDA_VERSION VERSION_GREATER "3.0")
  if(CUDA_BUILD_EMULATION)
    message(FATAL_ERROR "CUDA_BUILD_EMULATION is not supported in version 3.1 and onwards. You must disable it to proceed. You have version ${CUDA_VERSION}.")
  endif()
endif()

# Search for additional CUDA toolkit libraries.
if(CUDA_VERSION VERSION_LESS "3.1")
  # Emulation libraries aren't available in version 3.1 onward.
  find_cuda_helper_libs(cufftemu)
  find_cuda_helper_libs(cublasemu)
endif()
find_cuda_helper_libs(cufft)
find_cuda_helper_libs(cublas)
if(NOT CUDA_VERSION VERSION_LESS "3.2")
  # cusparse showed up in version 3.2
  find_cuda_helper_libs(cusparse)
  find_cuda_helper_libs(curand)
  if (WIN32)
    # Video encode/decode libraries (Windows only).
    find_cuda_helper_libs(nvcuvenc)
    find_cuda_helper_libs(nvcuvid)
  endif()
endif()
if(CUDA_VERSION VERSION_GREATER "5.0")
  find_cuda_helper_libs(cublas_device)
  # In CUDA 5.5 NPP was split into 3 separate libraries.
  find_cuda_helper_libs(nppc)
  find_cuda_helper_libs(nppi)
  find_cuda_helper_libs(npps)
  set(CUDA_npp_LIBRARY "${CUDA_nppc_LIBRARY};${CUDA_nppi_LIBRARY};${CUDA_npps_LIBRARY}")
elseif(NOT CUDA_VERSION VERSION_LESS "4.0")
  # Monolithic NPP library (CUDA 4.x / 5.0).
  find_cuda_helper_libs(npp)
endif()
if(NOT CUDA_VERSION VERSION_LESS "7.0")
  # cusolver showed up in version 7.0
  find_cuda_helper_libs(cusolver)
endif()
if(NOT CUDA_VERSION VERSION_LESS "7.5")
  # Runtime compilation library (nvrtc), available since CUDA 7.5.
  find_cuda_helper_libs(nvrtc)
endif()

# Convenience variables that select the emulation or device flavor of
# cufft/cublas for consumers.
if (CUDA_BUILD_EMULATION)
  set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY})
  set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY})
else()
  set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY})
  set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY})
endif()
########################
# Look for the SDK stuff.  As of CUDA 3.0 NVSDKCUDA_ROOT has been replaced with
# NVSDKCOMPUTE_ROOT with the old CUDA C contents moved into the C subdirectory
find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h
  HINTS
    "$ENV{NVSDKCOMPUTE_ROOT}/C"
    ENV NVSDKCUDA_ROOT
    "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]"
  PATHS
    "/Developer/GPU\ Computing/C"
  )

# Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the
# environment variables.
set(CUDA_SDK_SEARCH_PATH
  "${CUDA_SDK_ROOT_DIR}"
  "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2"
  "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2"
  "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK"
  "$ENV{HOME}/NVIDIA_CUDA_SDK"
  "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX"
  "/Developer/CUDA"
  )

# Example of how to find an include file from the CUDA_SDK_ROOT_DIR

# find_path(CUDA_CUT_INCLUDE_DIR
#   cutil.h
#   PATHS ${CUDA_SDK_SEARCH_PATH}
#   PATH_SUFFIXES "common/inc"
#   DOC "Location of cutil.h"
#   NO_DEFAULT_PATH
#   )
# # Now search system paths
# find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h")

# mark_as_advanced(CUDA_CUT_INCLUDE_DIR)

# Example of how to find a library in the CUDA_SDK_ROOT_DIR

# # cutil library is called cutil64 for 64 bit builds on windows.  We don't want
# # to get these confused, so we are setting the name based on the word size of
# # the build.

# if(CMAKE_SIZEOF_VOID_P EQUAL 8)
#   set(cuda_cutil_name cutil64)
# else()
#   set(cuda_cutil_name cutil32)
# endif()

# find_library(CUDA_CUT_LIBRARY
#   NAMES cutil ${cuda_cutil_name}
#   PATHS ${CUDA_SDK_SEARCH_PATH}
#   # The new version of the sdk shows up in common/lib, but the old one is in lib
#   PATH_SUFFIXES "common/lib" "lib"
#   DOC "Location of cutil library"
#   NO_DEFAULT_PATH
#   )
# # Now search system paths
# find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library")
# mark_as_advanced(CUDA_CUT_LIBRARY)
# set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY})

#############################
# Check for required components
set(CUDA_FOUND TRUE)

# Record the directories used this run.  NOTE(review): presumably these are
# compared on the next configure to detect a user-changed root and reset
# dependent cache entries — that logic lives earlier in this module; verify.
set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE)
set(CUDA_TOOLKIT_TARGET_DIR_INTERNAL "${CUDA_TOOLKIT_TARGET_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_TOOLKIT_TARGET_DIR was set successfully." FORCE)
set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE)

# Standard found/not-found handling: CUDA_FOUND plus a version check against
# the find_package(CUDA <version>) request.
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(CUDA
  REQUIRED_VARS
    CUDA_TOOLKIT_ROOT_DIR
    CUDA_NVCC_EXECUTABLE
    CUDA_INCLUDE_DIRS
    ${CUDA_CUDART_LIBRARY_VAR}
  VERSION_VAR
    CUDA_VERSION
  )
###############################################################################
###############################################################################
# Macros
###############################################################################
###############################################################################
###############################################################################
# Add include directories to pass to the nvcc command.
# Record extra include directories that will be forwarded to every nvcc
# invocation generated by this module, as -I<dir> arguments accumulated in
# CUDA_NVCC_INCLUDE_ARGS_USER (consumed later by CUDA_WRAP_SRCS).
macro(CUDA_INCLUDE_DIRECTORIES)
  foreach(_cuda_include_dir ${ARGN})
    list(APPEND CUDA_NVCC_INCLUDE_ARGS_USER -I${_cuda_include_dir})
  endforeach()
endmacro()
##############################################################################
# Locate the helper scripts that ship alongside this module; the resulting
# full paths land in CUDA_<name> variables (e.g. CUDA_run_nvcc, used by
# configure_file in CUDA_WRAP_SRCS below).
cuda_find_helper_file(parse_cubin cmake)
cuda_find_helper_file(make2cmake cmake)
cuda_find_helper_file(run_nvcc cmake)
include("${CMAKE_CURRENT_LIST_DIR}/FindCUDA/select_compute_arch.cmake")
##############################################################################
# Separate the OPTIONS out from the sources
#
# Split the argument list of a CUDA_* command into three buckets:
#   ${_sources}       - plain source files (everything before OPTIONS)
#   ${_cmake_options} - target-style keywords (WIN32, SHARED, ...), collected
#                       no matter where they appear in the list
#   ${_options}       - nvcc options (everything after the OPTIONS keyword)
macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _cmake_options _options)
  set(${_sources})
  set(${_cmake_options})
  set(${_options})
  # Flips to TRUE once the OPTIONS separator has been seen.
  set(_cuda_past_options_keyword FALSE)
  # Keywords that add_library/add_executable understand.
  set(_cuda_target_keywords WIN32 MACOSX_BUNDLE EXCLUDE_FROM_ALL STATIC SHARED MODULE)
  foreach(_cuda_sago_arg ${ARGN})
    list(FIND _cuda_target_keywords "${_cuda_sago_arg}" _cuda_sago_keyword_idx)
    if("x${_cuda_sago_arg}" STREQUAL "xOPTIONS")
      set(_cuda_past_options_keyword TRUE)
    elseif(_cuda_sago_keyword_idx GREATER -1)
      list(APPEND ${_cmake_options} ${_cuda_sago_arg})
    elseif(_cuda_past_options_keyword)
      list(APPEND ${_options} ${_cuda_sago_arg})
    else()
      # Anything else seen before OPTIONS is assumed to be a source file.
      list(APPEND ${_sources} ${_cuda_sago_arg})
    endif()
  endforeach()
endmacro()
##############################################################################
# Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix
#
# Sort a flag list of the form "-O2 DEBUG -g RELEASE -O3" into per-config
# variables.  After a configuration keyword (an upper-cased name from
# CUDA_configuration_types) is seen, every following flag is appended to
# ${_option_prefix}_<CONFIG> until the next configuration keyword; flags seen
# before any keyword go into the plain ${_option_prefix}.
macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix)
  set( _found_config )
  foreach(arg ${ARGN})
    # Determine if we are dealing with a per-configuration keyword.
    foreach(config ${CUDA_configuration_types})
      string(TOUPPER ${config} config_upper)
      if (arg STREQUAL "${config_upper}")
        # Remember the config suffix; it persists for subsequent iterations.
        set( _found_config _${arg})
        # Clear the loop variable so the keyword itself isn't appended below.
        set( arg )
      endif()
    endforeach()
    if ( arg )
      list(APPEND ${_option_prefix}${_found_config} "${arg}")
    endif()
  endforeach()
endmacro()
##############################################################################
# Helper to add the include directory for CUDA only once
# Append CUDA_INCLUDE_DIRS to this directory's include path, but only when an
# identical entry is not already present (avoids duplicate -I arguments when
# several CUDA targets are declared in one directory).
function(CUDA_ADD_CUDA_INCLUDE_ONCE)
  get_directory_property(_cuda_current_includes INCLUDE_DIRECTORIES)
  set(_cuda_existing_idx -1)
  if(_cuda_current_includes)
    # Element-wise comparison of each existing entry against the full
    # CUDA_INCLUDE_DIRS string.
    list(FIND _cuda_current_includes "${CUDA_INCLUDE_DIRS}" _cuda_existing_idx)
  endif()
  if(_cuda_existing_idx EQUAL -1)
    include_directories(${CUDA_INCLUDE_DIRS})
  endif()
endfunction()
# Decide which library kind keyword a CUDA_ADD_LIBRARY call should add.  When
# the caller already passed SHARED, MODULE or STATIC in ARGN the result is
# empty (nothing to add); otherwise the choice follows BUILD_SHARED_LIBS.
function(CUDA_BUILD_SHARED_LIBRARY shared_flag)
  set(_cuda_kind_given FALSE)
  foreach(_cuda_kind SHARED MODULE STATIC)
    list(FIND ARGN ${_cuda_kind} _cuda_kind_idx)
    if(_cuda_kind_idx GREATER -1)
      set(_cuda_kind_given TRUE)
    endif()
  endforeach()
  if(_cuda_kind_given)
    # Caller chose explicitly; add nothing.
    set(_cuda_chosen_kind)
  elseif(BUILD_SHARED_LIBS)
    set(_cuda_chosen_kind SHARED)
  else()
    set(_cuda_chosen_kind STATIC)
  endif()
  set(${shared_flag} ${_cuda_chosen_kind} PARENT_SCOPE)
endfunction()
##############################################################################
# Helper to avoid clashes of files with the same basename but different paths.
# This doesn't attempt to do exactly what CMake internals do, which is to only
# add this path when there is a conflict, since by the time a second collision
# in names is detected it's already too late to fix the first one. For
# consistency sake the relative path will be added to all files.
# Map a source file path to a safe relative directory for intermediate build
# products, so that two sources with the same basename in different
# directories do not collide.  Unlike CMake's internal scheme — which only
# disambiguates on an actual conflict — the relative path is applied to every
# file for consistency.
function(CUDA_COMPUTE_BUILD_PATH path build_path)
  # Normalize to CMake-style (forward-slash) paths before doing string work.
  file(TO_CMAKE_PATH "${path}" _cuda_safe_path)
  if (IS_ABSOLUTE "${_cuda_safe_path}")
    # Prefer a path relative to the binary dir when the file lives there
    # (e.g. generated sources); otherwise relativize against the source dir.
    string(FIND "${_cuda_safe_path}" "${CMAKE_CURRENT_BINARY_DIR}" _cuda_binary_dir_pos)
    if (_cuda_binary_dir_pos EQUAL 0)
      file(RELATIVE_PATH _cuda_safe_path "${CMAKE_CURRENT_BINARY_DIR}" "${_cuda_safe_path}")
    else()
      file(RELATIVE_PATH _cuda_safe_path "${CMAKE_CURRENT_SOURCE_DIR}" "${_cuda_safe_path}")
    endif()
  endif()

  # The sanitizing steps below follow the recipe from
  # cmLocalGenerator::CreateSafeUniqueObjectFileName in the CMake source.
  # Strip any leading slashes.
  string(REGEX REPLACE "^[/]+" "" _cuda_safe_path "${_cuda_safe_path}")
  # Colons (drive letters) would make the result absolute again.
  string(REPLACE ":" "_" _cuda_safe_path "${_cuda_safe_path}")
  # Keep ".." components from escaping the intermediate directory tree.
  string(REPLACE "../" "__/" _cuda_safe_path "${_cuda_safe_path}")
  # Spaces are awkward in generated paths.
  string(REPLACE " " "_" _cuda_safe_path "${_cuda_safe_path}")
  # Drop the filename last: removing the basename earlier could turn a path
  # like "path/../basename" into "path/.." (with a trailing component).
  get_filename_component(_cuda_safe_path "${_cuda_safe_path}" PATH)
  set(${build_path} "${_cuda_safe_path}" PARENT_SCOPE)
endfunction()
##############################################################################
##############################################################################
# CUDA WRAP SRCS
#
# This helper macro populates the following variables and setups up custom
# commands and targets to invoke the nvcc compiler to generate C or PTX source
# dependent upon the format parameter. The compiler is invoked once with -M
# to generate a dependency file and a second time with -cuda or -ptx to generate
# a .cpp or .ptx file.
# INPUT:
# cuda_target - Target name
# format - PTX, CUBIN, FATBIN or OBJ
# FILE1 .. FILEN - The remaining arguments are the sources to be wrapped.
# OPTIONS - Extra options to NVCC
# OUTPUT:
# generated_files - List of generated files
##############################################################################
##############################################################################
macro(CUDA_WRAP_SRCS cuda_target format generated_files)

  # If CMake doesn't support separable compilation, complain
  if(CUDA_SEPARABLE_COMPILATION AND CMAKE_VERSION VERSION_LESS "2.8.10.1")
    message(SEND_ERROR "CUDA_SEPARABLE_COMPILATION isn't supported for CMake versions less than 2.8.10.1")
  endif()

  # ---- Stage 1: assemble the nvcc flags shared by every file in ARGN -------

  # Set up all the command line flags here, so that they can be overridden on a per target basis.
  set(nvcc_flags "")

  # Emulation if the card isn't present.
  if (CUDA_BUILD_EMULATION)
    # Emulation.
    set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g)
  else()
    # Device mode.  No flags necessary.
  endif()

  # Pick whether host code is compiled as C or C++; this also selects the
  # CMAKE_<LANG>_* variables used below.
  if(CUDA_HOST_COMPILATION_CPP)
    set(CUDA_C_OR_CXX CXX)
  else()
    if(CUDA_VERSION VERSION_LESS "3.0")
      set(nvcc_flags ${nvcc_flags} --host-compilation C)
    else()
      message(WARNING "--host-compilation flag is deprecated in CUDA version >= 3.0. Removing --host-compilation C flag" )
    endif()
    set(CUDA_C_OR_CXX C)
  endif()

  # Object-file extension of the host toolchain (e.g. .o / .obj).
  set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION})

  if(CUDA_64_BIT_DEVICE_CODE)
    set(nvcc_flags ${nvcc_flags} -m64)
  else()
    set(nvcc_flags ${nvcc_flags} -m32)
  endif()

  if(CUDA_TARGET_CPU_ARCH)
    set(nvcc_flags ${nvcc_flags} "--target-cpu-architecture=${CUDA_TARGET_CPU_ARCH}")
  endif()

  # This needs to be passed in at this stage, because VS needs to fill out the
  # value of VCInstallDir from within VS.  Note that CCBIN is only used if
  # -ccbin or --compiler-bindir isn't used and CUDA_HOST_COMPILER matches
  # $(VCInstallDir)/bin.
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    set(ccbin_flags -D "\"CCBIN:PATH=${CUDA_HOST_COMPILER}\"" )
  else()
    set(ccbin_flags)
  endif()

  # Figure out which configuration we will use and pass that in as an argument
  # to the script.  We need to defer the decision until compilation time,
  # because for VS projects we won't know if we are making a debug or release
  # build until build time.
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    set( CUDA_build_configuration "$(ConfigurationName)" )
  else()
    set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}")
  endif()

  # Initialize our list of includes with the user ones followed by the CUDA system ones.
  set(CUDA_NVCC_INCLUDE_ARGS ${CUDA_NVCC_INCLUDE_ARGS_USER} "-I${CUDA_INCLUDE_DIRS}")
  # Get the include directories for this directory and use them for our nvcc command.
  get_directory_property(CUDA_NVCC_INCLUDE_DIRECTORIES INCLUDE_DIRECTORIES)
  if(CUDA_NVCC_INCLUDE_DIRECTORIES)
    foreach(dir ${CUDA_NVCC_INCLUDE_DIRECTORIES})
      list(APPEND CUDA_NVCC_INCLUDE_ARGS -I${dir})
    endforeach()
  endif()

  # Reset these variables before parsing the per-call OPTIONS.
  set(CUDA_WRAP_OPTION_NVCC_FLAGS)
  foreach(config ${CUDA_configuration_types})
    string(TOUPPER ${config} config_upper)
    set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper})
  endforeach()

  CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_cmake_options _cuda_wrap_options ${ARGN})
  CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options})

  # Figure out if we are building a shared library.  BUILD_SHARED_LIBS is
  # respected in CUDA_ADD_LIBRARY.
  set(_cuda_build_shared_libs FALSE)
  # SHARED, MODULE
  list(FIND _cuda_wrap_cmake_options SHARED _cuda_found_SHARED)
  list(FIND _cuda_wrap_cmake_options MODULE _cuda_found_MODULE)
  if(_cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1)
    set(_cuda_build_shared_libs TRUE)
  endif()
  # STATIC overrides SHARED/MODULE if both were given.
  list(FIND _cuda_wrap_cmake_options STATIC _cuda_found_STATIC)
  if(_cuda_found_STATIC GREATER -1)
    set(_cuda_build_shared_libs FALSE)
  endif()

  # CUDA_HOST_FLAGS
  if(_cuda_build_shared_libs)
    # If we are setting up code for a shared library, then we need to add extra flags for
    # compiling objects for shared libraries.
    set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS})
  else()
    set(CUDA_HOST_SHARED_FLAGS)
  endif()

  # If we are using batch building and we are using VS 2013+ we could have problems with
  # parallel accesses to the PDB files which comes from the parallel batch building. /FS
  # serializes the builds which fixes this. There doesn't seem any harm to add this for
  # PTX targets or for builds that don't do parallel building.
  set( EXTRA_FS_ARG )
  if( CUDA_BATCH_BUILD_LOG AND MSVC )
    if(NOT MSVC_VERSION VERSION_LESS 1800)
      set( EXTRA_FS_ARG "/FS" )
    endif()
  endif()

  # Only add the CMAKE_{C,CXX}_FLAGS if we are propagating host flags.  We
  # always need to set the SHARED_FLAGS, though.  Note: _cuda_host_flags holds
  # CMake *script text* that will be written into run_nvcc.cmake.
  if(CUDA_PROPAGATE_HOST_FLAGS)
    set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${CMAKE_${CUDA_C_OR_CXX}_FLAGS} ${CUDA_HOST_SHARED_FLAGS} ${EXTRA_FS_ARG})")
  else()
    set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${CUDA_HOST_SHARED_FLAGS} ${EXTRA_FS_ARG})")
  endif()

  set(_cuda_nvcc_flags_config "# Build specific configuration flags")
  # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake
  foreach(config ${CUDA_configuration_types})
    string(TOUPPER ${config} config_upper)
    # CMAKE_FLAGS are strings and not lists.  By not putting quotes around CMAKE_FLAGS
    # we convert the strings to lists (like we want).

    if(CUDA_PROPAGATE_HOST_FLAGS)
      # nvcc chokes on -g3 in versions previous to 3.0, so replace it with -g
      set(_cuda_fix_g3 FALSE)
      if(CMAKE_COMPILER_IS_GNUCC)
        if (CUDA_VERSION VERSION_LESS "3.0" OR
            CUDA_VERSION VERSION_EQUAL "4.1" OR
            CUDA_VERSION VERSION_EQUAL "4.2"
            )
          set(_cuda_fix_g3 TRUE)
        endif()
      endif()
      if(_cuda_fix_g3)
        string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
      else()
        set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
      endif()

      # Using the optimized debug information flag causes problems.
      if (MSVC)
        string(REPLACE "/Zo" "" _cuda_C_FLAGS "${_cuda_C_FLAGS}")
      endif()

      set(_cuda_host_flags "${_cuda_host_flags}\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})")
    endif()

    # Note that if we ever want CUDA_NVCC_FLAGS_<CONFIG> to be string (instead of a list
    # like it is currently), we can remove the quotes around the
    # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_<CONFIG> variable.
    set(_cuda_nvcc_flags_config "${_cuda_nvcc_flags_config}\nset(CUDA_NVCC_FLAGS_${config_upper} ${CUDA_NVCC_FLAGS_${config_upper}} ;; ${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}})")
  endforeach()

  # NVCC can't handle this C++ argument: nvcc invokes the C compiler during
  # certain phases, and that invocation complains about it.
  if( CMAKE_COMPILER_IS_GNUCXX )
    string(REPLACE "-fvisibility-inlines-hidden" "" _cuda_host_flags "${_cuda_host_flags}")
  endif()

  # Process the C++11 flag.  If the host sets the flag, we need to add it to
  # nvcc and remove it from the host, because -Xcompile -std=c++ will choke
  # nvcc (it uses the C preprocessor).  Use nvcc's own c++11 flag instead.
  if( "${_cuda_host_flags}" MATCHES "-std=c\\+\\+11" OR ( CMAKE_CXX_STANDARD EQUAL 11 AND NOT USING_WINDOWS_CL ) )
    # Add the c++11 flag to nvcc if it isn't already present.  Note that we only look at
    # the main flag instead of the configuration specific flags.
    if( NOT "${CUDA_NVCC_FLAGS}" MATCHES "-std;c\\+\\+11" )
      list(APPEND nvcc_flags --std c++11)
    endif()
    string(REGEX REPLACE "[-]+std=c\\+\\+11" "" _cuda_host_flags "${_cuda_host_flags}")
  endif()

  # Get the list of definitions from the directory property
  get_directory_property(CUDA_NVCC_DEFINITIONS COMPILE_DEFINITIONS)
  if(CUDA_NVCC_DEFINITIONS)
    foreach(_definition ${CUDA_NVCC_DEFINITIONS})
      list(APPEND nvcc_flags "-D${_definition}")
    endforeach()
  endif()

  if(_cuda_build_shared_libs)
    # Match what CMake defines for host objects of a shared library.
    list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS")
  endif()

  # Reset the output variable
  set(_cuda_wrap_generated_files "")

  # ---- Stage 2: one custom command per .cu (or format-tagged) source -------

  # Iterate over the macro arguments and create custom
  # commands for all the .cu files.
  foreach(file ${ARGN})
    # Ignore any file marked as a HEADER_FILE_ONLY
    get_source_file_property(_is_header ${file} HEADER_FILE_ONLY)
    # Allow per source file overrides of the format.  Also allows compiling non-.cu files.
    get_source_file_property(_cuda_source_format ${file} CUDA_SOURCE_PROPERTY_FORMAT)
    if((${file} MATCHES "\\.cu$" OR _cuda_source_format) AND NOT _is_header)

      if(NOT _cuda_source_format)
        set(_cuda_source_format ${format})
      endif()
      # If file isn't a .cu file, we need to tell nvcc to treat it as such.
      if(NOT ${file} MATCHES "\\.cu$")
        set(cuda_language_flag -x=cu)
      else()
        set(cuda_language_flag)
      endif()

      # Translate the requested format into an nvcc flag plus an output-file
      # extension.  OBJ means a normal host-linkable object file; everything
      # else produces an "external module" artifact (ptx/cubin/fatbin/custom).
      if( ${_cuda_source_format} MATCHES "OBJ")
        set( cuda_compile_to_external_module OFF )
      else()
        set( cuda_compile_to_external_module ON )
        if( ${_cuda_source_format} MATCHES "PTX" )
          set( cuda_compile_to_external_module_flag "-ptx")
          set( cuda_compile_to_external_module_type "ptx" )
        elseif( ${_cuda_source_format} MATCHES "CUBIN")
          set( cuda_compile_to_external_module_flag "-cubin" )
          set( cuda_compile_to_external_module_type "cubin" )
        elseif( ${_cuda_source_format} MATCHES "FATBIN")
          set( cuda_compile_to_external_module_flag "-fatbin" )
          set( cuda_compile_to_external_module_type "fatbin" )
        else()
          # Projects may register custom formats via a pair of variables:
          # CUDA_CUSTOM_SOURCE_FORMAT_FLAG_<fmt> / CUDA_CUSTOM_SOURCE_FORMAT_TYPE_<fmt>.
          if(DEFINED CUDA_CUSTOM_SOURCE_FORMAT_FLAG_${_cuda_source_format} AND DEFINED CUDA_CUSTOM_SOURCE_FORMAT_TYPE_${_cuda_source_format})
            set( cuda_compile_to_external_module_flag "${CUDA_CUSTOM_SOURCE_FORMAT_FLAG_${_cuda_source_format}}" )
            set( cuda_compile_to_external_module_type "${CUDA_CUSTOM_SOURCE_FORMAT_TYPE_${_cuda_source_format}}" )
          else()
            message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS or set with CUDA_SOURCE_PROPERTY_FORMAT file property for file '${file}': '${_cuda_source_format}'. Use OBJ, PTX, CUBIN or FATBIN.")
          endif()
        endif()
      endif()

      if(cuda_compile_to_external_module)
        # Don't use any of the host compilation flags for PTX targets.
        set(CUDA_HOST_FLAGS)
        set(CUDA_NVCC_FLAGS_CONFIG)
      else()
        set(CUDA_HOST_FLAGS ${_cuda_host_flags})
        set(CUDA_NVCC_FLAGS_CONFIG ${_cuda_nvcc_flags_config})
      endif()

      # Determine output directory
      cuda_compute_build_path("${file}" cuda_build_path)
      set(cuda_compile_intermediate_directory "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${cuda_build_path}")
      if(CUDA_GENERATED_OUTPUT_DIR)
        set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}")
      else()
        if ( cuda_compile_to_external_module )
          set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}")
        else()
          set(cuda_compile_output_dir "${cuda_compile_intermediate_directory}")
        endif()
      endif()

      # Add a custom target to generate a c or ptx file. ######################
      get_filename_component( basename ${file} NAME )
      if( cuda_compile_to_external_module )
        set(generated_file_path "${cuda_compile_output_dir}")
        set(generated_file_basename "${cuda_target}_generated_${basename}.${cuda_compile_to_external_module_type}")
        set(format_flag "${cuda_compile_to_external_module_flag}")
        file(MAKE_DIRECTORY "${cuda_compile_output_dir}")
      else()
        set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}")
        set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}")
        # -dc compiles for separable compilation (device link happens later).
        if(CUDA_SEPARABLE_COMPILATION)
          set(format_flag "-dc")
        else()
          set(format_flag "-c")
        endif()
      endif()

      # Set all of our file names.  Make sure that whatever filenames that have
      # generated_file_path in them get passed in through as a command line
      # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time
      # instead of configure time.
      set(generated_file "${generated_file_path}/${generated_file_basename}")
      set(cmake_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.depend")
      set(NVCC_generated_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.NVCC-depend")
      set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt")
      set(generated_fatbin_file "${generated_file_path}/${generated_file_basename}.fatbin.txt")
      set(custom_target_script "${cuda_compile_intermediate_directory}/${generated_file_basename}.cmake")

      # Setup properties for obj files:
      if( NOT cuda_compile_to_external_module )
        set_source_files_properties("${generated_file}"
          PROPERTIES
          EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked.
          )
      endif()

      # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path.
      get_filename_component(file_path "${file}" PATH)
      if(IS_ABSOLUTE "${file_path}")
        set(source_file "${file}")
      else()
        set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}")
      endif()

      # Collect objects that need the separable-compilation device link step.
      if( NOT cuda_compile_to_external_module AND CUDA_SEPARABLE_COMPILATION)
        list(APPEND ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS "${generated_file}")
      endif()

      # Convenience string for output ###########################################
      if(CUDA_BUILD_EMULATION)
        set(cuda_build_type "Emulation")
      else()
        set(cuda_build_type "Device")
      endif()

      # Build the NVCC made dependency file ###################################
      set(build_cubin OFF)
      if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN )
        if ( NOT cuda_compile_to_external_module )
          set ( build_cubin ON )
        endif()
      endif()

      # Configure the build script: run_nvcc.cmake with @VAR@ placeholders
      # filled in from the variables computed above.
      configure_file("${CUDA_run_nvcc}" "${custom_target_script}" @ONLY)

      # So if a user specifies the same cuda file as input more than once, you
      # can have bad things happen with dependencies.  Here we check an option
      # to see if this is the behavior they want.
      if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE)
        set(main_dep MAIN_DEPENDENCY ${source_file})
      else()
        set(main_dep DEPENDS ${source_file})
      endif()

      if(CUDA_VERBOSE_BUILD)
        set(verbose_output ON)
      elseif(CMAKE_GENERATOR MATCHES "Makefiles")
        set(verbose_output "$(VERBOSE)")
      else()
        set(verbose_output OFF)
      endif()

      # Create up the comment string
      file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}")
      if(cuda_compile_to_external_module)
        set(cuda_build_comment_string "Building NVCC ${cuda_compile_to_external_module_type} file ${generated_file_relative_path}")
      else()
        set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}")
      endif()

      # Bring in the dependencies.  Creates a variable CUDA_NVCC_DEPEND #######
      cuda_include_nvcc_dependencies(${cmake_dependency_file})

      # Check to see if the build script is newer than the dependency file.  If
      # it is, regenerate it.
      # message("CUDA_GENERATE_DEPENDENCIES_DURING_CONFIGURE = ${CUDA_GENERATE_DEPENDENCIES_DURING_CONFIGURE}")
      # message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}")
      # execute_process(COMMAND ls -lTtr "${custom_target_script}" "${cmake_dependency_file}" "${NVCC_generated_dependency_file}")
      set(_cuda_generate_dependencies FALSE)
      # Note that NVCC_generated_dependency_file is always generated.
      if(CUDA_GENERATE_DEPENDENCIES_DURING_CONFIGURE
          AND "${custom_target_script}" IS_NEWER_THAN "${NVCC_generated_dependency_file}")
        # If the two files were generated about the same time then reversing the
        # comparison will also be true, so check the CUDA_NVCC_DEPEND_REGENERATE
        # flag.
        if ("${NVCC_generated_dependency_file}" IS_NEWER_THAN "${custom_target_script}")
          # message("************************************************************************")
          # message("Same modification time: ${custom_target_script} ${NVCC_generated_dependency_file}")
          if (CUDA_NVCC_DEPEND_REGENERATE OR NOT EXISTS "${NVCC_generated_dependency_file}")
            set(_cuda_generate_dependencies TRUE)
          endif()
        else()
          # The timestamp check is valid
          set(_cuda_generate_dependencies TRUE)
        endif()
      endif()
      # message("_cuda_generate_dependencies = ${_cuda_generate_dependencies}")

      # If we needed to regenerate the dependency file, do so now.
      if (_cuda_generate_dependencies)
        set(_cuda_dependency_ccbin)
        # message("CUDA_HOST_COMPILER = ${CUDA_HOST_COMPILER}")
        if(ccbin_flags MATCHES "\\$\\(VCInstallDir\\)")
          # The Visual Studio $(VCInstallDir) macro can't be expanded at
          # configure time, so find a concrete compiler directory instead.
          set(_cuda_dependency_ccbin_dir)
          if (CUDA_VS_DIR AND EXISTS "${CUDA_VS_DIR}/VC/bin")
            set(_cuda_dependency_ccbin_dir "${CUDA_VS_DIR}/VC/bin")
          elseif( EXISTS "${CMAKE_CXX_COMPILER}" )
            get_filename_component(_cuda_dependency_ccbin_dir "${CMAKE_CXX_COMPILER}" DIRECTORY)
          endif()
          if( _cuda_dependency_ccbin_dir )
            set(_cuda_dependency_ccbin -D "CCBIN:PATH=${_cuda_dependency_ccbin_dir}")
          endif()
        elseif(ccbin_flags)
          # The CUDA_HOST_COMPILER is set to something interesting, so use the
          # ccbin_flags as-is.
          set(_cuda_dependency_ccbin ${ccbin_flags})
        endif()
        # message("_cuda_dependency_ccbin = ${_cuda_dependency_ccbin}")
        if(_cuda_dependency_ccbin OR NOT ccbin_flags)
          # Only do this if we have some kind of host compiler defined in
          # _cuda_dependency_ccbin or ccbin_flags isn't set.
          set( _execute_process_args
            COMMAND ${CMAKE_COMMAND}
            -D generate_dependency_only:BOOL=TRUE
            -D verbose:BOOL=TRUE
            ${_cuda_dependency_ccbin}
            -D "generated_file:STRING=${generated_file}"
            -D "generated_cubin_file:STRING=${generated_cubin_file}"
            -P "${custom_target_script}"
            WORKING_DIRECTORY "${cuda_compile_intermediate_directory}"
            RESULT_VARIABLE _cuda_dependency_error
            OUTPUT_VARIABLE _cuda_dependency_output
            ERROR_VARIABLE _cuda_dependency_output
            )
          if( CUDA_BATCH_DEPENDS_LOG )
            # Batch mode: record the command for an external driver instead of
            # running it now.
            file( APPEND ${CUDA_BATCH_DEPENDS_LOG} "COMMENT;Generating dependencies for ${file};${_execute_process_args}\n" )
          else()
            message(STATUS "Generating dependencies for ${file}")
            execute_process(
              ${_execute_process_args}
              )
          endif()
          if (_cuda_dependency_error)
            message(WARNING "Error (${_cuda_dependency_error}) generating dependencies for ${file}:\n\n${_cuda_dependency_output}. This will be postponed until build time.")
          else()
            # Try and reload the dependencies
            cuda_include_nvcc_dependencies(${cmake_dependency_file})
          endif()
        endif()
      endif()

      # Build the generated file and dependency file ##########################
      set( _custom_command_args
        OUTPUT ${generated_file}
        # These output files depend on the source_file and the contents of cmake_dependency_file
        ${main_dep}
        DEPENDS ${CUDA_NVCC_DEPEND}
        DEPENDS ${custom_target_script}
        # Make sure the output directory exists before trying to write to it.
        COMMAND ${CMAKE_COMMAND} -E make_directory "${generated_file_path}"
        COMMAND ${CMAKE_COMMAND}
          -D verbose:BOOL=${verbose_output}
          -D check_dependencies:BOOL=${CUDA_CHECK_DEPENDENCIES_DURING_COMPILE}
          ${ccbin_flags}
          -D build_configuration:STRING=${CUDA_build_configuration}
          -D "generated_file:STRING=${generated_file}"
          -D "generated_cubin_file:STRING=${generated_cubin_file}"
          -D "generated_fatbin_file:STRING=${generated_fatbin_file}"
          -P "${custom_target_script}"
        WORKING_DIRECTORY "${cuda_compile_intermediate_directory}"
        COMMENT "${cuda_build_comment_string}"
        )
      if( CUDA_BATCH_BUILD_LOG )
        # Batch mode: also log the rule so an external driver can run the
        # compilations in parallel.
        list(APPEND CUDA_BATCH_BUILD_OUTPUTS ${generated_file})
        set_property( GLOBAL APPEND PROPERTY CUDA_BATCH_BUILD_DEPENDS ${source_file} ${CUDA_NVCC_DEPEND} ${custom_target_script})
        file( APPEND ${CUDA_BATCH_BUILD_LOG} "${_custom_command_args}\n" )
      endif()
      add_custom_command( ${_custom_command_args} )

      # Make sure the build system knows the file is generated.
      set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE)

      list(APPEND _cuda_wrap_generated_files ${generated_file})

      # Add the other files that we want cmake to clean on a cleanup ##########
      list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}")
      list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES)
      set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")

    endif()
  endforeach()

  # Set the return parameter
  set(${generated_files} ${_cuda_wrap_generated_files})
endmacro()
# Pick out the host-compiler flags that nvcc must also see (forwarded later
# via -Xcompiler) so host and device objects stay ABI compatible:
#   * Visual Studio generators: the runtime-library selection (/MD, /MT, /MDd, /MTd)
#   * all other generators:     position-independent code (-fPIC)
# Matches found in flag_string are appended to the caller's ${important_flags} list.
function(_cuda_get_important_host_flags important_flags flag_string)
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    set(_flag_pattern "/M[DT][d]?")
  else()
    set(_flag_pattern "-fPIC")
  endif()
  string(REGEX MATCHALL "${_flag_pattern}" _matched_flags "${flag_string}")
  # Append to whatever the caller already accumulated and export the result.
  set(${important_flags} ${${important_flags}} ${_matched_flags} PARENT_SCOPE)
endfunction()
###############################################################################
###############################################################################
# Separable Compilation Link
###############################################################################
###############################################################################
# Compute the filename to be used by CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS
# Compute the path of the intermediate device-link object used by
# CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS for the given target.  Writes the
# path into ${output_file_var} in the caller's scope; an empty object_files
# list yields an empty path (no device link needed).
function(CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME output_file_var cuda_target object_files)
  if (NOT object_files)
    # Nothing to device-link for this target.
    set(${output_file_var} "" PARENT_SCOPE)
    return()
  endif()
  # Use the host object-file extension for the chosen linker language (C or CXX).
  set(_obj_ext ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION})
  set(${output_file_var}
    "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${CMAKE_CFG_INTDIR}/${cuda_target}_intermediate_link${_obj_ext}"
    PARENT_SCOPE)
endfunction()
# Setup the build rule for the separable compilation intermediate link file.
# Set up the build rule that performs the separable-compilation device link:
# nvcc -dlink is run over all of the target's device object files to produce a
# single intermediate object (output_file), which is then linked into the final
# binary.
#
#  output_file  - path of the intermediate link object (may be empty; then no
#                 rule is created)
#  cuda_target  - the target this link step belongs to
#  options      - OPTIONS originally given to CUDA_WRAP_SRCS (parsed for nvcc flags)
#  object_files - the separable-compilation objects to device-link
function(CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS output_file cuda_target options object_files)
  if (object_files)
    set_source_files_properties("${output_file}"
      PROPERTIES
      EXTERNAL_OBJECT TRUE # This is an object file not to be compiled, but only
                           # be linked.
      GENERATED TRUE       # This file is generated during the build
      )
    # For now we are ignoring all the configuration specific flags.
    set(nvcc_flags)
    CUDA_PARSE_NVCC_OPTIONS(nvcc_flags ${options})
    # Device-code bitness must match the host build.
    if(CUDA_64_BIT_DEVICE_CODE)
      list(APPEND nvcc_flags -m64)
    else()
      list(APPEND nvcc_flags -m32)
    endif()
    # If -ccbin, --compiler-bindir has been specified, don't do anything. Otherwise add it here.
    list( FIND nvcc_flags "-ccbin" ccbin_found0 )
    list( FIND nvcc_flags "--compiler-bindir" ccbin_found1 )
    if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER )
      # Match VERBATIM check below.
      if(CUDA_HOST_COMPILER MATCHES "\\$\\(VCInstallDir\\)")
        # The $(VCInstallDir) macro is expanded by Visual Studio itself, so the
        # path must be hand-quoted here (VERBATIM is disabled below for this case).
        list(APPEND nvcc_flags -ccbin "\"${CUDA_HOST_COMPILER}\"")
      else()
        list(APPEND nvcc_flags -ccbin "${CUDA_HOST_COMPILER}")
      endif()
    endif()

    # Create a list of flags specified by CUDA_NVCC_FLAGS_${CONFIG} and CMAKE_${CUDA_C_OR_CXX}_FLAGS*
    set(config_specific_flags)
    set(flags)
    foreach(config ${CUDA_configuration_types})
      string(TOUPPER ${config} config_upper)
      # Add config specific flags, each guarded by a per-configuration
      # generator expression so multi-config generators pick the right set.
      foreach(f ${CUDA_NVCC_FLAGS_${config_upper}})
        list(APPEND config_specific_flags $<$<CONFIG:${config}>:${f}>)
      endforeach()
      # Forward the ABI-relevant host flags (runtime library / -fPIC) via -Xcompiler.
      set(important_host_flags)
      _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
      foreach(f ${important_host_flags})
        list(APPEND flags $<$<CONFIG:${config}>:-Xcompiler> $<$<CONFIG:${config}>:${f}>)
      endforeach()
    endforeach()
    # Add CMAKE_${CUDA_C_OR_CXX}_FLAGS (configuration-independent host flags)
    set(important_host_flags)
    _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS}")
    foreach(f ${important_host_flags})
      list(APPEND flags -Xcompiler ${f})
    endforeach()

    # Add our general CUDA_NVCC_FLAGS with the configuration specific flags
    set(nvcc_flags ${CUDA_NVCC_FLAGS} ${config_specific_flags} ${nvcc_flags})

    # Relative path used only for the build-progress COMMENT below.
    file(RELATIVE_PATH output_file_relative_path "${CMAKE_BINARY_DIR}" "${output_file}")

    # Some generators don't handle the multiple levels of custom command
    # dependencies correctly (obj1 depends on file1, obj2 depends on obj1), so
    # we work around that issue by compiling the intermediate link object as a
    # pre-link custom command in that situation.
    set(do_obj_build_rule TRUE)
    if (MSVC_VERSION GREATER 1599 AND MSVC_VERSION LESS 1800)
      # VS 2010 and 2012 have this problem.
      set(do_obj_build_rule FALSE)
    endif()

    # VERBATIM must be dropped when $(VCInstallDir) appears in the flags, so
    # Visual Studio can expand the macro itself inside the project rule.
    set(_verbatim VERBATIM)
    if(nvcc_flags MATCHES "\\$\\(VCInstallDir\\)")
      set(_verbatim "")
    endif()

    if (do_obj_build_rule)
      # Normal path: a real build rule producing the intermediate object.
      add_custom_command(
        OUTPUT ${output_file}
        DEPENDS ${object_files}
        COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} -dlink ${object_files} -o ${output_file}
        ${flags}
        COMMENT "Building NVCC intermediate link file ${output_file_relative_path}"
        ${_verbatim}
        )
    else()
      # Workaround path: run the device link as a PRE_LINK step of the target.
      get_filename_component(output_file_dir "${output_file}" DIRECTORY)
      add_custom_command(
        TARGET ${cuda_target}
        PRE_LINK
        COMMAND ${CMAKE_COMMAND} -E echo "Building NVCC intermediate link file ${output_file_relative_path}"
        COMMAND ${CMAKE_COMMAND} -E make_directory "${output_file_dir}"
        COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} ${flags} -dlink ${object_files} -o "${output_file}"
        ${_verbatim}
        )
    endif()
  endif()
endfunction()
###############################################################################
###############################################################################
# ADD LIBRARY
###############################################################################
###############################################################################
# CUDA_ADD_LIBRARY(<target> <sources...> [STATIC|SHARED|MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...])
# Wraps the .cu sources with nvcc compile rules, adds the library target, and
# wires up the separable-compilation device link and CUDA runtime libraries.
macro(CUDA_ADD_LIBRARY cuda_target)

  CUDA_ADD_CUDA_INCLUDE_ONCE()

  # Separate the sources from the options
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  CUDA_BUILD_SHARED_LIBRARY(_cuda_shared_flag ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources}
    ${_cmake_options} ${_cuda_shared_flag}
    OPTIONS ${_options} )

  # Compute the file name of the intermediate link file used for separable
  # compilation.
  CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  # Add the library.
  add_library(${cuda_target} ${_cmake_options}
    ${_generated_files}
    ${_sources}
    ${link_file}
    )

  # Add a link phase for the separable compilation if it has been enabled.  If
  # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS
  # variable will have been defined.
  CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  target_link_libraries(${cuda_target}
    ${CUDA_LIBRARIES}
    )

  if(CUDA_SEPARABLE_COMPILATION)
    # Separable compilation additionally requires the device runtime library.
    target_link_libraries(${cuda_target}
      ${CUDA_cudadevrt_LIBRARY}
      )
  endif()

  # We need to set the linker language based on what the expected generated file
  # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
  set_target_properties(${cuda_target}
    PROPERTIES
    LINKER_LANGUAGE ${CUDA_C_OR_CXX}
    )

endmacro()
###############################################################################
###############################################################################
# ADD EXECUTABLE
###############################################################################
###############################################################################
# CUDA_ADD_EXECUTABLE(<target> <sources...> [EXCLUDE_FROM_ALL] [OPTIONS ...])
# Executable counterpart of CUDA_ADD_LIBRARY: wraps .cu sources with nvcc
# rules, adds the executable target, and hooks up the separable-compilation
# device link plus the CUDA runtime libraries.
macro(CUDA_ADD_EXECUTABLE cuda_target)

  CUDA_ADD_CUDA_INCLUDE_ONCE()

  # Separate the sources from the options
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} )

  # Compute the file name of the intermediate link file used for separable
  # compilation.
  CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  # Add the executable.
  add_executable(${cuda_target} ${_cmake_options}
    ${_generated_files}
    ${_sources}
    ${link_file}
    )

  # Add a link phase for the separable compilation if it has been enabled.  If
  # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS
  # variable will have been defined.
  CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")

  target_link_libraries(${cuda_target}
    ${CUDA_LIBRARIES}
    )

  # We need to set the linker language based on what the expected generated file
  # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
  set_target_properties(${cuda_target}
    PROPERTIES
    LINKER_LANGUAGE ${CUDA_C_OR_CXX}
    )

endmacro()
###############################################################################
###############################################################################
# (Internal) helper for manually added cuda source files with specific targets
###############################################################################
###############################################################################
# Internal helper shared by CUDA_COMPILE / CUDA_COMPILE_PTX / _FATBIN / _CUBIN:
# compiles the given sources to <format> (OBJ, PTX, FATBIN or CUBIN) files and
# returns the generated file list in ${generated_files}.  <cuda_target> is the
# pseudo-target name used to group the custom commands.
macro(cuda_compile_base cuda_target format generated_files)
  # Separate the sources from the options
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( ${cuda_target} ${format} _generated_files ${_sources}
    ${_cmake_options} OPTIONS ${_options})
  # Hand the generated-file list back to the caller.
  set( ${generated_files} ${_generated_files})
endmacro()
###############################################################################
###############################################################################
# CUDA COMPILE
###############################################################################
###############################################################################
# CUDA_COMPILE(<out_var> <sources...> [OPTIONS ...])
# Compile CUDA sources to host-linkable object files; the generated file list
# is returned in <out_var>.
macro(CUDA_COMPILE generated_files)
  cuda_compile_base(cuda_compile OBJ ${generated_files} ${ARGN})
endmacro()
###############################################################################
###############################################################################
# CUDA COMPILE PTX
###############################################################################
###############################################################################
# CUDA_COMPILE_PTX(<out_var> <sources...> [OPTIONS ...])
# Compile CUDA sources to PTX files; the generated file list is returned in
# <out_var>.
macro(CUDA_COMPILE_PTX generated_files)
  cuda_compile_base(cuda_compile_ptx PTX ${generated_files} ${ARGN})
endmacro()
###############################################################################
###############################################################################
# CUDA COMPILE FATBIN
###############################################################################
###############################################################################
# CUDA_COMPILE_FATBIN(<out_var> <sources...> [OPTIONS ...])
# Compile CUDA sources to fatbin files; the generated file list is returned in
# <out_var>.
macro(CUDA_COMPILE_FATBIN generated_files)
  cuda_compile_base(cuda_compile_fatbin FATBIN ${generated_files} ${ARGN})
endmacro()
###############################################################################
###############################################################################
# CUDA COMPILE CUBIN
###############################################################################
###############################################################################
# CUDA_COMPILE_CUBIN(<out_var> <sources...> [OPTIONS ...])
# Compile CUDA sources to cubin files; the generated file list is returned in
# <out_var>.
macro(CUDA_COMPILE_CUBIN generated_files)
  cuda_compile_base(cuda_compile_cubin CUBIN ${generated_files} ${ARGN})
endmacro()
###############################################################################
###############################################################################
# CUDA ADD CUFFT TO TARGET
###############################################################################
###############################################################################
# Link the cuFFT library into <target>.  When device-emulation builds are
# enabled, the emulation variant of the library is linked instead of the
# hardware one.
macro(CUDA_ADD_CUFFT_TO_TARGET target)
  if (NOT CUDA_BUILD_EMULATION)
    target_link_libraries(${target} ${CUDA_cufft_LIBRARY})
  else()
    target_link_libraries(${target} ${CUDA_cufftemu_LIBRARY})
  endif()
endmacro()
###############################################################################
###############################################################################
# CUDA ADD CUBLAS TO TARGET
###############################################################################
###############################################################################
# Link the cuBLAS libraries into <target>.  Hardware builds also link the
# device-side cuBLAS library; emulation builds use the emulation variant.
macro(CUDA_ADD_CUBLAS_TO_TARGET target)
  if (NOT CUDA_BUILD_EMULATION)
    target_link_libraries(${target} ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY})
  else()
    target_link_libraries(${target} ${CUDA_cublasemu_LIBRARY})
  endif()
endmacro()
###############################################################################
###############################################################################
# CUDA BUILD CLEAN TARGET
###############################################################################
###############################################################################
# Create a convenience target that deletes all of the intermediate CUDA
# dependency files recorded in CUDA_ADDITIONAL_CLEAN_FILES.  Call this after
# adding all CUDA targets; run `make clean` afterwards so the build system
# regenerates the removed files.
macro(CUDA_BUILD_CLEAN_TARGET)
  if (CMAKE_GENERATOR MATCHES "Visual Studio")
    # Visual Studio target names are conventionally upper case.
    set(cuda_clean_target_name CLEAN_CUDA_DEPENDS)
  else()
    set(cuda_clean_target_name clean_cuda_depends)
  endif()
  add_custom_target(${cuda_clean_target_name}
    COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES})
  # Empty the bookkeeping cache list so that entries from removed targets do
  # not persist into the next configure.
  set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")
endmacro()
##############################################################################
###############################################################################
# CUDA BATCH BUILD BEGIN
###############################################################################
###############################################################################
# Begin recording nvcc compile commands for <target> so they can later be
# replayed as one batched build by CUDA_BATCH_BUILD_END.  Exports
# CUDA_BATCH_BUILD_LOG into the caller's scope and resets the global
# CUDA_BATCH_BUILD_DEPENDS property.
function(CUDA_BATCH_BUILD_BEGIN target)
  # Batching is only implemented for MSVC builds with CUDA_ENABLE_BATCHING on.
  if(NOT (MSVC AND CUDA_ENABLE_BATCHING))
    return()
  endif()
  set(_batch_log "${CMAKE_BINARY_DIR}/CMakeFiles/${target}.dir/cudaBatchBuild.log")
  # Start from a clean log each configure.
  file(REMOVE "${_batch_log}")
  set(CUDA_BATCH_BUILD_LOG "${_batch_log}" PARENT_SCOPE) # export variable
  set_property(GLOBAL PROPERTY CUDA_BATCH_BUILD_DEPENDS "")
endfunction()
###############################################################################
###############################################################################
# CUDA BATCH BUILD END
###############################################################################
###############################################################################
# Finish a batched CUDA build for <target>.  If CUDA_BATCH_BUILD_BEGIN
# recorded a build log, create a helper target that replays the logged nvcc
# compilations in a single batched python invocation, and make <target>
# depend on it.  Finally the exported CUDA_BATCH_BUILD_LOG variable and the
# global dependency property are cleared so later targets start fresh.
function(CUDA_BATCH_BUILD_END target)
  set(BATCH_CMAKE_SCRIPT "${CMAKE_SOURCE_DIR}/CMake/cuda/FindCUDA/batchCMake.py")
  find_package(PythonInterp)
  if( CUDA_BATCH_BUILD_LOG )
    set(cuda_batch_build_target "_${target}_cudaBatchBuild")
    set(stamp_dir "${CMAKE_BINARY_DIR}/CMakeFiles/${target}.dir/${CMAKE_CFG_INTDIR}")
    set(stamp_file ${stamp_dir}/cuda-batch-build.stamp)
    get_property(cuda_depends GLOBAL PROPERTY CUDA_BATCH_BUILD_DEPENDS)
    list(REMOVE_DUPLICATES cuda_depends)
    add_custom_target( ${cuda_batch_build_target}
      COMMENT "CUDA batch build ${cuda_batch_build_target}..."
      COMMAND "${PYTHON_EXECUTABLE}" "${BATCH_CMAKE_SCRIPT}" -t ${cuda_batch_build_target} -c ${CUDA_BATCH_BUILD_LOG} -s "\"%24(VCInstallDir)=$(VCInstallDir)\\\"" -s "%24(ConfigurationName)=$(ConfigurationName)" -s "%24(Configuration)=$(Configuration)" -s "%24(VCToolsVersion)=$(VCToolsVersion)" -s "%24(Platform)=$(Platform)" -s "%24(PlatformTarget)=$(PlatformTarget)" # %24 is the '$' character - needed to escape '$' in VS rule
      DEPENDS ${cuda_depends}
      )
    add_dependencies( ${target} ${cuda_batch_build_target} )
  endif()
  # BUG FIX: this is a function (own variable scope), so the previous
  # `set( CUDA_BATCH_BUILD_LOG )` only cleared a local copy; the value exported
  # by CUDA_BATCH_BUILD_BEGIN remained visible to the caller and would make
  # subsequent CUDA_WRAP_SRCS calls append to a stale, removed log file.
  # Clear the variable in the caller's scope instead.
  unset( CUDA_BATCH_BUILD_LOG PARENT_SCOPE )
  set_property( GLOBAL PROPERTY CUDA_BATCH_BUILD_DEPENDS "" )
endfunction()
##############################################################################
###############################################################################
# CUDA BATCH DEPENDS BEGIN
###############################################################################
###############################################################################
# Begin recording nvcc dependency-generation commands so they can be executed
# as one batch at the end of configuration (see CUDA_BATCH_DEPENDS_END).
# Exports CUDA_BATCH_DEPENDS_LOG into the caller's scope and writes a batch
# file that sets up the Visual Studio environment before invoking the batch
# python script.  Only active for MSVC with CUDA_ENABLE_BATCHING enabled.
function(CUDA_BATCH_DEPENDS_BEGIN)
  if(MSVC AND CUDA_ENABLE_BATCHING)
    set( CUDA_BATCH_DEPENDS_LOG "${CMAKE_BINARY_DIR}/CMakeFiles/cudaBatchDepends.log" )
    # Start from a clean log each configure.
    file( REMOVE ${CUDA_BATCH_DEPENDS_LOG} )
    set( CUDA_BATCH_DEPENDS_LOG "${CUDA_BATCH_DEPENDS_LOG}" PARENT_SCOPE ) # export variable
    set(BATCH_CMAKE_SCRIPT "${CMAKE_SOURCE_DIR}/CMake/cuda/FindCUDA/batchCMake.py")
    # Create batch file to setup VS environment, since CUDA 8 broke running nvcc outside
    # of VS environment. You could get around this with the following command with newer
    # versions of cmake (3.5.2 worked for me, but 3.2.1 didn't like the && ):
    # execute_process( COMMAND ${CUDA_VC_VARS_ALL_BAT} && ${PYTHON_EXECUTABLE} ... )
    if(CMAKE_SIZEOF_VOID_P EQUAL 8)
      set(BUILD_BITS amd64)
    else()
      set(BUILD_BITS x86)
    endif()
    # The generated .bat calls vcvarsall for the right bitness, then runs the
    # batch script in execute (-e) mode over the recorded log.
    file(WRITE ${CUDA_BATCH_DEPENDS_LOG}.vsconfigure.bat
      "@echo OFF\n"
      "REM Created by FindCUDA.cmake\n"
      "@call \"${CUDA_VC_VARS_ALL_BAT}\" ${BUILD_BITS}\n"
      "\"${PYTHON_EXECUTABLE}\" \"${BATCH_CMAKE_SCRIPT}\" -e \"${CUDA_BATCH_DEPENDS_LOG}\" -t \"CUDA batch dependencies\""
      )
  endif()
endfunction()
###############################################################################
###############################################################################
# CUDA BATCH DEPENDS END
###############################################################################
###############################################################################
# Execute the batched nvcc dependency generation recorded by
# CUDA_BATCH_DEPENDS_BEGIN.  On success a SEND_ERROR deliberately stops this
# configure pass so the user reconfigures and the freshly generated
# dependencies are incorporated into the build files.  Afterwards the exported
# CUDA_BATCH_DEPENDS_LOG variable is cleared in the caller's scope.
function(CUDA_BATCH_DEPENDS_END)
  if( CUDA_BATCH_DEPENDS_LOG )
    if( EXISTS ${CUDA_BATCH_DEPENDS_LOG} )
      message(STATUS "CUDA batch dependencies ...")
      # Run the VS-environment wrapper written by CUDA_BATCH_DEPENDS_BEGIN.
      execute_process(
        COMMAND ${CUDA_BATCH_DEPENDS_LOG}.vsconfigure.bat
        WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
        OUTPUT_VARIABLE _cuda_batch_depends_output
        ERROR_VARIABLE _cuda_batch_depends_output
        RESULT_VARIABLE _cuda_batch_depends_result
        )
      message(STATUS ${_cuda_batch_depends_output})
      if( ${_cuda_batch_depends_result} )
        message(FATAL_ERROR "Failed")
      else()
        # Intentional: force the user to reconfigure so the new dependency
        # information is picked up.
        message(SEND_ERROR "Please reconfigure to ensure that dependencies are incorporated into the build files")
      endif()
    endif()
  endif()
  # BUG FIX: this is a function (own variable scope), so the previous
  # `set( CUDA_BATCH_DEPENDS_LOG )` only cleared a local copy; the value
  # exported by CUDA_BATCH_DEPENDS_BEGIN leaked into the caller's scope.
  # Clear it in the parent scope instead.
  unset( CUDA_BATCH_DEPENDS_LOG PARENT_SCOPE )
endfunction()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindCUDA/make2cmake.cmake | CMake | # James Bigler, NVIDIA Corp (nvidia.com - jbigler)
# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# Copyright (c) 2007-2009
# Scientific Computing and Imaging Institute, University of Utah
#
# This code is licensed under the MIT License. See the FindCUDA.cmake script
# for the text of the license.
# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
#######################################################################
# This converts a file written in makefile syntax into one that can be included
# by CMake.
# Input variables
#
# verbose:BOOL=<> OFF: Be as quiet as possible (default)
# ON : Extra output
#
# input_file:FILEPATH=<> Path to dependecy file in makefile format
#
# output_file:FILEPATH=<> Path to file with dependencies in CMake readable variable
#
# Parse the makefile-style dependency file produced by `nvcc -M`
# (${input_file}) and write the dependencies as a CMake-readable
# CUDA_NVCC_DEPEND list into ${output_file}.
file(READ ${input_file} depend_text)

if (NOT "${depend_text}" STREQUAL "")

  # message("FOUND DEPENDS")

  # Un-escape spaces in paths ("\ " -> " ").
  string(REPLACE "\\ " " " depend_text ${depend_text})

  # This works for the nvcc -M generated dependency files.
  # Drop the "target :" prefix, then split on line-continuation newlines.
  string(REGEX REPLACE "^.* : " "" depend_text ${depend_text})
  string(REGEX REPLACE "[ \\\\]*\n" ";" depend_text ${depend_text})

  set(dependency_list "")

  foreach(file ${depend_text})

    # Strip leading whitespace left over from the makefile formatting.
    string(REGEX REPLACE "^ +" "" file ${file})

    # OK, now if we had a UNC path, nvcc has a tendency to only output the first '/'
    # instead of '//'. Here we will test to see if the file exists, if it doesn't then
    # try to prepend another '/' to the path and test again. If it still fails remove the
    # path.

    if(NOT EXISTS "${file}")
      if (EXISTS "/${file}")
        set(file "/${file}")
      else()
        if(verbose)
          message(WARNING " Removing non-existent dependency file: ${file}")
        endif()
        set(file "")
      endif()
    endif()

    # Make sure we check to see if we have a file, before asking if it is not a directory.
    # if(NOT IS_DIRECTORY "") will return TRUE.
    if(file AND NOT IS_DIRECTORY "${file}")
      # If softlinks start to matter, we should change this to REALPATH. For now we need
      # to flatten paths, because nvcc can generate stuff like /bin/../include instead of
      # just /include.
      get_filename_component(file_absolute "${file}" ABSOLUTE)
      list(APPEND dependency_list "${file_absolute}")
    endif()

  endforeach()

else()
  # message("FOUND NO DEPENDS")
endif()

# Remove the duplicate entries and sort them.
list(REMOVE_DUPLICATES dependency_list)
list(SORT dependency_list)

# Render each dependency as a quoted list entry for the generated file.
foreach(file ${dependency_list})
  set(cuda_nvcc_depend "${cuda_nvcc_depend} \"${file}\"\n")
endforeach()

file(WRITE ${output_file} "# Generated by: make2cmake.cmake\nSET(CUDA_NVCC_DEPEND\n ${cuda_nvcc_depend})\n\n")
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindCUDA/parse_cubin.cmake | CMake | # James Bigler, NVIDIA Corp (nvidia.com - jbigler)
# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# Copyright (c) 2007-2009
# Scientific Computing and Imaging Institute, University of Utah
#
# This code is licensed under the MIT License. See the FindCUDA.cmake script
# for the text of the license.
# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
#######################################################################
# Parses a .cubin file produced by nvcc and reports statistics about the file.
# Parse the text of a .cubin file (${input_file}) and print per-kernel
# resource statistics (registers, local memory, shared memory) via message().
file(READ ${input_file} file_text)

if (NOT "${file_text}" STREQUAL "")

  # Escape existing semicolons, then split the text into one list element per
  # "code { ... }" section.
  string(REPLACE ";" "\\;" file_text ${file_text})
  string(REPLACE "\ncode" ";code" file_text ${file_text})

  list(LENGTH file_text len)

  foreach(line ${file_text})

    # Only look at "code { }" blocks.
    if(line MATCHES "^code")

      # Break into individual lines.
      string(REGEX REPLACE "\n" ";" line ${line})

      foreach(entry ${line})

        # Extract kernel names.
        if (${entry} MATCHES "[^g]name = ([^ ]+)")
          set(entry "${CMAKE_MATCH_1}")

          # Check to see if the kernel name starts with "_"
          set(skip FALSE)
          # if (${entry} MATCHES "^_")
          # Skip the rest of this block.
          # message("Skipping ${entry}")
          # set(skip TRUE)
          # else ()
          message("Kernel: ${entry}")
          # endif ()

        endif()

        # Skip the rest of the block if necessary
        if(NOT skip)

          # Registers
          if (${entry} MATCHES "reg([ ]+)=([ ]+)([^ ]+)")
            set(entry "${CMAKE_MATCH_3}")
            message("Registers: ${entry}")
          endif()

          # Local memory
          if (${entry} MATCHES "lmem([ ]+)=([ ]+)([^ ]+)")
            set(entry "${CMAKE_MATCH_3}")
            message("Local: ${entry}")
          endif()

          # Shared memory
          if (${entry} MATCHES "smem([ ]+)=([ ]+)([^ ]+)")
            set(entry "${CMAKE_MATCH_3}")
            message("Shared: ${entry}")
          endif()

          # Blank line between blocks when the closing brace is reached.
          if (${entry} MATCHES "^}")
            message("")
          endif()

        endif()

      endforeach()

    endif()

  endforeach()

else()
  # message("FOUND NO DEPENDS")
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindCUDA/run_nvcc.cmake | CMake | # James Bigler, NVIDIA Corp (nvidia.com - jbigler)
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# This code is licensed under the MIT License. See the FindCUDA.cmake script
# for the text of the license.
# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
##########################################################################
# This file runs the nvcc commands to produce the desired output file along with
# the dependency file needed by CMake to compute dependencies. In addition the
# file checks the output of each command and if the command fails it deletes the
# output files.
# Input variables
#
# verbose:BOOL=<> OFF: Be as quiet as possible (default)
# ON : Describe each step
#
# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or
# RelWithDebInfo, but it should match one of the
# entries in CUDA_HOST_FLAGS. This is the build
# configuration used when compiling the code. If
# blank or unspecified Debug is assumed as this is
# what CMake does.
#
# generated_file:STRING=<> File to generate. This argument must be passed in.
#
# generated_cubin_file:STRING=<> File to generate. This argument must be passed
# in if build_cubin is true.
# generate_dependency_only:BOOL=<> Only generate the dependency file.
#
# check_dependencies:BOOL=<> Check the dependencies. If everything is up to
# date, simply touch the output file instead of
# generating it.
# ---------------------------------------------------------------------------
# Configure-time setup for the nvcc invocation.  All @VAR@ placeholders below
# are substituted when FindCUDA configures this script for a specific source
# file; the substituted copy is then run at build time with `cmake -P`.
# ---------------------------------------------------------------------------
if(NOT generated_file)
  message(FATAL_ERROR "You must specify generated_file on the command line")
endif()

# Set these up as variables to make reading the generated file easier
set(CMAKE_COMMAND "@CMAKE_COMMAND@") # path
set(source_file "@source_file@") # path
set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") # path
set(cmake_dependency_file "@cmake_dependency_file@") # path
set(CUDA_make2cmake "@CUDA_make2cmake@") # path
set(CUDA_parse_cubin "@CUDA_parse_cubin@") # path
set(build_cubin @build_cubin@) # bool
set(CUDA_HOST_COMPILER "@CUDA_HOST_COMPILER@") # path
# We won't actually use these variables for now, but we need to set this, in
# order to force this file to be run again if it changes.
set(generated_file_path "@generated_file_path@") # path
set(generated_file_internal "@generated_file@") # path
set(generated_cubin_file_internal "@generated_cubin_file@") # path
set(CUDA_REMOVE_GLOBAL_MEMORY_SPACE_WARNING @CUDA_REMOVE_GLOBAL_MEMORY_SPACE_WARNING@)

set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") # path
set(CUDA_NVCC_FLAGS @CUDA_NVCC_FLAGS@ ;; @CUDA_WRAP_OPTION_NVCC_FLAGS@) # list
@CUDA_NVCC_FLAGS_CONFIG@
set(nvcc_flags @nvcc_flags@) # list
set(CUDA_NVCC_INCLUDE_ARGS "@CUDA_NVCC_INCLUDE_ARGS@") # list (needs to be in quotes to handle spaces properly).
set(format_flag "@format_flag@") # string
set(cuda_language_flag @cuda_language_flag@) # list

if(build_cubin AND NOT generated_cubin_file)
  message(FATAL_ERROR "You must specify generated_cubin_file on the command line")
endif()

# This is the list of host compilation flags. Either C or CXX should already
# have been chosen by FindCUDA.cmake.
@CUDA_HOST_FLAGS@

# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler
set(nvcc_host_compiler_flags "")

# If we weren't given a build_configuration, use Debug.
if(NOT build_configuration)
  set(build_configuration Debug)
endif()
string(TOUPPER "${build_configuration}" build_configuration)
#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}")
# Join base and per-configuration host flags into a single comma-separated
# -Xcompiler argument.
foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}})
  # Extra quotes are added around each flag to help nvcc parse out flags with spaces.
  if ("${nvcc_host_compiler_flags}" STREQUAL "")
    set(nvcc_host_compiler_flags "\"${flag}\"")
  else()
    set(nvcc_host_compiler_flags "${nvcc_host_compiler_flags},\"${flag}\"")
  endif()
endforeach()
if (nvcc_host_compiler_flags)
  set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags})
endif()
#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"")

# Dependency generation uses only the configuration-independent host flags,
# assembled the same way as above.
set(depends_nvcc_host_compiler_flags "")
foreach(flag ${CMAKE_HOST_FLAGS} )
  # Extra quotes are added around each flag to help nvcc parse out flags with spaces.
  if ("${depends_nvcc_host_compiler_flags}" STREQUAL "")
    set(depends_nvcc_host_compiler_flags "\"${flag}\"")
  else()
    set(depends_nvcc_host_compiler_flags "${depends_nvcc_host_compiler_flags},\"${flag}\"")
  endif()
endforeach()
if (depends_nvcc_host_compiler_flags)
  set(depends_nvcc_host_compiler_flags "-Xcompiler" ${depends_nvcc_host_compiler_flags})
endif()

list(APPEND depends_CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS})

# Add the build specific configuration flags
list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}})

# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority
list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 )
list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 )
if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER )
  if (CUDA_HOST_COMPILER STREQUAL "@_CUDA_MSVC_HOST_COMPILER@" AND DEFINED CCBIN)
    # An externally supplied CCBIN overrides the MSVC placeholder compiler.
    set(CCBIN -ccbin "${CCBIN}")
  else()
    set(CCBIN -ccbin "${CUDA_HOST_COMPILER}")
  endif()
endif()
# cuda_execute_process - Executes a command with optional command echo and status message.
#
# status - Status message to print if verbose is true
# command - COMMAND argument from the usual execute_process argument structure
# ARGN - Remaining arguments are the command with arguments
#
# CUDA_result - return value from running the command
#
# Make this a macro instead of a function, so that things like RESULT_VARIABLE
# and other return variables are present after executing the process.
macro(cuda_execute_process status command)
set(_command ${command})
# Guard against call sites that forgot the literal COMMAND keyword.
if(NOT "x${_command}" STREQUAL "xCOMMAND")
message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})")
endif()
if(verbose)
execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status})
# Now we need to build up our command string. We are accounting for quotes
# and spaces, anything else is left up to the user to fix if they want to
# copy and paste a runnable command line.
set(cuda_execute_process_string)
foreach(arg ${ARGN})
# If there are quotes, escape them, so they come through.
string(REPLACE "\"" "\\\"" arg ${arg})
# Args with spaces need quotes around them to get them to be parsed as a single argument.
if(arg MATCHES " ")
list(APPEND cuda_execute_process_string "\"${arg}\"")
else()
list(APPEND cuda_execute_process_string ${arg})
endif()
endforeach()
# Echo the command
execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string})
endif()
# Run the command
execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result )
endmacro()
# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag
# for dependency generation and hope for the best.
set(CUDA_VERSION @CUDA_VERSION@)
if(CUDA_VERSION VERSION_LESS "3.0")
cmake_policy(PUSH)
# CMake policy 0007 NEW states that empty list elements are not
# ignored. I'm just setting it to avoid the warning that's printed.
cmake_policy(SET CMP0007 NEW)
# Note that this will remove all occurrences of -G.
list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-G")
cmake_policy(POP)
endif()
# If we need to create relocatable code, the dependency phase doesn't like this argument.
# We need to filter it out here.
list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-dc")
list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "--device-c")
# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This
# can cause incorrect dependencies when #including files based on this macro which is
# defined in the generating passes of nvcc invocation. We will go ahead and manually
# define this for now until a future version fixes this bug.
set(CUDACC_DEFINE -D__CUDACC__)
if (check_dependencies)
set(rebuild FALSE)
# Load the previously generated CMake-readable dependency list; it is
# expected to set CUDA_NVCC_DEPEND.
include(${cmake_dependency_file})
if(NOT CUDA_NVCC_DEPEND)
# CUDA_NVCC_DEPEND should have something useful in it by now. If not we
# should force the rebuild.
if (verbose)
message(WARNING "CUDA_NVCC_DEPEND not found for ${generated_file}")
endif()
set(rebuild TRUE)
endif()
# Rebuilding is also dependent on this file changing.
list(APPEND CUDA_NVCC_DEPEND "${CMAKE_CURRENT_LIST_FILE}")
foreach(f ${CUDA_NVCC_DEPEND})
# True if file1 is newer than file2 or if one of the two files doesn't
# exist. Behavior is well-defined only for full paths. If the file time
# stamps are exactly the same, an IS_NEWER_THAN comparison returns true, so
# that any dependent build operations will occur in the event of a tie. This
# includes the case of passing the same file name for both file1 and file2.
if("${f}" IS_NEWER_THAN "${generated_file}")
#message("file ${f} is newer than ${generated_file}")
set(rebuild TRUE)
endif()
endforeach()
if (NOT rebuild)
#message("Not rebuilding ${generated_file}")
# Touch the output so the build system considers it up to date, then stop.
cuda_execute_process(
"Dependencies up to date. Not rebuilding ${generated_file}"
COMMAND "${CMAKE_COMMAND}" -E touch "${generated_file}"
)
return()
endif()
endif()
# Generate the dependency file
cuda_execute_process(
"Generating dependency file: ${NVCC_generated_dependency_file}"
COMMAND "${CUDA_NVCC_EXECUTABLE}"
-M
${CUDACC_DEFINE}
"${source_file}"
-o "${NVCC_generated_dependency_file}"
${CCBIN}
${nvcc_flags}
${depends_nvcc_host_compiler_flags}
${depends_CUDA_NVCC_FLAGS}
-DNVCC
${CUDA_NVCC_INCLUDE_ARGS}
)
if(CUDA_result)
message(FATAL_ERROR "Error generating ${generated_file}")
endif()
# Generate the cmake readable dependency file to a temp file. Don't put the
# quotes just around the filenames for the input_file and output_file variables.
# CMake will pass the quotes through and not be able to find the file.
cuda_execute_process(
"Generating temporary cmake readable file: ${cmake_dependency_file}.tmp"
COMMAND "${CMAKE_COMMAND}"
-D "input_file:FILEPATH=${NVCC_generated_dependency_file}"
-D "output_file:FILEPATH=${cmake_dependency_file}.tmp"
-P "${CUDA_make2cmake}"
)
if(CUDA_result)
message(FATAL_ERROR "Error generating ${generated_file}")
endif()
# Copy the file if it is different
cuda_execute_process(
"Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}"
COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}"
)
if(CUDA_result)
message(FATAL_ERROR "Error generating ${generated_file}")
endif()
# Delete the temporary file
cuda_execute_process(
"Removing ${cmake_dependency_file}.tmp"
COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp"
)
if(CUDA_result)
message(FATAL_ERROR "Error generating ${generated_file}")
endif()
if (generate_dependency_only)
return()
endif()
# When requested, capture nvcc's stderr so known-benign warnings can be
# filtered out before being echoed back to the user (see below).
if(CUDA_REMOVE_GLOBAL_MEMORY_SPACE_WARNING)
set(get_error ERROR_VARIABLE stderr)
endif()
# Delete the target file
cuda_execute_process(
"Removing ${generated_file}"
COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
)
# Generate the code
cuda_execute_process(
"Generating ${generated_file}"
COMMAND "${CUDA_NVCC_EXECUTABLE}"
"${source_file}"
${cuda_language_flag}
${format_flag} -o "${generated_file}"
${CCBIN}
${nvcc_flags}
${nvcc_host_compiler_flags}
${CUDA_NVCC_FLAGS}
-DNVCC
${CUDA_NVCC_INCLUDE_ARGS}
${get_error}
)
# If stderr was captured above, scrub known-benign nvcc noise before echoing
# whatever remains back to the user.
if(get_error)
if(stderr)
# Filter out the annoying Advisory about pointer stuff.
# Advisory: Cannot tell what pointer points to, assuming global memory space
string(REGEX REPLACE "(^|\n)[^\n]*\(Advisory|Warning\): Cannot tell what pointer points to, assuming global memory space\n\n" "\\1" stderr "${stderr}")
# Filter out warning we do not care about
string(REGEX REPLACE "(^|\n)[^\n]*: Warning: Function [^\n]* has a large return size, so overriding noinline attribute. The function may be inlined when called.\n\n" "\\1" stderr "${stderr}")
# To be investigated (OP-1999)
string(REGEX REPLACE "(^|\n)[^\n]*: warning: function [^\n]*\n[^\n]*: here was declared deprecated \(.[^\n]* is not valid on compute_70 and above, and should be replaced with [^\n]*.To continue using [^\n]*, specify virtual architecture compute_60 when targeting sm_70 and above, for example, using the pair of compiler options.[^\n]*..\)\n( *detected during instantiation of [^\n]*\n[^\n]*: here\n)?( *detected during:\n( *instantiation of [^\n]*\n[^\n]*: here\n([^\n]*instantiation contexts not shown[^\n]*\n)?)+)?\n" "\\1" stderr "${stderr}")
string(REGEX REPLACE "(^|\n)[^\n]*: warning: function [^\n]*\n[^\n]*: here was declared deprecated \(.[^\n]* is deprecated in favor of [^\n]* and may be removed in a future release [^\n]*\)\n( *detected during instantiation of [^\n]*\n[^\n]*: here\n)?( *detected during:\n( *instantiation of [^\n]*\n[^\n]*: here\n([^\n]*instantiation contexts not shown[^\n]*\n)?)+)?\n" "\\1" stderr "${stderr}")
# If there is error output, there is usually a stray newline at the end. Eliminate it if it is the only content of ${stderr}.
string(REGEX REPLACE "^\n$" "" stderr "${stderr}")
if(stderr)
message("${stderr}")
endif()
endif()
endif()
if(CUDA_result)
# Since nvcc can sometimes leave half done files make sure that we delete the output file.
cuda_execute_process(
"Removing ${generated_file}"
COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
)
message(FATAL_ERROR "Error generating file ${generated_file}")
else()
if(verbose)
message("Generated ${generated_file} successfully.")
endif()
endif()
# Cubin resource report commands.
if( build_cubin )
# Run with -cubin to produce resource usage report.
cuda_execute_process(
"Generating ${generated_cubin_file}"
COMMAND "${CUDA_NVCC_EXECUTABLE}"
"${source_file}"
${CUDA_NVCC_FLAGS}
${nvcc_flags}
${CCBIN}
${nvcc_host_compiler_flags}
-DNVCC
-cubin
-o "${generated_cubin_file}"
${CUDA_NVCC_INCLUDE_ARGS}
)
# Execute the parser script.
# CUDA_parse_cubin extracts the register/memory usage report from the cubin.
cuda_execute_process(
"Executing the parser script"
COMMAND "${CMAKE_COMMAND}"
-D "input_file:STRING=${generated_cubin_file}"
-P "${CUDA_parse_cubin}"
)
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindCUDA/select_compute_arch.cmake | CMake | # Synopsis:
# CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures])
# -- Selects GPU arch flags for nvcc based on target_CUDA_architectures
# target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...)
# - "Auto" detects local machine GPU compute arch at runtime.
# - "Common" and "All" cover common and entire subsets of architectures
# ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX
# NAME: Fermi Kepler Maxwell Kepler+Tegra Kepler+Tesla Maxwell+Tegra Pascal
# NUM: Any number. Only those pairs are currently accepted by NVCC though:
# 2.0 2.1 3.0 3.2 3.5 3.7 5.0 5.2 5.3 6.0 6.2
# Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable}
# Additionally, sets ${out_variable}_readable to the resulting numeric list
# Example:
# CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell)
# LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
#
# More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA
#
# This list will be used for CUDA_ARCH_NAME = All option
set(CUDA_KNOWN_GPU_ARCHITECTURES "Fermi" "Kepler" "Maxwell")
# This list will be used for CUDA_ARCH_NAME = Common option (enabled by default)
set(CUDA_COMMON_GPU_ARCHITECTURES "3.0" "3.5" "5.0")
# Newer toolkits understand newer architectures; extend both lists accordingly.
if (CUDA_VERSION VERSION_GREATER "6.5")
list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Kepler+Tegra" "Kepler+Tesla" "Maxwell+Tegra")
list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "5.2")
endif ()
if (CUDA_VERSION VERSION_GREATER "7.5")
list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Pascal")
list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "6.0" "6.1" "6.1+PTX")
else()
# Pre-8.0 toolkits: also emit PTX for the newest arch they know (5.2) as a
# forward-compatibility fallback.
list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "5.2+PTX")
endif ()
################################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
# CUDA_DETECT_INSTALLED_GPUS(OUT_VARIABLE)
#
# Compiles and runs a small CUDA probe with "nvcc --run" that prints the compute
# capability ("major.minor ") of every visible device. The probe result is cached
# in CUDA_GPU_DETECT_OUTPUT so it only runs once per build tree; on failure,
# OUT_VARIABLE falls back to CUDA_COMMON_GPU_ARCHITECTURES.
function(CUDA_DETECT_INSTALLED_GPUS OUT_VARIABLE)
if(NOT CUDA_GPU_DETECT_OUTPUT)
set(cufile ${PROJECT_BINARY_DIR}/detect_cuda_archs.cu)
file(WRITE ${cufile} ""
"#include <cstdio>\n"
"int main()\n"
"{\n"
" int count = 0;\n"
" if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
" if (count == 0) return -1;\n"
" for (int device = 0; device < count; ++device)\n"
" {\n"
" cudaDeviceProp prop;\n"
" if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
" std::printf(\"%d.%d \", prop.major, prop.minor);\n"
" }\n"
" return 0;\n"
"}\n")
execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "--run" "${cufile}"
WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
RESULT_VARIABLE nvcc_res OUTPUT_VARIABLE nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(nvcc_res EQUAL 0)
# sm_21 has no matching compute_21 virtual architecture; bind it to compute_20.
string(REPLACE "2.1" "2.1(2.0)" nvcc_out "${nvcc_out}")
# Fix: corrected "architetures" typo in the user-visible cache docstring.
set(CUDA_GPU_DETECT_OUTPUT ${nvcc_out} CACHE INTERNAL "Returned GPU architectures from detect_gpus tool" FORCE)
endif()
endif()
if(NOT CUDA_GPU_DETECT_OUTPUT)
message(STATUS "Automatic GPU detection failed. Building for common architectures.")
set(${OUT_VARIABLE} ${CUDA_COMMON_GPU_ARCHITECTURES} PARENT_SCOPE)
else()
set(${OUT_VARIABLE} ${CUDA_GPU_DETECT_OUTPUT} PARENT_SCOPE)
endif()
endfunction()
################################################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA architectures from parameter list
# Usage:
# SELECT_NVCC_ARCH_FLAGS(out_variable [list of CUDA compute archs])
function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable)
set(CUDA_ARCH_LIST "${ARGN}")
if("X${CUDA_ARCH_LIST}" STREQUAL "X" )
# No architectures requested: autodetect the local GPUs.
set(CUDA_ARCH_LIST "Auto")
endif()
set(cuda_arch_bin)
set(cuda_arch_ptx)
# Expand the special keywords into concrete architecture lists.
if("${CUDA_ARCH_LIST}" STREQUAL "All")
set(CUDA_ARCH_LIST ${CUDA_KNOWN_GPU_ARCHITECTURES})
elseif("${CUDA_ARCH_LIST}" STREQUAL "Common")
set(CUDA_ARCH_LIST ${CUDA_COMMON_GPU_ARCHITECTURES})
elseif("${CUDA_ARCH_LIST}" STREQUAL "Auto")
CUDA_DETECT_INSTALLED_GPUS(CUDA_ARCH_LIST)
message(STATUS "Autodetected CUDA architecture(s): ${CUDA_ARCH_LIST}")
endif()
# Now process the list and look for names
string(REGEX REPLACE "[ \t]+" ";" CUDA_ARCH_LIST "${CUDA_ARCH_LIST}")
list(REMOVE_DUPLICATES CUDA_ARCH_LIST)
foreach(arch_name ${CUDA_ARCH_LIST})
set(arch_bin)
set(add_ptx FALSE)
# Check to see if we are compiling PTX
if(arch_name MATCHES "(.*)\\+PTX$")
set(add_ptx TRUE)
set(arch_name ${CMAKE_MATCH_1})
endif()
# Numeric form "X.Y", optionally with an explicit virtual arch "X.Y(V.W)".
if(arch_name MATCHES "^([0-9]\\.[0-9](\\([0-9]\\.[0-9]\\))?)$")
set(arch_bin ${CMAKE_MATCH_1})
set(arch_ptx ${arch_bin})
else()
# Look for it in our list of known architectures
if(${arch_name} STREQUAL "Fermi")
set(arch_bin 2.0 "2.1(2.0)")
elseif(${arch_name} STREQUAL "Kepler+Tegra")
set(arch_bin 3.2)
elseif(${arch_name} STREQUAL "Kepler+Tesla")
set(arch_bin 3.7)
elseif(${arch_name} STREQUAL "Kepler")
set(arch_bin 3.0 3.5)
set(arch_ptx 3.5)
elseif(${arch_name} STREQUAL "Maxwell+Tegra")
set(arch_bin 5.3)
elseif(${arch_name} STREQUAL "Maxwell")
set(arch_bin 5.0 5.2)
set(arch_ptx 5.2)
elseif(${arch_name} STREQUAL "Pascal")
set(arch_bin 6.0 6.1)
set(arch_ptx 6.1)
else()
message(SEND_ERROR "Unknown CUDA Architecture Name ${arch_name} in CUDA_SELECT_NVCC_ARCH_FLAGS")
endif()
endif()
if(NOT arch_bin)
message(SEND_ERROR "arch_bin wasn't set for some reason")
endif()
list(APPEND cuda_arch_bin ${arch_bin})
if(add_ptx)
if (NOT arch_ptx)
set(arch_ptx ${arch_bin})
endif()
list(APPEND cuda_arch_ptx ${arch_ptx})
endif()
endforeach()
# remove dots and convert to lists
string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}")
string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}")
if(cuda_arch_bin)
list(REMOVE_DUPLICATES cuda_arch_bin)
endif()
if(cuda_arch_ptx)
list(REMOVE_DUPLICATES cuda_arch_ptx)
endif()
set(nvcc_flags "")
set(nvcc_archs_readable "")
# Tell NVCC to add binaries for the specified GPUs
foreach(arch ${cuda_arch_bin})
if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
# User explicitly specified ARCH for the concrete CODE
list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
else()
# User didn't explicitly specify ARCH for the concrete CODE, we assume ARCH=CODE
list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
list(APPEND nvcc_archs_readable sm_${arch})
endif()
endforeach()
# Tell NVCC to add PTX intermediate code for the specified architectures
foreach(arch ${cuda_arch_ptx})
list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch})
list(APPEND nvcc_archs_readable compute_${arch})
endforeach()
string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
# Return both the raw flag list and a human-readable summary.
set(${out_variable} ${nvcc_flags} PARENT_SCOPE)
set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE)
endfunction()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindDX.cmake | CMake | IF (WIN32 AND MSVC_VERSION LESS 1700)
# Starting with Windows 8 the DirectX SDK ships as part of the Windows SDK
# (http://msdn.microsoft.com/en-us/library/windows/desktop/ee663275.aspx),
# and Visual Studio 2012+ (even Express) bundles the relevant Windows SDK
# components (http://msdn.microsoft.com/en-us/windows/desktop/hh852363.aspx),
# so a standalone DirectX include path is only needed for pre-VS2012 toolchains.
find_path(DX9_INCLUDE_PATH d3d9.h
          HINTS "$ENV{DXSDK_DIR}/Include"
                "$ENV{PROGRAMFILES}/Microsoft DirectX SDK/Include"
          DOC "The directory where d3d9.h resides")
find_path(DX10_INCLUDE_PATH D3D10.h
          HINTS "$ENV{DXSDK_DIR}/Include"
                "$ENV{PROGRAMFILES}/Microsoft DirectX SDK/Include"
          DOC "The directory where D3D10.h resides")
find_path(DX11_INCLUDE_PATH D3D11.h
          HINTS "$ENV{DXSDK_DIR}/Include"
                "$ENV{PROGRAMFILES}/Microsoft DirectX SDK/Include"
          DOC "The directory where D3D11.h resides")
# Normalize each search result into a 1/0 FOUND flag.
foreach(dx_ver 9 10 11)
  if(DX${dx_ver}_INCLUDE_PATH)
    set(DX${dx_ver}_FOUND 1)
  else()
    set(DX${dx_ver}_FOUND 0)
  endif()
endforeach()
ENDIF (WIN32 AND MSVC_VERSION LESS 1700)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindNVTX.cmake | CMake | #
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# Output variables:
# NVTX_FOUND
# NVTX_INCLUDE_DIR
if( WIN32 )
# On Windows, the NVTX headers are in the include directory of the CUDA Toolkit
# NOTE(review): this probe searches for device_functions.h rather than
# nvToolsExt.h — presumably any CUDA-toolkit header suffices to locate the
# shared include directory; confirm before changing the probe name.
find_path( NVTX_INCLUDE_DIR
NAMES device_functions.h
PATHS
${CUDA_TOOLKIT_ROOT_DIR}
ENV CUDA_PATH
ENV CUDA_INC_PATH
PATH_SUFFIXES include
NO_DEFAULT_PATH
)
elseif( UNIX )
# On Linux, the NVTX headers are in a separate 'targets' directory
find_path( NVTX_INCLUDE_DIR
NAMES nvToolsExt.h
PATHS
${CUDA_TOOLKIT_ROOT_DIR}/targets/x86_64-linux/include
ENV CUDA_PATH
ENV CUDA_INC_PATH
PATH_SUFFIXES include
NO_DEFAULT_PATH
)
endif()
# Found when the header directory was located on the active platform.
if( NVTX_INCLUDE_DIR )
set( NVTX_FOUND TRUE )
endif()
mark_as_advanced( NVTX_INCLUDE_DIR )
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindOpenEXR.cmake | CMake | #
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# Find OpenEXR package.
# Optional input variable: OpenEXR_ROOT
# Output variables:
# OpenEXR_FOUND
# OpenEXR_INCLUDE_DIR
# OpenEXR_LIBRARIES
# OpenEXR_VERSION
# Skip the (expensive) re-probe when a previous configure already succeeded.
if( OpenEXR_FOUND )
return()
endif()
set( OpenEXR_LIB_NAMES IlmImf Half Iex Imath IlmThread )
# If OpenEXR_ROOT has changed, unset variables that depend upon it.
set( OpenEXR_ROOT "" CACHE PATH "Path to OpenEXR installation directory" )
if( NOT "${OpenEXR_ROOT}" STREQUAL "${OpenEXR_ROOT_PREVIOUS}" )
message( "New value detected for OpenEXR_ROOT: ${OpenEXR_ROOT}" )
unset( OpenEXR_INCLUDE_DIR )
unset( OpenEXR_LIBRARIES )
unset( OpenEXR_LIB_DIR )
foreach( LIB ${OpenEXR_LIB_NAMES} )
unset( OpenEXR_${LIB}_RELEASE )
unset( OpenEXR_${LIB}_DEBUG )
endforeach( LIB )
unset( OpenEXR_VERSION )
endif()
# Remember the current root so the next configure can detect a change.
set( OpenEXR_ROOT_PREVIOUS "${OpenEXR_ROOT}" CACHE PATH "Previous path to OpenEXR" FORCE )
# Find OpenEXR includes.
find_path( OpenEXR_INCLUDE_DIR ImfOutputFile.h
HINTS "${OpenEXR_ROOT}/include/OpenEXR" )
mark_as_advanced( OpenEXR_INCLUDE_DIR )
# Get version number from header, which we need for the library names.
set( OpenEXR_VERSION "" CACHE STRING "OpenEXR version string" )
set( CONFIG_H "${OpenEXR_INCLUDE_DIR}/OpenEXRConfig.h" )
if( NOT OpenEXR_VERSION AND EXISTS "${CONFIG_H}" )
message( "Reading OpenEXR version from ${CONFIG_H}" )
file( STRINGS "${CONFIG_H}" VERSION_STRING
REGEX "#define OPENEXR_VERSION_STRING" )
string( REGEX REPLACE ".*\"([0-9.]+)\".*" "\\1" VERSION_STRING "${VERSION_STRING}" )
set( OpenEXR_VERSION "${VERSION_STRING}" CACHE STRING "OpenEXR version string" FORCE )
endif()
# Split "major.minor[.patch]" into the "major_minor" suffix used in library
# file names (e.g. IlmImf-2_2).
string( REGEX REPLACE "^([0-9]+).*" "\\1" VERSION_MAJOR "${OpenEXR_VERSION}" )
string( REGEX REPLACE "^[0-9]+\\.([0-9]+).*" "\\1" VERSION_MINOR "${OpenEXR_VERSION}" )
set( VERSION_SUFFIX "${VERSION_MAJOR}_${VERSION_MINOR}" )
# Find OpenEXR libraries.
set( OpenEXR_LIBRARIES "" )
foreach( LIB ${OpenEXR_LIB_NAMES} )
find_library( OpenEXR_${LIB}_RELEASE
NAMES "${LIB}_s" "${LIB}-${VERSION_SUFFIX}_s" "${LIB}"
HINTS "${OpenEXR_LIB_DIR}" )
mark_as_advanced( OpenEXR_${LIB}_RELEASE )
if( OpenEXR_${LIB}_RELEASE )
list( APPEND OpenEXR_LIBRARIES optimized "${OpenEXR_${LIB}_RELEASE}" )
endif()
find_library( OpenEXR_${LIB}_DEBUG
NAMES "${LIB}_s_d" "${LIB}-${VERSION_SUFFIX}_s_d"
HINTS "${OpenEXR_LIB_DIR}" )
mark_as_advanced( OpenEXR_${LIB}_DEBUG )
if( OpenEXR_${LIB}_DEBUG )
list( APPEND OpenEXR_LIBRARIES debug "${OpenEXR_${LIB}_DEBUG}" )
elseif( OpenEXR_${LIB}_RELEASE )
# Fallback: use release libraries if no debug libraries were found.
list( APPEND OpenEXR_LIBRARIES debug "${OpenEXR_${LIB}_RELEASE}" )
endif()
endforeach( LIB )
include( FindPackageHandleStandardArgs )
# find_package_handle_standard_args reports the value of the first variable
# on success, so make sure this is the actual OpenEXR library
find_package_handle_standard_args( OpenEXR
REQUIRED_VARS
OpenEXR_IlmImf_RELEASE OpenEXR_Half_RELEASE OpenEXR_Iex_RELEASE OpenEXR_Imath_RELEASE OpenEXR_IlmThread_RELEASE
OpenEXR_INCLUDE_DIR
VERSION_VAR OpenEXR_VERSION )
foreach( LIB ${OpenEXR_LIB_NAMES} )
if( OpenEXR_${LIB}_RELEASE )
set( target OpenEXR::${LIB} )
add_library( ${target} STATIC IMPORTED )
# We only have release libraries on Linux
if( WIN32 )
set_target_properties( ${target} PROPERTIES
IMPORTED_LOCATION_RELEASE ${OpenEXR_${LIB}_RELEASE}
IMPORTED_LOCATION_DEBUG ${OpenEXR_${LIB}_DEBUG}
MAP_IMPORTED_CONFIG_MINSIZEREL Release
MAP_IMPORTED_CONFIG_RELWITHDEBINFO Release )
# We don't have PDB files for debug builds, so ignore
# LNK4099 PDB 'filename' was not found with 'object/library' or at 'path'; linking object as if no debug info
set_property( TARGET ${target} APPEND PROPERTY INTERFACE_LINK_OPTIONS $<$<CONFIG:Debug>:/ignore:4099> )
else()
set_target_properties( ${target} PROPERTIES
IMPORTED_LOCATION ${OpenEXR_${LIB}_RELEASE}
MAP_IMPORTED_CONFIG_DEBUG ""
MAP_IMPORTED_CONFIG_MINSIZEREL ""
MAP_IMPORTED_CONFIG_RELWITHDEBINFO "" )
endif()
set_property( TARGET ${target} APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${OpenEXR_INCLUDE_DIR} )
endif()
endforeach()
if( OpenEXR_IlmImf_RELEASE AND OpenEXR_IlmThread_RELEASE )
# Record the library dependencies for IlmImf on the other OpenEXR libraries
set_property( TARGET OpenEXR::IlmImf PROPERTY INTERFACE_LINK_LIBRARIES
OpenEXR::Half OpenEXR::Iex OpenEXR::Imath OpenEXR::IlmThread )
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindOptiX.cmake | CMake | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Locate the OptiX distribution. Search relative to the SDK first, then look in the system.
# Our initial guess will be within the SDK.
set(OptiX_INSTALL_DIR "${CMAKE_SOURCE_DIR}/../" CACHE PATH "Path to OptiX installed location.")
# The distribution contains only 64 bit libraries. Error when we have been mis-configured.
if(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
if(WIN32)
message(SEND_ERROR "Make sure when selecting the generator, you select one with Win64 or x64.")
endif()
message(FATAL_ERROR "OptiX only supports builds configured for 64 bits.")
endif()
# search path based on the bit-ness of the build. (i.e. 64: bin64, lib64; 32:
# bin, lib). Note that on Mac, the OptiX library is a universal binary, so we
# only need to look in lib and not lib64 for 64 bit builds.
if(NOT APPLE)
set(bit_dest "64")
else()
set(bit_dest "")
endif()
# Include
# First restrict the search to the configured install dir; the second call
# falls back to the default system search paths if that failed.
find_path(OptiX_INCLUDE
NAMES optix.h
PATHS "${OptiX_INSTALL_DIR}/include"
NO_DEFAULT_PATH
)
find_path(OptiX_INCLUDE
NAMES optix.h
)
# Report a missing OptiX component. Escalates to a fatal configure error when
# both the overall find_package(OptiX) call and this component are required;
# otherwise prints a status message unless QUIET was requested.
function(OptiX_report_error error_message required component )
# A per-component OPTIONAL_COMPONENTS request can downgrade the hard-coded
# 'required' default passed by the caller.
if(DEFINED OptiX_FIND_REQUIRED_${component} AND NOT OptiX_FIND_REQUIRED_${component})
set(required FALSE)
endif()
if(required AND OptiX_FIND_REQUIRED)
message(FATAL_ERROR "${error_message} Please locate before proceeding.")
elseif(NOT OptiX_FIND_QUIETLY)
message(STATUS "${error_message}")
endif()
endfunction()
# Fail (or warn, per find_package arguments) when the headers were not located.
if(NOT OptiX_INCLUDE)
OptiX_report_error("OptiX headers (optix.h and friends) not found." TRUE headers )
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/FindZlibStatic.cmake | CMake | #
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# This is a wrapper for FindZLIB that returns the static library instead of the DSO/DLL.
# Optional input variable: ZlibStatic_ROOT
# Output variables:
# ZlibStatic_FOUND
# ZlibStatic_INCLUDE_DIR
# ZlibStatic_LIBRARIES
# ZlibStatic_VERSION
# FindZLIB honors ZLIB_ROOT, but the lack of a pre-existing cache entry for it is not user-friendly.
set( ZlibStatic_ROOT "" CACHE PATH "Path to Zlib installation directory" )
if( ZlibStatic_ROOT AND NOT ZLIB_ROOT )
set( ZLIB_ROOT "${ZlibStatic_ROOT}" CACHE PATH "Path to Zlib installation directory" FORCE )
# Drop stale results from any previous probe so FindZLIB re-searches under the new root.
unset( ZLIB_INCLUDE_DIR CACHE )
unset( ZLIB_LIBRARY_RELEASE CACHE )
unset( ZLIB_LIBRARY_DEBUG CACHE )
endif()
find_package( ZLIB )
if( NOT ZLIB_FOUND OR NOT ZLIB_LIBRARY_RELEASE )
return()
endif()
# Verify that zlibstatic exists alongside the zlib library.
get_filename_component( LIB_DIR ${ZLIB_LIBRARY_RELEASE} DIRECTORY )
get_filename_component( LIB_FILE_RELEASE ${ZLIB_LIBRARY_RELEASE} NAME )
string( REGEX REPLACE "zlib" "zlibstatic" LIB_FILE_RELEASE "${LIB_FILE_RELEASE}" )
file( GLOB ZlibStatic_LIBRARY_RELEASE "${LIB_DIR}/${LIB_FILE_RELEASE}" )
if( ZLIB_LIBRARY_DEBUG )
get_filename_component( LIB_FILE_DEBUG ${ZLIB_LIBRARY_DEBUG} NAME )
string( REGEX REPLACE "zlib" "zlibstatic" LIB_FILE_DEBUG "${LIB_FILE_DEBUG}" )
file( GLOB ZlibStatic_LIBRARY_DEBUG "${LIB_DIR}/${LIB_FILE_DEBUG}" )
else()
# Fall back on release library if debug library is not found.
set( ZlibStatic_LIBRARY_DEBUG "${ZlibStatic_LIBRARY_RELEASE}"
CACHE FILEPATH "Path to debug Zlib library" )
endif()
if ( ZlibStatic_LIBRARY_RELEASE AND ZlibStatic_LIBRARY_DEBUG )
set( ZlibStatic_LIBRARIES "optimized;${ZlibStatic_LIBRARY_RELEASE};debug;${ZlibStatic_LIBRARY_DEBUG}"
CACHE STRING "Zlib static libraries" )
endif()
set( ZlibStatic_INCLUDE_DIR "${ZLIB_INCLUDE_DIR}"
CACHE PATH "Path to Zlib include directory" )
set( ZlibStatic_VERSION "${ZLIB_VERSION_STRING}"
CACHE STRING "Zlib version number" )
# Fix: include the helper module explicitly instead of relying on the
# find_package(ZLIB) call above having loaded it as a side effect.
include( FindPackageHandleStandardArgs )
find_package_handle_standard_args( ZlibStatic
REQUIRED_VARS
ZlibStatic_LIBRARY_RELEASE
ZlibStatic_INCLUDE_DIR
VERSION_VAR ZlibStatic_VERSION )
if( ZlibStatic_FOUND )
add_library( Zlib::Static STATIC IMPORTED )
set_target_properties( Zlib::Static PROPERTIES
# Use the release configuration by default
IMPORTED_LOCATION ${ZlibStatic_LIBRARY_RELEASE}
IMPORTED_LOCATION_DEBUG ${ZlibStatic_LIBRARY_DEBUG} )
set_property( TARGET Zlib::Static APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${ZLIB_INCLUDE_DIR} )
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/LinuxCPUInfo.cmake | CMake |
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# Parse /proc/cpuinfo (Linux only) into the VENDOR_ID, CPU_FAMILY, MODEL and
# FLAGS variables describing the host CPU. Silently does nothing when the
# file is absent (non-Linux hosts).
if(EXISTS "/proc/cpuinfo")
  file(READ /proc/cpuinfo PROC_CPUINFO)

  set(VENDOR_ID_RX "vendor_id[ \t]*:[ \t]*([a-zA-Z]+)\n")
  string(REGEX MATCH "${VENDOR_ID_RX}" VENDOR_ID "${PROC_CPUINFO}")
  string(REGEX REPLACE "${VENDOR_ID_RX}" "\\1" VENDOR_ID "${VENDOR_ID}")

  set(CPU_FAMILY_RX "cpu family[ \t]*:[ \t]*([0-9]+)")
  string(REGEX MATCH "${CPU_FAMILY_RX}" CPU_FAMILY "${PROC_CPUINFO}")
  string(REGEX REPLACE "${CPU_FAMILY_RX}" "\\1" CPU_FAMILY "${CPU_FAMILY}")

  set(MODEL_RX "model[ \t]*:[ \t]*([0-9]+)")
  string(REGEX MATCH "${MODEL_RX}" MODEL "${PROC_CPUINFO}")
  string(REGEX REPLACE "${MODEL_RX}" "\\1" MODEL "${MODEL}")

  set(FLAGS_RX "flags[ \t]*:[ \t]*([a-zA-Z0-9 _]+)\n")
  string(REGEX MATCH "${FLAGS_RX}" FLAGS "${PROC_CPUINFO}")
  string(REGEX REPLACE "${FLAGS_RX}" "\\1" FLAGS "${FLAGS}")

  # Optional diagnostic dump, enabled by setting LINUX_CPUINFO.
  if(LINUX_CPUINFO)
    message(STATUS "LinuxCPUInfo.cmake:")
    message(STATUS "VENDOR_ID : ${VENDOR_ID}")
    message(STATUS "CPU_FAMILY : ${CPU_FAMILY}")
    message(STATUS "MODEL : ${MODEL}")
    message(STATUS "FLAGS : ${FLAGS}")
  endif()
endif()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/Macros.cmake | CMake |
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# Appends VAL (space separated) to the string held in the variable named STR.
macro(APPEND_TO_STRING STR VAL)
  if (NOT "${ARGN}" STREQUAL "")
    message(SEND_ERROR "APPEND_TO_STRING takes only a single argument to append. Offending args: ${ARGN}")
  endif()
  # Test for emptiness explicitly: the old `if(${STR})` check treated CMake
  # false constants ("0", "OFF", "FALSE", "NOTFOUND", ...) as "no previous
  # value" and silently discarded them instead of appending after them.
  # You need to double ${} STR to get the value. The first one gets
  # the variable, the second one gets the value.
  if (NOT "${${STR}}" STREQUAL "")
    set(${STR} "${${STR}} ${VAL}")
  else()
    set(${STR} "${VAL}")
  endif()
endmacro()
# Prepends VAL (space separated) to the string held in the variable named STR.
macro(PREPEND_TO_STRING STR VAL)
  if (NOT "${ARGN}" STREQUAL "")
    message(SEND_ERROR "PREPEND_TO_STRING takes only a single argument to append. Offending args: ${ARGN}")
  endif()
  # Explicit emptiness test: the old `if(${STR})` check dropped existing
  # values that happen to be CMake false constants ("0", "OFF", "NOTFOUND",
  # ...) and replaced them with VAL alone.
  # You need to double ${} STR to get the value. The first one gets
  # the variable, the second one gets the value.
  if (NOT "${${STR}}" STREQUAL "")
    set(${STR} "${VAL} ${${STR}}")
  else()
    set(${STR} "${VAL}")
  endif()
endmacro()
# Appends each remaining argument, prefixed with ${prefix}, onto the list
# variable named by list_out (the existing contents are preserved).
macro(prepend list_out prefix)
  foreach(_element ${ARGN})
    list(APPEND ${list_out} "${prefix}${_element}")
  endforeach()
endmacro()
#################################################################
# FORCE_ADD_FLAGS(parameter flags)
#
# Ensures every flag in the argument list is present in ${parameter},
# appending only those that are missing, and force-writes the result back
# into the cache. Duplicates already inside ${parameter} are left alone.
#################################################################
macro(FORCE_ADD_FLAGS parameter)
  # Tokenize the current value so individual flags can be compared.
  set(p_list ${${parameter}})
  separate_arguments(p_list)
  # Start from the current value and append missing flags to it.
  set(new_parameter ${${parameter}})
  foreach(required_arg ${ARGN})
    # Arguments may arrive as quoted groups ("-msse -msse2"); split them
    # into individual options first.
    set(TMP ${required_arg})
    separate_arguments(TMP)
    foreach(option ${TMP})
      # Append the option only when it is not already present.
      list(FIND p_list "${option}" _option_index)
      if(_option_index EQUAL -1)
        set(new_parameter "${new_parameter} ${option}")
      endif()
    endforeach()
  endforeach()
  set(${parameter} ${new_parameter} CACHE STRING "" FORCE)
endmacro()
# Force-writes VARIABLE into the cache with VALUE/TYPE/COMMENT, but only
# while PASSED_FIRST_CONFIGURE is unset (i.e. during the very first
# configure). Later configures leave the user's cache entry untouched.
macro(FIRST_TIME_SET VARIABLE VALUE TYPE COMMENT)
  if(NOT PASSED_FIRST_CONFIGURE)
    set(${VARIABLE} ${VALUE} CACHE ${TYPE} ${COMMENT} FORCE)
  endif()
endmacro()
# Forwards all arguments to message(), but only during the first configure
# (silent once PASSED_FIRST_CONFIGURE has been set).
macro(FIRST_TIME_MESSAGE)
  if(NOT PASSED_FIRST_CONFIGURE)
    message(${ARGV})
  endif()
endmacro()
# Locate the bin2cpp helper script used by ll_to_cpp and bc_to_cpp below.
find_file(bin2cpp_cmake bin2cpp.cmake ${CMAKE_MODULE_PATH} )
# Re-store the result as INTERNAL so the path is cached but hidden from
# cmake-gui / ccmake.
set(bin2cpp_cmake "${bin2cpp_cmake}" CACHE INTERNAL "Path to internal bin2cpp.cmake" FORCE)
# Converts an LLVM IR (.ll) file to bitcode embedded as a byte array in
# outputSource, accessible through the C symbol exportSymbol.
#
# input         : [in]  .ll file, assembled with llvm-as at build time
# outputSource  : [out] generated .cpp containing the encoded bitcode
# outputInclude : [out] generated header declaring the symbol (written
#                       immediately, at configure time, via bin2h)
# exportSymbol  : [in]  name of the C symbol used to access the data
function(ll_to_cpp input outputSource outputInclude exportSymbol)
get_filename_component(outputABS "${outputSource}" ABSOLUTE )
get_filename_component(outputDir "${outputSource}" PATH )
get_filename_component(outputName "${outputSource}" NAME )
file(RELATIVE_PATH outputRelPath "${CMAKE_BINARY_DIR}" "${outputDir}")
# Temporary bitcode file produced by llvm-as and removed after embedding.
set(bc_filename "${outputName}.tmp.bc")
# Generate header file (configure time)
include(bin2cpp)
bin2h(${outputInclude} ${exportSymbol} "${bc_filename}")
# Build-time pipeline: assemble .ll -> .bc, embed the .bc into the .cpp via
# the bin2cpp script, then delete the temporary bitcode.
add_custom_command(
OUTPUT ${outputSource}
# convert ll to bc
COMMAND ${LLVM_llvm-as} "${input}" -o "${bc_filename}"
# convert bc file to cpp
COMMAND ${CMAKE_COMMAND} -DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
-DCPP_FILE:STRING="${outputSource}"
-DCPP_SYMBOL:STRING="${exportSymbol}"
-DSOURCE_BASE:STRING="${outputDir}"
-DSOURCES:STRING="${bc_filename}"
-P "${bin2cpp_cmake}"
# Remove temp bc file
COMMAND ${CMAKE_COMMAND} -E remove -f "${bc_filename}"
WORKING_DIRECTORY ${outputDir}
MAIN_DEPENDENCY ${input}
DEPENDS ${bin2cpp_cmake}
COMMENT "Generating ${outputRelPath}/${outputName}"
)
endfunction()
# Embeds an existing LLVM bitcode (.bc) file as a byte array in outputSource,
# accessible through the C symbol exportSymbol.
#
# input         : [in]  .bc file to embed
# outputSource  : [out] generated .cpp containing the encoded bitcode
# outputInclude : [out] generated header declaring the symbol (written
#                       immediately, at configure time, via bin2h)
# exportSymbol  : [in]  name of the C symbol used to access the data
function(bc_to_cpp input outputSource outputInclude exportSymbol)
get_filename_component(outputABS "${outputSource}" ABSOLUTE )
get_filename_component(outputDir "${outputABS}" PATH )
get_filename_component(outputName "${outputABS}" NAME )
# NOTE(review): relative path is computed against EXTERNAL_BINARY_DIR here
# (ll_to_cpp uses CMAKE_BINARY_DIR) -- confirm this asymmetry is intended.
file(RELATIVE_PATH outputRelPath "${EXTERNAL_BINARY_DIR}" "${outputDir}")
get_filename_component(inputABS "${input}" ABSOLUTE)
get_filename_component(inputDir "${inputABS}" PATH)
get_filename_component(inputName "${inputABS}" NAME)
set(bc_filename "${inputName}")
# Generate header file (configure time)
include(bin2cpp)
bin2h(${outputInclude} ${exportSymbol} "${bc_filename}")
# Build-time rule: embed the bitcode into the .cpp via the bin2cpp script.
add_custom_command(
OUTPUT ${outputSource}
# convert bc file to cpp
COMMAND ${CMAKE_COMMAND} -DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
-DCPP_FILE:STRING="${outputSource}"
-DCPP_SYMBOL:STRING="${exportSymbol}"
-DSOURCE_BASE:STRING="${inputDir}"
-DSOURCES:STRING="${bc_filename}"
-P "${bin2cpp_cmake}"
WORKING_DIRECTORY ${outputDir}
MAIN_DEPENDENCY ${input}
DEPENDS ${bin2cpp_cmake}
COMMENT "Generating ${outputRelPath}/${outputName}"
)
endfunction()
################################################################################
# Compile the cpp file using clang, run an optimization pass and use bin2c to take the
# resulting code and embed it into a cpp for loading at runtime.
#
# Usage: compile_llvm_runtime( input symbol output_var [clang args] )
# input      : [in] File to be compiled by clang
# symbol     : [in] Name of C symbol to use for accessing the generated code. Also used to generate the output file names.
# output_var : [out] Generated cpp and header files used to access compiled code at runtime
# clang args : [in] list of arguments to clang
#
function(compile_llvm_runtime input symbol output_var)
set(OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}")
get_filename_component(base "${input}" NAME_WE)
get_filename_component(name "${input}" NAME)
# Intermediate artifacts: raw bitcode, optimized bitcode, and its disassembly.
set(bc "${OUTPUT_DIR}/${base}.bc")
set(opt_bc "${OUTPUT_DIR}/${base}_opt.bc")
set(opt_ll "${OUTPUT_DIR}/${base}_opt.ll")
# Build-time pipeline: clang -> opt (always-inline, mem2reg, scalarrepl)
# -> llvm-dis to produce readable IR for embedding.
add_custom_command( OUTPUT ${bc} ${opt_bc} ${opt_ll}
COMMAND ${LLVM_TOOLS_BINARY_DIR}/clang ${input} -o ${bc} ${ARGN}
COMMAND ${LLVM_TOOLS_BINARY_DIR}/opt ${bc} -o ${opt_bc} -always-inline -mem2reg -scalarrepl
COMMAND ${LLVM_TOOLS_BINARY_DIR}/llvm-dis ${opt_bc} -o ${opt_ll}
COMMENT "Compiling ${name} to ${base}_opt.ll"
WORKING_DIRECTORY "${OUTPUT_DIR}"
MAIN_DEPENDENCY "${input}"
)
set(bin2c_files
"${CMAKE_CURRENT_BINARY_DIR}/${symbol}.cpp"
"${CMAKE_CURRENT_BINARY_DIR}/${symbol}.h"
)
# Embed the optimized disassembly; ll_to_cpp re-assembles it at build time.
ll_to_cpp(${opt_ll} ${bin2c_files} ${symbol})
set_source_files_properties( ${bin2c_files} PROPERTIES GENERATED TRUE )
set(${output_var} ${bin2c_files} PARENT_SCOPE)
endfunction()
################################################################################
# Assemble the ll file to bitcode using llvm-as and use bin2c to take the
# resulting code and embed it into a cpp for loading at runtime.
#
# Usage: assemble_llvm( input symbol output_var )
# input      : [in]  .ll file to be assembled
# symbol     : [in]  Name of C symbol to use for accessing the generated code.
#                    Also used to generate the output file names.
# output_var : [out] Generated cpp and header files used to access compiled
#                    code at runtime
#
function(assemble_llvm input symbol output_var)
  # Output pair is named after the export symbol, in the current binary dir.
  # (Removed unused locals OUTPUT_DIR/base/name left over from
  # compile_llvm_runtime, which this function was copied from.)
  set(bin2c_files
    "${CMAKE_CURRENT_BINARY_DIR}/${symbol}.cpp"
    "${CMAKE_CURRENT_BINARY_DIR}/${symbol}.h"
  )
  # ll_to_cpp sets up the build-time rule that assembles and embeds the IR,
  # and writes the header immediately at configure time.
  ll_to_cpp(${input} ${bin2c_files} ${symbol})
  # Mark the pair as generated so they need not exist at configure time.
  set_source_files_properties( ${bin2c_files} PROPERTIES GENERATED TRUE )
  set(${output_var} ${bin2c_files} PARENT_SCOPE)
endfunction()
################################################################################
# Compile the cpp file using clang
#
# Usage: cpp_to_bc( input output_var [clang args] )
# input : [in] File to be compiled by clang
# output_var : [out] Generated bc file
# clang args : [in] list of arguments to clang
#
function(cpp_to_bc input output_var)
set(OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}")
get_filename_component(base "${input}" NAME_WE)
get_filename_component(name "${input}" NAME)
set(bc "${OUTPUT_DIR}/${base}.bc")
# Build-time rule: clang -c compiles the source straight to LLVM bitcode.
add_custom_command( OUTPUT ${bc}
COMMAND ${LLVM_TOOLS_BINARY_DIR}/clang ${input} -c -o ${bc} ${ARGN}
COMMENT "Compiling ${name} to ${base}.bc"
WORKING_DIRECTORY "${OUTPUT_DIR}"
MAIN_DEPENDENCY "${input}"
)
# Return the path of the generated bitcode to the caller.
set(${output_var} ${bc} PARENT_SCOPE)
endfunction()
################################################################################
# Copy ptx scripts into a string in a cpp header file.
#
# Usage: ptx_to_cpp( ptx_cpp_headers my_directory FILE1 FILE2 ... FILEN )
# ptx_cpp_files : [out] List of cpp files created (Note: new files are appended to this list)
# directory : [in] Directory in which to place the resulting headers
# FILE1 .. FILEN : [in] ptx files to be cpp stringified
#
# FILE1 -> filename: ${FILE1}_ptx.cpp
# -> string : const char* nvrt::${FILE1}_ptx = "...";
macro( ptx_to_cpp ptx_cpp_files directory )
foreach( file ${ARGN} )
# Arguments that are not .ptx files are silently skipped.
if( ${file} MATCHES ".*\\.ptx$" )
# Derive the generated file and variable names from the ptx base name.
get_filename_component( base_name ${file} NAME_WE )
set( cpp_filename ${directory}/${base_name}_ptx.cpp )
set( variable_name ${base_name}_ptx )
set( ptx2cpp ${CMAKE_SOURCE_DIR}/CMake/ptx2cpp.cmake )
# Build-time rule: run the ptx2cpp helper script to stringify the ptx.
add_custom_command( OUTPUT ${cpp_filename}
COMMAND ${CMAKE_COMMAND}
-DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
-DCPP_FILE:STRING="${cpp_filename}"
-DPTX_FILE:STRING="${file}"
-DVARIABLE_NAME:STRING=${variable_name}
-DNAMESPACE:STRING=optix
-P ${ptx2cpp}
DEPENDS ${file}
DEPENDS ${ptx2cpp}
COMMENT "${ptx2cpp}: converting ${file} to ${cpp_filename}"
)
list(APPEND ${ptx_cpp_files} ${cpp_filename} )
endif( ${file} MATCHES ".*\\.ptx$" )
endforeach( file )
endmacro( ptx_to_cpp )
################################################################################
# Strip library of all local symbols
#
# Usage: strip_symbols( target )
# target : [in] target whose built binary is stripped in a post-build step
function(strip_symbols target)
  # No-op on Windows; CMAKE_STRIP applies to the ELF/Mach-O toolchains only.
  if(WIN32)
    return()
  endif()
  add_custom_command(
    TARGET ${target}
    POST_BUILD
    # -x strips all local symbols
    COMMAND ${CMAKE_STRIP} -x $<TARGET_FILE:${target}>
    COMMENT "Stripping symbols from ${target}"
  )
endfunction()
################################################################################
# Only export the symbols that we need.
#
# Usage: optix_setup_exports( target export_file hidden_file )
# target : [in] target name for the library to be stripped
# export_file : [in] name of the file that contains the export symbol names
# hidden_file : [in] name of the file that contains the hidden symbol names.
# Might be empty string in which case all non-exported symbols
# are hidden. Only used for UNIX and NOT APPLE.
#
# Do not use this macro with WIN32 DLLs unless you are not using the dllexport
# macros. The DLL name will be set using the SOVERSION property of the target,
# so be sure to set that before calling this macro
#
function( optix_setup_exports target export_file hidden_file)
# Suck in the exported symbol list. It should define exported_symbols.
include(${export_file})
# Suck in the hidden symbol list unless hidden_file is empty. It should
# define hidden_symbols.
if (NOT "${hidden_file}" STREQUAL "")
include(${hidden_file})
endif()
if( UNIX )
if ( APPLE )
# -exported_symbols_list lists the exact set of symbols to export. You can call it
# more than once if needed.
set( export_arg -exported_symbols_list )
else()
# -Bsymbolic tells the linker to resolve any local symbols locally first.
# --version-script allows us to be explicit about which symbols to export.
set( export_arg -Bsymbolic,--version-script )
endif()
# Create the symbol export file. Since Apple and Linux have different file formats
# for doing this we will have to specify the information in the file differently.
set(exported_symbol_file ${CMAKE_CURRENT_BINARY_DIR}/${target}_exported_symbols.txt)
if(APPLE)
# Mach-O symbol names carry a leading underscore; prefix each base name.
set(modified_symbols)
foreach(symbol ${exported_symbols})
list(APPEND modified_symbols "_${symbol}")
endforeach()
# Just list the symbols. One per line. Since we are treating the list as a string
# here we can replace the ';' character with a newline.
string(REPLACE ";" "\n" exported_symbol_file_content "${modified_symbols}")
file(WRITE ${exported_symbol_file} "${exported_symbol_file_content}\n")
else()
# GNU ld version-script format:
#
# {
# global:
# extern "C" {
# exported_symbol;
# };
# local:
# hidden_symbol; // or "*";
# };
# Just list the symbols. One per line. Since we are treating the list as a string
# here we can insert the newline after the ';' character.
string(REPLACE ";" ";\n" exported_symbol_file_content "${exported_symbols}")
if (NOT "${hidden_file}" STREQUAL "")
string(REPLACE ";" ";\n" hidden_symbol_file_content "${hidden_symbols}")
else()
# No explicit hidden list: hide everything that is not exported.
set( hidden_symbol_file_content "*" )
endif()
file(WRITE ${exported_symbol_file} "{\nglobal:\nextern \"C\" {\n${exported_symbol_file_content};\n};\nlocal:\n${hidden_symbol_file_content};\n};\n")
endif()
# Add the command to the LINK_FLAGS
set_property( TARGET ${target}
APPEND_STRING
PROPERTY LINK_FLAGS
" -Wl,${export_arg},${exported_symbol_file}"
)
elseif( WIN32 )
# Windows path: generate a module-definition (.def) file instead.
set(exported_symbol_file ${CMAKE_CURRENT_BINARY_DIR}/${target}.def)
set(name ${target} )
get_property( abi_version TARGET ${target} PROPERTY SOVERSION )
if( abi_version )
set(name "${name}.${abi_version}")
endif()
# Format is:
#
# NAME <dllname>
# EXPORTS
# <names>
#
string(REPLACE ";" "\n" def_file_content "${exported_symbols}" )
file(WRITE ${exported_symbol_file} "NAME ${name}.dll\nEXPORTS\n${def_file_content}")
# Add the command to the LINK_FLAGS
set_property( TARGET ${target}
APPEND_STRING
PROPERTY LINK_FLAGS
" /DEF:${exported_symbol_file}"
)
endif()
# Make sure that if the exported_symbol_file changes we relink the library.
set_property( TARGET ${target}
APPEND
PROPERTY LINK_DEPENDS
"${exported_symbol_file}"
)
endfunction()
################################################################################
# Some helper functions for pushing and popping variable values
#
# push_variable(var value...) saves the current value of `var` onto a
# companion stack variable `var_STACK` and assigns the new value(s) to `var`,
# both in the caller's scope. Restore the previous value with pop_variable().
function(push_variable variable)
if(ARGC LESS 2)
message(FATAL_ERROR "push_variable requires at least one value to push.")
endif()
# Because the old value may be a list, we need to indicate how many items
# belong to this push. We do this by marking the start of the new push:
# each stack frame is <old value items...> followed by the index at which
# the frame begins.
list(LENGTH ${variable}_STACK start_index)
# If the value of variable is empty, then we need to leave a placeholder,
# because CMake doesn't have an "empty" token.
if (DEFINED ${variable} AND NOT ${variable} STREQUAL "")
list(APPEND ${variable}_STACK ${${variable}} ${start_index})
else()
list(APPEND ${variable}_STACK ${variable}_EMPTY ${start_index})
endif()
# Make the stack visible outside of the function's scope.
set(${variable}_STACK ${${variable}_STACK} PARENT_SCOPE)
# Set the new value of the variable.
set(${variable} ${ARGN} PARENT_SCOPE)
endfunction()
# pop_variable(var) restores the most recently pushed value of `var` from
# `var_STACK` into the caller's scope (inverse of push_variable above).
# Fails hard when there is nothing left to pop.
function(pop_variable variable)
# Find the length of the stack to use as an index to the end of the list.
list(LENGTH ${variable}_STACK stack_length)
if(stack_length LESS 2)
message(FATAL_ERROR "${variable}_STACK is empty. Can't pop any more values.")
endif()
math(EXPR stack_end "${stack_length} - 1")
# The last stack element is the index where the saved value begins.
list(GET ${variable}_STACK ${stack_end} variable_start)
math(EXPR variable_end "${stack_end} - 1")
# Collect the indices of every item belonging to the saved value.
foreach(index RANGE ${variable_start} ${variable_end})
list(APPEND list_indices ${index})
endforeach()
list(GET ${variable}_STACK ${list_indices} stack_popped)
# If the first element is our special EMPTY token, then we should empty it out
if(stack_popped STREQUAL "${variable}_EMPTY")
set(stack_popped "")
endif()
# Remove all the items of this frame, including the start-index marker.
list(APPEND list_indices ${stack_end})
list(REMOVE_AT ${variable}_STACK ${list_indices})
# Make the stack visible outside of the function's scope.
set(${variable}_STACK ${${variable}_STACK} PARENT_SCOPE)
# Assign the old value to the variable
set(${variable} ${stack_popped} PARENT_SCOPE)
endfunction()
# Helper function to generate ptx for particular sm versions.
#
# sm_versions[input]: a list of versions, such as sm_13;sm_20. These will be used to
# generate the names of the output files.
# generated_files[output]: list of generated source files
# ARGN[input]: list of input CUDA C files and other options to pass to nvcc.
#
function(compile_ptx sm_versions_in generated_files)
# CUDA_GET_SOURCES_AND_OPTIONS is a FindCUDA internal command that we are going to
# borrow. There are no guarantees on backward compatibility using this macro.
CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
# Check to see if they specified an sm version, and spit out an error.
list(FIND _options -arch arch_index)
if(arch_index GREATER -1)
math(EXPR sm_index "${arch_index}+1")
list(GET _options ${sm_index} sm_value)
message(FATAL_ERROR "-arch ${sm_value} has been specified to compile_ptx. Please remove that option and put it in the sm_versions argument.")
endif()
set(${generated_files})
set(sm_versions ${sm_versions_in})
# sm_20 output is only generated when the available toolkit supports it.
if(NOT CUDA_SM_20)
list(REMOVE_ITEM sm_versions sm_20)
endif()
# Force 64-bit device code for the duration of this function; restored by
# the matching pop_variable() at the bottom.
push_variable(CUDA_64_BIT_DEVICE_CODE ON)
foreach(source ${_sources})
set(ptx_generated_files)
foreach(sm ${sm_versions})
# Generate the 32 bit ptx for 32 bit builds and when the CMAKE_OSX_ARCHITECTURES
# specifies it.
list(FIND CMAKE_OSX_ARCHITECTURES i386 osx_build_32_bit_ptx)
if( CMAKE_SIZEOF_VOID_P EQUAL 4 OR NOT osx_build_32_bit_ptx LESS 0)
set(CUDA_64_BIT_DEVICE_CODE OFF)
CUDA_WRAP_SRCS( ptx_${sm}_32 PTX _generated_files ${source} ${_cmake_options}
OPTIONS -arch ${sm} ${_options}
)
# Add these files onto the list of files.
list(APPEND ptx_generated_files ${_generated_files})
endif()
# Generate the 64 bit ptx for 64 bit builds and when the CMAKE_OSX_ARCHITECTURES
# specifies it.
list(FIND CMAKE_OSX_ARCHITECTURES x86_64 osx_build_64_bit_ptx)
if( CMAKE_SIZEOF_VOID_P EQUAL 8 OR NOT osx_build_64_bit_ptx LESS 0)
set(CUDA_64_BIT_DEVICE_CODE ON)
CUDA_WRAP_SRCS( ptx_${sm}_64 PTX _generated_files ${source} ${_cmake_options}
OPTIONS -arch ${sm} ${_options}
)
# Add these files onto the list of files.
list(APPEND ptx_generated_files ${_generated_files})
endif()
endforeach()
get_filename_component(source_basename "${source}" NAME_WE)
set(cpp_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_ptx.cpp")
set(h_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_ptx.h")
# bin2cpp wants file names relative to the current binary directory.
set(relative_ptx_generated_files)
foreach(file ${ptx_generated_files})
get_filename_component(fname "${file}" NAME)
list(APPEND relative_ptx_generated_files "${fname}")
endforeach()
# Now generate a target that will generate the wrapped version of the ptx
# files at build time
set(symbol "${source_basename}_source")
add_custom_command( OUTPUT ${cpp_wrap}
COMMAND ${CMAKE_COMMAND} -DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
-DCPP_FILE:STRING="${cpp_wrap}"
-DCPP_SYMBOL:STRING="${symbol}"
-DSOURCE_BASE:STRING="${CMAKE_CURRENT_BINARY_DIR}"
-DSOURCES:STRING="${relative_ptx_generated_files}"
ARGS -P "${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake"
DEPENDS ${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake ${ptx_generated_files}
)
# We know the list of files at configure time, so generate the files here
include(bin2cpp)
bin2h("${h_wrap}" ${symbol} ${relative_ptx_generated_files})
list(APPEND ${generated_files} ${ptx_generated_files} ${cpp_wrap} ${h_wrap})
endforeach(source)
pop_variable(CUDA_64_BIT_DEVICE_CODE)
set(${generated_files} ${${generated_files}} PARENT_SCOPE)
endfunction()
# Helper function to generate the appropriate options for a CUDA compile
# based on the target architectures.
#
# Function selects the highest compute capability available and generates PTX for that one.
#
# Usage: cuda_generate_runtime_target_options( output_var target_list )
#
# output[output] is a list-variable to fill with options for CUDA_COMPILE
# ARGN[input] is a list of targets, i.e. sm_11 sm_20 sm_30
# NO_PTX in the input list will not add PTX to the highest SM version
function( cuda_generate_runtime_target_options output )
# remove anything that is not sm_XX, and look for NO_PTX option
# NOTE(review): list(REMOVE_ITEM ARGN ...) operates on ARGN, which is not a
# regular variable; CMake does not guarantee list() mutations of ARGN are
# seen by the later LENGTH/foreach uses -- verify the filtering works.
set( no_ptx FALSE )
foreach(target ${ARGN})
string( REGEX MATCH "^(sm_[0-9][0-9])$" match ${target} )
if( NOT CMAKE_MATCH_1 )
list( REMOVE_ITEM ARGN ${target} )
endif( NOT CMAKE_MATCH_1 )
if( target STREQUAL "NO_PTX" )
set( no_ptx TRUE )
endif()
endforeach(target)
list( LENGTH ARGN valid_target_count )
if( valid_target_count GREATER 0 )
# We will add compute_XX automatically, infer max compatible compute capability.
# check targets for max compute capability
# NOTE(review): STRGREATER is a lexicographic comparison; correct only
# while all SM version numbers have exactly two digits.
set( smver_max "0" )
foreach(target ${ARGN})
string( REGEX MATCH "sm_([0-9][0-9])$" sm_ver_match ${target} )
if( CMAKE_MATCH_1 )
if( ${CMAKE_MATCH_1} STRGREATER smver_max )
set( smver_max ${CMAKE_MATCH_1} )
endif( ${CMAKE_MATCH_1} STRGREATER smver_max )
endif( CMAKE_MATCH_1 )
unset( sm_ver_match )
endforeach(target)
if( no_ptx )
# Deliberately unmatchable sentinel: with NO_PTX no sm version can equal
# smver_max below, so no compute_XX (PTX) entry is ever emitted.
set( smver_max "You can't match me, I'm the ginger bread man!" )
endif()
# copy the input list to a new one and sort it
set( sm_versions ${ARGN} )
list( SORT sm_versions )
# walk the SM versions to generate the entries of gencode
set( options "" )
foreach( sm_ver ${sm_versions} )
string( REGEX MATCH "sm_([0-9][0-9])$" sm_ver_num ${sm_ver} )
# This adds compute_XX automatically, in order to generate PTX.
if( ${CMAKE_MATCH_1} STREQUAL ${smver_max} )
# append the max compute capability, to get compute_XX too.
# this appends the PTX code for the highest SM_ version
set(entry -gencode=arch=compute_${CMAKE_MATCH_1},code=\\\"${sm_ver},compute_${smver_max}\\\")
else( ${CMAKE_MATCH_1} STREQUAL ${smver_max} )
set(entry -gencode=arch=compute_${CMAKE_MATCH_1},code=\\\"${sm_ver}\\\")
endif( ${CMAKE_MATCH_1} STREQUAL ${smver_max} )
list( APPEND options ${entry} )
endforeach( sm_ver ${sm_versions} )
# return the generated option string
set( ${output} ${options} PARENT_SCOPE )
unset( smver_max )
unset( sm_versions )
else( valid_target_count GREATER 0 )
# return empty string
set( ${output} "" PARENT_SCOPE )
endif( valid_target_count GREATER 0 )
endfunction(cuda_generate_runtime_target_options)
# Compile the list of SASS assembler files to cubins.
# Then take the resulting file and store it in a cpp file using bin2c.
#
# Usage: compile_sass_to_cpp( _generated_files files [files...] [OPTIONS ...] )
#
# _generated_files[output] is a list-variable to fill with the names of the generated files
# ARGN[input] is a list of files and nvasm_internal options.
function(compile_sass_to_cpp _generated_files )
  # CUDA_GET_SOURCES_AND_OPTIONS is a FindCUDA internal command that we are
  # going to borrow. There are no guarantees on backward compatibility.
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  set(generated_files)
  foreach(source ${_sources})
    get_filename_component(source_basename ${source} NAME_WE)
    set(cubinfile ${CMAKE_CURRENT_BINARY_DIR}/${source_basename}.cubin)
    set(source ${CMAKE_CURRENT_SOURCE_DIR}/${source})
    set(cuda_build_comment_string "Assembling to cubin file ${source}" )
    # The SASS source must not be handed to the regular compiler.
    set_source_files_properties(${source} PROPERTIES HEADER_FILE_ONLY TRUE)
    # Build-time rule: assemble the SASS source into a cubin.
    add_custom_command(OUTPUT ${cubinfile}
      COMMAND ${CUDA_NVASM_EXECUTABLE} ${_options} ${source} -o ${cubinfile}
      DEPENDS ${source}
      COMMENT "${cuda_build_comment_string}"
      )
    set(cpp_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_cuda.cpp")
    set(h_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_cuda.h")
    get_filename_component(generated_file_path "${cubinfile}" DIRECTORY)
    get_filename_component(relative_cuda_generated_file "${cubinfile}" NAME)
    # Now generate a target that will generate the wrapped version of the cuda
    # files at build time
    set(symbol "${source_basename}_cuda_source")
    add_custom_command( OUTPUT ${cpp_wrap}
      COMMAND ${CMAKE_COMMAND} -DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
      -DCPP_FILE:STRING="${cpp_wrap}"
      -DCPP_SYMBOL:STRING="${symbol}"
      -DSOURCE_BASE:STRING="${generated_file_path}"
      -DSOURCES:STRING="${relative_cuda_generated_file}"
      ARGS -P "${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake"
      DEPENDS ${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake ${cubinfile}
      )
    # We know the list of files at configure time, so generate the header here.
    # BUGFIX: previously passed ${relative_cuda_generated_files} (undefined,
    # trailing "s"), so bin2h received no file name at all.
    include(bin2cpp)
    bin2h("${h_wrap}" ${symbol} ${relative_cuda_generated_file})
    list(APPEND generated_files ${cubinfile} ${cpp_wrap} ${h_wrap})
  endforeach()
  set_source_files_properties(${generated_files} PROPERTIES GENERATED TRUE)
  set(${_generated_files} ${generated_files} PARENT_SCOPE)
endfunction()
# Compile the list of cuda files using the specified format. Then take the resulting file
# and store it in a cpp file using bin2c. This is not appropriate for PTX formats. Use
# compile_ptx for that.
#
# Usage: compile_cuda_to_cpp( target_name format _generated_files files [files...] [OPTIONS ...] )
#
# target_name[input] name to use for mangling output files.
# format[input] OBJ SEPARABLE_OBJ CUBIN FATBIN
# _generated_files[output] is a list-variable to fill with the names of the generated files
# ARGN[input] is a list of files and optional CUDA_WRAP_SRCS options. See documentation
# for CUDA_WRAP_SRCS in FindCUDA.cmake.
function(compile_cuda_to_cpp target_name format _generated_files)
  # CUDA_GET_SOURCES_AND_OPTIONS is a FindCUDA internal command that we are
  # going to borrow. There are no guarantees on backward compatibility.
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  if(${format} MATCHES "SEPARABLE_OBJ")
    # It's OK to set this without resetting it later, since this is a function
    # with a localized scope.
    set(CUDA_SEPARABLE_COMPILATION ON)
    set(format OBJ)
  elseif(${format} MATCHES "PTX")
    message(FATAL_ERROR "compile_cuda_to_cpp called with PTX format which is unsupported. Try compile_ptx instead.")
  endif()
  set(${_generated_files})
  foreach(source ${_sources})
    CUDA_WRAP_SRCS(${target_name} ${format} objfile ${source} OPTIONS ${_options} )
    get_filename_component(source_basename "${source}" NAME_WE)
    set(cpp_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_cuda.cpp")
    set(h_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_cuda.h")
    get_filename_component(generated_file_path "${objfile}" DIRECTORY)
    get_filename_component(relative_cuda_generated_file "${objfile}" NAME)
    # Now generate a target that will generate the wrapped version of the cuda
    # files at build time
    set(symbol "${source_basename}_cuda_source")
    add_custom_command( OUTPUT ${cpp_wrap}
      COMMAND ${CMAKE_COMMAND} -DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
      -DCPP_FILE:STRING="${cpp_wrap}"
      -DCPP_SYMBOL:STRING="${symbol}"
      -DSOURCE_BASE:STRING="${generated_file_path}"
      -DSOURCES:STRING="${relative_cuda_generated_file}"
      ARGS -P "${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake"
      DEPENDS ${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake ${objfile}
      )
    # We know the list of files at configure time, so generate the header here.
    # BUGFIX: previously passed ${relative_cuda_generated_files} (undefined,
    # trailing "s"), so bin2h received no file name at all.
    include(bin2cpp)
    bin2h("${h_wrap}" ${symbol} ${relative_cuda_generated_file})
    list(APPEND ${_generated_files} ${objfile} ${cpp_wrap} ${h_wrap})
  endforeach()
  set(${_generated_files} ${${_generated_files}} PARENT_SCOPE)
endfunction()
# Compile each CUDA file to the requested binary format and embed the result in a
# generated C++ source/header pair via bin2c, appending 'variant_name' to the
# generated file and symbol names so multiple variants of the same source can
# coexist in one build tree. Not appropriate for PTX; use compile_ptx for that.
#
# Usage: compile_cuda_to_cpp_variant( target_name variant_name format _generated_files files [files...] [OPTIONS ...] )
#
# target_name      [input]  name to use for mangling output files.
# variant_name     [input]  name to append to filenames and symbols.
# format           [input]  OBJ SEPARABLE_OBJ CUBIN FATBIN
# _generated_files [output] list-variable to fill with the names of the generated files.
# ARGN             [input]  list of files and optional CUDA_WRAP_SRCS options. See
#                           documentation for CUDA_WRAP_SRCS in FindCUDA.cmake.
function(compile_cuda_to_cpp_variant target_name variant_name format _generated_files)
  # CUDA_GET_SOURCES_AND_OPTIONS is a FindCUDA internal command that we are going to
  # borrow. There are no guarantees on backward compatibility using this macro.
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})

  if(format MATCHES "SEPARABLE_OBJ")
    # It's OK to set this without resetting it later, since this is a function with a
    # localized scope.
    set(CUDA_SEPARABLE_COMPILATION ON)
    set(format OBJ)
  elseif(format MATCHES "PTX")
    message(FATAL_ERROR "compile_cuda_to_cpp called with PTX format which is unsupported. Try compile_ptx instead.")
  endif()

  set(generated_files)
  foreach(source ${_sources})
    get_filename_component(source_basename "${source}" NAME_WE)
    CUDA_WRAP_SRCS(${target_name}_${variant_name} ${format} objfile ${source} OPTIONS ${_options})
    set(cpp_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_${variant_name}_cuda.cpp")
    set(h_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}_${variant_name}_cuda.h")
    get_filename_component(generated_file_path "${objfile}" DIRECTORY)
    get_filename_component(relative_cuda_generated_file "${objfile}" NAME)

    # Generate the bin2c-wrapped version of the compiled CUDA file at build time.
    set(symbol "${source_basename}_${variant_name}_cuda_source")
    add_custom_command(OUTPUT ${cpp_wrap}
      COMMAND ${CMAKE_COMMAND} -DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
                               -DCPP_FILE:STRING="${cpp_wrap}"
                               -DCPP_SYMBOL:STRING="${symbol}"
                               -DSOURCE_BASE:STRING="${generated_file_path}"
                               -DSOURCES:STRING="${relative_cuda_generated_file}"
      ARGS -P "${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake"
      DEPENDS ${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake ${objfile}
      )
    # We know the list of files at configure time, so generate the header here.
    include(bin2cpp)
    # BUGFIX: previously referenced the undefined ${relative_cuda_generated_files}
    # (trailing 's'); the variable set above is singular, so bin2h received no
    # file names and the header declared no data symbol.
    bin2h("${h_wrap}" ${symbol} ${relative_cuda_generated_file})
    list(APPEND generated_files ${objfile} ${cpp_wrap} ${h_wrap})
  endforeach()

  set(${_generated_files} ${generated_files} PARENT_SCOPE)
endfunction()
# Compile a PTX file into a cubin for every SM target listed in the
# ${cuda_sm_targets} variable, bundle the cubins into a single fatbin, and embed
# the fatbin in a generated .cpp/.h pair via bin2c.
#
# input_ptx          [input]  PTX source file.
# _generated_files   [output] list-variable receiving the generated .cpp and .h files.
# extra_dependencies [input]  additional files the fatbin build should depend on.
# ARGN               [input]  optional CUDA_WRAP_SRCS-style options; any -D macro
#                             definitions found here are also forwarded to the C
#                             preprocessor pass run over the PTX.
function(compile_ptx_to_cpp input_ptx _generated_files extra_dependencies)
  get_filename_component(source_ptx "${input_ptx}" REALPATH)
  get_filename_component(source_basename "${source_ptx}" NAME_WE)
  set(fatbin ${CMAKE_CURRENT_BINARY_DIR}/${source_basename}.fatbin)
  set(build_directory "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles")

  # The source is passed in as the first parameter; use this FindCUDA helper to
  # easily extract the options. (Leftover configure-time debug message() calls
  # that printed source_ptx/source_basename have been removed.)
  CUDA_GET_SOURCES_AND_OPTIONS(_ptx_wrap_sources _ptx_wrap_cmake_options _ptx_wrap_options ${ARGN})

  # Extract all macro definitions so we can pass them to the C preprocessor.
  # BUGFIX: the class was "[A-z_]", which in ASCII also matches the punctuation
  # between 'Z' and 'a' ('[', '\', ']', '^', '`'); use an explicit letter class.
  string(REGEX MATCHALL "-D[A-Za-z_][A-Za-z_0-9]*(=[A-Za-z_0-9]*)?" macros ${ARGN})

  set(fatbin_command)
  set(cubin_commands)
  if(MSVC)
    set(preprocess_command CL /nologo /E /EP)
  elseif(CMAKE_COMPILER_IS_GNUCC)
    set(preprocess_command gcc -P -E -x assembler-with-cpp)
  else()
    message(FATAL_ERROR "Unknown preprocessor command")
  endif()

  foreach(cuda_sm_target ${cuda_sm_targets})
    string(REGEX REPLACE "sm_" "" cuda_sm "${cuda_sm_target}")
    # Preprocess the input PTX for this SM, then compile it to a cubin.
    set(preprocessed_ptx ${build_directory}/${source_basename}.${cuda_sm}.ptx)
    set(cubin ${build_directory}/${source_basename}.${cuda_sm}.cubin)
    list(APPEND cubin_commands
      COMMAND ${preprocess_command} -DPTXAS ${macros} ${PTXAS_INCLUDES} -D__CUDA_TARGET__=${cuda_sm} -D__CUDA_ARCH__=${cuda_sm}0 ${source_ptx} > ${preprocessed_ptx}
      COMMAND ${CUDA_NVCC_EXECUTABLE} ${_ptx_wrap_options} -arch=sm_${cuda_sm} --cubin ${preprocessed_ptx} -dc -o ${cubin}
      )
    list(APPEND fatbin_command "--image=profile=sm_${cuda_sm},file=${cubin}")
  endforeach()

  add_custom_command(
    OUTPUT ${fatbin}
    ${cubin_commands}
    COMMAND ${CUDA_FATBINARY_EXECUTABLE} --create="${fatbin}" -64 -c --cmdline="" ${fatbin_command}
    MAIN_DEPENDENCY ${source_ptx}
    DEPENDS ${extra_dependencies}
    )

  set(cpp_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}.cpp")
  set(h_wrap "${CMAKE_CURRENT_BINARY_DIR}/${source_basename}.h")
  get_filename_component(generated_file_path "${fatbin}" DIRECTORY)
  get_filename_component(relative_generated_file "${fatbin}" NAME)
  set(symbol "${source_basename}")
  # NOTE(review): ARGS uses ${bin2cpp_cmake} while DEPENDS hardcodes
  # ${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake -- these are assumed to name the same
  # file; confirm bin2cpp_cmake is set accordingly by the including scope.
  add_custom_command(OUTPUT ${cpp_wrap}
    COMMAND ${CMAKE_COMMAND} -DCUDA_BIN2C_EXECUTABLE:STRING="${CUDA_BIN2C_EXECUTABLE}"
                             -DCPP_FILE:STRING="${cpp_wrap}"
                             -DCPP_SYMBOL:STRING="${symbol}"
                             -DSOURCE_BASE:STRING="${generated_file_path}"
                             -DSOURCES:STRING="${relative_generated_file}"
    ARGS -P "${bin2cpp_cmake}"
    DEPENDS ${CMAKE_SOURCE_DIR}/CMake/bin2cpp.cmake ${fatbin}
    )
  bin2h("${h_wrap}" "${symbol}" "${relative_generated_file}")
  set(${_generated_files} ${cpp_wrap} ${h_wrap} PARENT_SCOPE)
endfunction()
# Create multiple bitness targets for mac universal builds.
#
# Usage: OPTIX_MAKE_UNIVERSAL_CUDA_RUNTIME_OBJECTS(
#          target_name
#          generated_files_var
#          FILE0.cu FILE1.cu ... FILEN.cu
#          OPTIONS option1 option2
#        )
#
# target_name     [input ] name prefix for the resulting files
# generated_files [output] list of filenames of resulting object files
# ARGN            [input ] list of source files plus possibly options
function(OPTIX_MAKE_UNIVERSAL_CUDA_RUNTIME_OBJECTS target_name generated_files)
  if(NOT CMAKE_OSX_ARCHITECTURES)
    # No OSX architecture list given: a single ordinary compile is enough.
    CUDA_WRAP_SRCS(
      ${target_name}
      OBJ
      object_files
      ${ARGN}
      )
  else()
    # CMAKE_OSX_ARCHITECTURES implies a universal build (this path also works
    # for a single architecture): compile one batch of objects per architecture.
    set(object_files)
    push_variable(CUDA_64_BIT_DEVICE_CODE OFF)
    list(LENGTH CMAKE_OSX_ARCHITECTURES arch_count)
    if(arch_count GREATER 1)
      # With more than one architecture the build rule must not be attached to
      # the source file itself, or a file could be compiled in multiple targets.
      set(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE OFF)
    endif()
    foreach(osx_arch ${CMAKE_OSX_ARCHITECTURES})
      # Match nvcc's device-code bitness to the current OSX architecture.
      if(osx_arch STREQUAL "i386")
        set(CUDA_64_BIT_DEVICE_CODE OFF)
      elseif(osx_arch STREQUAL "x86_64")
        set(CUDA_64_BIT_DEVICE_CODE ON)
      else()
        message(SEND_ERROR "Unknown OSX arch ${osx_arch}")
      endif()
      CUDA_WRAP_SRCS(
        ${target_name}_${osx_arch}
        OBJ
        per_arch_objects
        ${ARGN}
        )
      list(APPEND object_files ${per_arch_objects})
    endforeach()
    pop_variable(CUDA_64_BIT_DEVICE_CODE)
  endif()
  set(${generated_files} ${object_files} PARENT_SCOPE)
endfunction()
################################################################
# Simple debugging helper that prints a variable's name and value,
# optionally preceded by a caller-supplied message.
#
# USAGE: print_var(
#          var
#          msg (optional)
#        )
function(print_var var)
  if(ARGC GREATER 1)
    # Optional second argument: print it on its own line above the value.
    message("${ARGV1}\n ${var}:${${var}}")
  else()
    message("${var}:${${var}}")
  endif()
endfunction()
################################################################
# Function for adding a list of files from a subdirectory. Also adds an IDE
# source group mirroring the subdirectory path.
#
# dir             - name of the sub directory
# source_list_var - name of the variable to set the list of sources to
# ARGN            - list of sources relative to the sub directory
function(optix_add_subdirectory_sources dir source_list_var)
  set(files ${ARGN})
  set(sources)
  # Strip the current source dir prefix so rel_dir is relative, for both the
  # IDE group name and for prefixing relative file names.
  string(REPLACE "${CMAKE_CURRENT_SOURCE_DIR}/" "" rel_dir ${dir})
  foreach(filename ${files})
    # BUGFIX: the three references below were corrupted to the literal token
    # '$(unknown)' (also invalid CMake syntax); they must expand ${filename}.
    if(NOT IS_ABSOLUTE "${filename}")
      set(filename ${rel_dir}/${filename})
    endif()
    list(APPEND sources ${filename})
  endforeach()
  # Make a source group named after the relative directory path.
  string(REPLACE "/" "\\\\" group_name ${rel_dir})
  if(group_name)
    source_group(${group_name} FILES ${sources})
  endif()
  set(${source_list_var} ${sources} PARENT_SCOPE)
endfunction()
################################################################
# optixSharedLibraryResources
#
# Handle common logic for Windows resource script generation for shared
# libraries. Intentionally a macro (not a function): it sets variables in the
# CALLER's scope for use in add_library and configure_file.
#
# outputName - The value to assign to 'output_name' in the parent scope
#              and used to locate the resource script template to configure
#              ("${outputName}.rc.in" in the current source directory).
#
# Side effects (set in the caller's scope):
#
#   output_name   - Value to be used for the OUTPUT_NAME property of the target.
#                   On Windows this gets ".${OPTIX_SO_VERSION}" appended.
#   resourceFiles - List of resource files to be added to the target
#                   (empty on non-Windows platforms).
#
macro( optixSharedLibraryResources outputName )
  set( resourceFiles )
  set( output_name ${outputName} )
  if( WIN32 )
    # On Windows, we want the version number in the DLL filename.
    # Windows ignores the SOVERSION property and only uses the OUTPUT_NAME property.
    # Linux puts the version number in the filename from the SOVERSION property.
    # We need to adjust this variable before we call configure_file.
    set( output_name "${outputName}.${OPTIX_SO_VERSION}" )
    # configure_file reads 'output_name' and friends via @VAR@ substitution.
    configure_file( "${outputName}.rc.in" "${outputName}.rc" @ONLY )
    set( resourceFiles "${CMAKE_CURRENT_BINARY_DIR}/${outputName}.rc" "${CMAKE_BINARY_DIR}/include/optix_rc.h" )
    source_group( Resources FILES ${resourceFiles} )
  endif()
endmacro()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/ptx2cpp.cmake | CMake |
#
# Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property and proprietary
# rights in and to this software, related documentation and any modifications thereto.
# Any use, reproduction, disclosure or distribution of this software and related
# documentation without an express license agreement from NVIDIA Corporation is strictly
# prohibited.
#
# TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
# AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
# INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
# SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
# LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
# BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
# INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES
#
# This script produces a string variable from the contents of a PTX
# file. The data is embedded into the .cpp file via bin2c and exposed as
# a const char* inside the requested namespace.
#
# This script expects the following variables to be passed in like
# -DVAR:TYPE=VALUE
#
#   CPP_FILE              - path of the C++ file to write
#   PTX_FILE              - path of the PTX file to embed
#   VARIABLE_NAME         - name of the generated string variable
#   NAMESPACE             - C++ namespace wrapping the variable
#   CUDA_BIN2C_EXECUTABLE - path to the bin2c tool

# Uncomment for debugging:
# message("PTX_FILE      = ${PTX_FILE}")
# message("CPP_FILE      = ${CPP_FILE}")
# message("VARIABLE_NAME = ${VARIABLE_NAME}")
# message("NAMESPACE     = ${NAMESPACE}")

# Run bin2c to turn the PTX bytes into a C array named ${VARIABLE_NAME}_static.
execute_process( COMMAND ${CUDA_BIN2C_EXECUTABLE} -p 0 -st -c -n ${VARIABLE_NAME}_static "${PTX_FILE}"
  OUTPUT_VARIABLE bindata
  RESULT_VARIABLE result
  ERROR_VARIABLE error
  )
if(result)
  message(FATAL_ERROR "bin2c error:\n" ${error})
endif()

# Append a namespaced char* alias pointing at the embedded array, then write
# the whole thing out as the generated C++ file.
set(BODY
  "${bindata}\n"
  "namespace ${NAMESPACE} {\n\nstatic const char* const ${VARIABLE_NAME} = reinterpret_cast<const char*>(&${VARIABLE_NAME}_static[0]);\n} // end namespace ${NAMESPACE}\n")
file(WRITE ${CPP_FILE} "${BODY}")
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/CMake/testmain.c | C |
/*
* Copyright (c) 2008 - 2021 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and proprietary
* rights in and to this software, related documentation and any modifications thereto.
* Any use, reproduction, disclosure or distribution of this software and related
* documentation without an express license agreement from NVIDIA Corporation is strictly
* prohibited.
*
* TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
* AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
* INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
* SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
* LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
* BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
* INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGES
*/
/* Trivial program that does nothing and exits successfully.  Presumably used
 * by configure-time compile/link checks (the file lives in the SDK's CMake
 * directory) -- TODO confirm against the CMake scripts that reference it. */
int main()
{
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/BufferView.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <sutil/Preprocessor.h>
// Non-owning, typed view of a device-memory buffer.  Stores a raw CUdeviceptr
// plus layout information; members are usable from host and device code
// (SUTIL_HOSTDEVICE).
template <typename T>
struct BufferView
{
    CUdeviceptr    data           CONST_STATIC_INIT( 0 );  // device address of element 0; 0 marks an invalid view
    unsigned int   count          CONST_STATIC_INIT( 0 );  // number of elements
    unsigned short byte_stride    CONST_STATIC_INIT( 0 );  // bytes between consecutive elements; 0 => tightly packed (sizeof(T))
    unsigned short elmt_byte_size CONST_STATIC_INIT( 0 );  // size of one element in bytes (lets callers distinguish e.g. 16- vs 32-bit indices)

    // A view is valid iff its data pointer is non-null.
    SUTIL_HOSTDEVICE bool isValid() const
    { return static_cast<bool>( data ); }
    SUTIL_HOSTDEVICE operator bool() const
    { return isValid(); }

    // Unchecked element access honoring byte_stride (falls back to sizeof(T)
    // when the stride is 0).
    SUTIL_HOSTDEVICE const T& operator[]( unsigned int idx ) const
    { return *reinterpret_cast<T*>( data + idx*(byte_stride ? byte_stride : sizeof( T ) ) ); }
};

// Untyped view for buffers whose element type is decided at runtime via
// elmt_byte_size (e.g. index buffers).
typedef BufferView<unsigned int> GenericBufferView;
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/GeometryData.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <cuda/BufferView.h>
// Tagged union describing one piece of geometry; 'type' selects which union
// member is active.
struct GeometryData
{
    enum Type
    {
        TRIANGLE_MESH         = 0,
        SPHERE                = 1,
        LINEAR_CURVE_ARRAY    = 2,
        QUADRATIC_CURVE_ARRAY = 3,
        CUBIC_CURVE_ARRAY     = 4,
    };

    struct TriangleMesh
    {
        GenericBufferView  indices;    // per-triangle vertex indices; elmt_byte_size distinguishes 16- vs 32-bit indices
        BufferView<float3> positions;  // vertex positions
        BufferView<float3> normals;    // optional per-vertex normals (view may be invalid)
        BufferView<float2> texcoords;  // optional per-vertex texture coordinates (view may be invalid)
    };

    struct Sphere
    {
        float3 center;
        float  radius;
    };

    struct Curves
    {
        BufferView<float2> strand_u;    // strand_u at segment start per segment
        GenericBufferView  strand_i;    // strand index per segment
        BufferView<uint2>  strand_info; // info.x = segment base
                                        // info.y = strand length (segments)
    };

    Type type;  // discriminant for the union below

    union
    {
        TriangleMesh triangle_mesh;
        Sphere       sphere;
        Curves       curves;
    };
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/Light.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <sutil/Preprocessor.h>
// Tagged union describing a light source; 'type' selects which union member is
// active.
struct Light
{
    Light() {}

    // Distance falloff applied to a point light's intensity.
    enum class Falloff : int
    {
        NONE = 0,
        LINEAR,
        QUADRATIC
    };

    enum class Type : int
    {
        POINT   = 0,
        AMBIENT = 1
    };

    struct Point
    {
        float3  color     CONST_STATIC_INIT( { 1.0f, 1.0f, 1.0f } );  // RGB color
        float   intensity CONST_STATIC_INIT( 1.0f );                  // scalar brightness multiplier
        float3  position  CONST_STATIC_INIT( {} );                    // world-space position
        Falloff falloff   CONST_STATIC_INIT( Falloff::QUADRATIC );
    };

    struct Ambient
    {
        float3 color CONST_STATIC_INIT( {1.0f, 1.0f, 1.0f} );
    };

    Type type;  // discriminant for the union below

    union
    {
        Point   point;
        Ambient ambient;
    };
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/LocalGeometry.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <optix.h>
#include <sutil/Matrix.h>
#include <sutil/Preprocessor.h>
#include <sutil/vec_math.h>
#include <cuda/BufferView.h>
#include <cuda/GeometryData.h>
#include <cuda/util.h>
// Differential surface geometry at a hit point, filled in by getLocalGeometry().
struct LocalGeometry
{
    float3 P;    // world-space hit position
    float3 N;    // shading normal (interpolated vertex normals when available)
    float3 Ng;   // geometric normal of the triangle
    float2 UV;   // interpolated texture coordinates (barycentrics when the mesh has none)
    float3 dndu; // partial derivative of the shading normal w.r.t. u
    float3 dndv; // partial derivative of the shading normal w.r.t. v
    float3 dpdu; // partial derivative of position w.r.t. u
    float3 dpdv; // partial derivative of position w.r.t. v
};
// Reconstruct the local differential geometry at the current OptiX hit.
// Must be called from a hit program: it reads the primitive index, triangle
// barycentrics, and object-to-world transforms via OptiX intrinsics.
// NOTE(review): only TRIANGLE_MESH is implemented; for SPHERE and other types
// the returned struct is left uninitialized.
SUTIL_HOSTDEVICE LocalGeometry getLocalGeometry( const GeometryData& geometry_data )
{
    LocalGeometry lgeom;
    switch( geometry_data.type )
    {
        case GeometryData::TRIANGLE_MESH:
        {
            const GeometryData::TriangleMesh& mesh_data = geometry_data.triangle_mesh;

            const unsigned int prim_idx = optixGetPrimitiveIndex();
            const float2       barys    = optixGetTriangleBarycentrics();

            // Fetch the triangle's three vertex indices; the index buffer may
            // hold 32-bit (elmt_byte_size == 4) or 16-bit values.
            uint3 tri = make_uint3(0u, 0u, 0u);
            if( mesh_data.indices.elmt_byte_size == 4 )
            {
                const uint3* indices = reinterpret_cast<uint3*>( mesh_data.indices.data );
                tri = indices[ prim_idx ];
            }
            else
            {
                const unsigned short* indices = reinterpret_cast<unsigned short*>( mesh_data.indices.data );

                const unsigned short idx0 = indices[prim_idx * 3 + 0];
                const unsigned short idx1 = indices[prim_idx * 3 + 1];
                const unsigned short idx2 = indices[prim_idx * 3 + 2];

                tri = make_uint3( idx0, idx1, idx2 );
            }

            // Barycentric interpolation of the hit position, then transform to
            // world space.
            const float3 P0 = mesh_data.positions[ tri.x ];
            const float3 P1 = mesh_data.positions[ tri.y ];
            const float3 P2 = mesh_data.positions[ tri.z ];
            lgeom.P = ( 1.0f-barys.x-barys.y)*P0 + barys.x*P1 + barys.y*P2;
            lgeom.P = optixTransformPointFromObjectToWorldSpace( lgeom.P );

            // Texture coordinates: interpolate when present; otherwise use a
            // canonical (0,0)/(0,1)/(1,0) parameterization and the barycentrics.
            float2 UV0, UV1, UV2;
            if( mesh_data.texcoords )
            {
                UV0 = mesh_data.texcoords[ tri.x ];
                UV1 = mesh_data.texcoords[ tri.y ];
                UV2 = mesh_data.texcoords[ tri.z ];
                lgeom.UV = ( 1.0f-barys.x-barys.y)*UV0 + barys.x*UV1 + barys.y*UV2;
            }
            else
            {
                UV0 = make_float2( 0.0f, 0.0f );
                UV1 = make_float2( 0.0f, 1.0f );
                UV2 = make_float2( 1.0f, 0.0f );
                lgeom.UV = barys;
            }

            // Geometric normal from the triangle edges, transformed to world space.
            lgeom.Ng = normalize( cross( P1-P0, P2-P0 ) );
            lgeom.Ng = optixTransformNormalFromObjectToWorldSpace( lgeom.Ng );

            // Shading normal: interpolate vertex normals when present; otherwise
            // fall back to the geometric normal.
            float3 N0, N1, N2;
            if( mesh_data.normals )
            {
                N0 = mesh_data.normals[ tri.x ];
                N1 = mesh_data.normals[ tri.y ];
                N2 = mesh_data.normals[ tri.z ];
                lgeom.N = ( 1.0f-barys.x-barys.y)*N0 + barys.x*N1 + barys.y*N2;
                lgeom.N = normalize( optixTransformNormalFromObjectToWorldSpace( lgeom.N ) );
            }
            else
            {
                lgeom.N = N0 = N1 = N2 = lgeom.Ng;
            }

            // Solve the 2x2 UV-space linear system for the partial derivatives of
            // position and normal with respect to (u, v).
            // NOTE(review): 'det' is not checked for 0 (degenerate UVs) -- the
            // derivatives become inf/nan in that case; confirm callers tolerate it.
            const float du1 = UV0.x - UV2.x;
            const float du2 = UV1.x - UV2.x;
            const float dv1 = UV0.y - UV2.y;
            const float dv2 = UV1.y - UV2.y;

            const float3 dp1 = P0 - P2;
            const float3 dp2 = P1 - P2;

            const float3 dn1 = N0 - N2;
            const float3 dn2 = N1 - N2;

            const float det = du1*dv2 - dv1*du2;

            const float invdet = 1.f / det;
            lgeom.dpdu = ( dv2 * dp1 - dv1 * dp2) * invdet;
            lgeom.dpdv = (-du2 * dp1 + du1 * dp2) * invdet;
            lgeom.dndu = ( dv2 * dn1 - dv1 * dn2) * invdet;
            lgeom.dndv = (-du2 * dn1 + du1 * dn2) * invdet;

            break;
        }
        case GeometryData::SPHERE:
        {
            // Not implemented: lgeom is returned uninitialized for spheres.
            break;
        }
        default: break;
    }

    return lgeom;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/MaterialData.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <cuda_runtime.h>
// Tagged union of material parameter sets; 'type' selects the active member.
// Currently only a metallic-roughness PBR model is defined.
struct MaterialData
{
    enum Type
    {
        PBR = 0
    };

    struct Pbr
    {
        float4 base_color = { 1.0f, 1.0f, 1.0f, 1.0f };  // RGBA base color factor
        float  metallic   = 1.0f;                        // metallic factor
        float  roughness  = 1.0f;                        // roughness factor

        cudaTextureObject_t base_color_tex         = 0;  // optional texture; 0 == none
        cudaTextureObject_t metallic_roughness_tex = 0;  // optional texture; 0 == none
        cudaTextureObject_t normal_tex             = 0;  // optional normal map; 0 == none
    };

    Type type;  // discriminant for the union below

    union
    {
        Pbr pbr;
    };
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/curve.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <optix.h>
#include <sutil/vec_math.h>
#include <vector_types.h>
//
// First order polynomial interpolator: one linear curve segment with a
// per-control-point radius stored in the w component.  Control points are
// pre-transformed in initialize() so evaluation is a single multiply-add.
//
struct LinearBSplineSegment
{
    __device__ __forceinline__ LinearBSplineSegment() {}
    __device__ __forceinline__ LinearBSplineSegment( const float4* q ) { initialize( q ); }

    // Pre-transform the two control points: p[0] = q[0], p[1] = q[1]-q[0],
    // so position(u) = p[0] + u*p[1].
    __device__ __forceinline__ void initialize( const float4* q )
    {
        p[0] = q[0];
        p[1] = q[1] - q[0];  // pre-transform p[] for fast evaluation
    }

    // Radius (w component) linearly interpolated along the segment.
    __device__ __forceinline__ float radius( const float& u ) const { return p[0].w + p[1].w * u; }

    // Position at parameter u, as xyz only or as full xyzw (including radius).
    __device__ __forceinline__ float3 position3( float u ) const { return (float3&)p[0] + u * (float3&)p[1]; }
    __device__ __forceinline__ float4 position4( float u ) const { return p[0] + u * p[1]; }

    // Radius extrema over [u1, u2]; for a linear radius these occur at the
    // interval endpoints.
    __device__ __forceinline__ float min_radius( float u1, float u2 ) const
    {
        return fminf( radius( u1 ), radius( u2 ) );
    }

    __device__ __forceinline__ float max_radius( float u1, float u2 ) const
    {
        if( !p[1].w )
            return p[0].w;  // a quick bypass for constant width
        return fmaxf( radius( u1 ), radius( u2 ) );
    }

    // First derivative is the constant p[1]; second derivative is zero.
    __device__ __forceinline__ float3 velocity3( float u ) const { return (float3&)p[1]; }
    __device__ __forceinline__ float4 velocity4( float u ) const { return p[1]; }

    __device__ __forceinline__ float3 acceleration3( float u ) const { return make_float3( 0.f ); }
    __device__ __forceinline__ float4 acceleration4( float u ) const { return make_float4( 0.f ); }

    __device__ __forceinline__ float derivative_of_radius( float u ) const { return p[1].w; }

    float4 p[2];  // pre-transformed "control points" for fast evaluation
};
//
// Second order polynomial interpolator
//
// Degree-2 curve segment. The three B-spline control points are converted
// to a monomial basis so that position4( u ) = p[0] + u * p[1] + u^2 * p[2];
// the .w components carry the (quadratic) radius.
struct QuadraticBSplineSegment
{
    __device__ __forceinline__ QuadraticBSplineSegment() {}
    __device__ __forceinline__ QuadraticBSplineSegment( const float4* q ) { initializeFromBSpline( q ); }
    // Convert B-spline control points q[0..2] to monomial coefficients p[0..2].
    __device__ __forceinline__ void initializeFromBSpline( const float4* q )
    {
        // pre-transform control-points for fast evaluation
        p[0] = q[1] / 2.0f + q[0] / 2.0f;
        p[1] = q[1] - q[0];
        p[2] = q[0] / 2.0f - q[1] + q[2] / 2.0f;
    }
    // Inverse of initializeFromBSpline: recover the B-spline control points.
    __device__ __forceinline__ void export2BSpline( float4 bs[3] ) const
    {
        bs[0] = p[0] - p[1] / 2;
        bs[1] = p[0] + p[1] / 2;
        bs[2] = p[0] + 1.5f * p[1] + 2 * p[2];
    }
    __device__ __forceinline__ float3 position3( float u ) const
    {
        return (float3&)p[0] + u * (float3&)p[1] + u * u * (float3&)p[2];
    }
    __device__ __forceinline__ float4 position4( float u ) const { return p[0] + u * p[1] + u * u * p[2]; }
    // Radius at u, evaluated in Horner form.
    __device__ __forceinline__ float radius( float u ) const { return p[0].w + u * ( p[1].w + u * p[2].w ); }
    // Radius extrema over [u1, u2]: the quadratic radius has one interior
    // stationary point where r'(u) = p[1].w + 2 u p[2].w == 0, i.e.
    // u = -0.5 * p[1].w / p[2].w (clamped into the interval).
    __device__ __forceinline__ float min_radius( float u1, float u2 ) const
    {
        float root1 = clamp( -0.5f * p[1].w / p[2].w, u1, u2 );
        return fminf( fminf( radius( u1 ), radius( u2 ) ), radius( root1 ) );
    }
    __device__ __forceinline__ float max_radius( float u1, float u2 ) const
    {
        if( !p[1].w && !p[2].w )
            return p[0].w; // a quick bypass for constant width
        float root1 = clamp( -0.5f * p[1].w / p[2].w, u1, u2 );
        return fmaxf( fmaxf( radius( u1 ), radius( u2 ) ), radius( root1 ) );
    }
    // First and second derivatives of the monomial form.
    __device__ __forceinline__ float3 velocity3( float u ) const { return (float3&)p[1] + 2 * u * (float3&)p[2]; }
    __device__ __forceinline__ float4 velocity4( float u ) const { return p[1] + 2 * u * p[2]; }
    __device__ __forceinline__ float3 acceleration3( float u ) const { return 2 * (float3&)p[2]; }
    __device__ __forceinline__ float4 acceleration4( float u ) const { return 2 * p[2]; }
    __device__ __forceinline__ float derivative_of_radius( float u ) const { return p[1].w + 2 * u * p[2].w; }
    float4 p[3]; // pre-transformed "control points" for fast evaluation
};
//
// Third order polynomial interpolator
//
// Degree-3 curve segment. The four B-spline control points are converted to
// a custom basis {p[0..3]} (see terms()) chosen for cheap repeated
// evaluation; the .w components carry the (cubic) radius.
struct CubicBSplineSegment
{
    __device__ __forceinline__ CubicBSplineSegment() {}
    __device__ __forceinline__ CubicBSplineSegment( const float4* q ) { initializeFromBSpline( q ); }
    // Convert B-spline control points q[0..3] to the internal basis p[0..3].
    __device__ __forceinline__ void initializeFromBSpline( const float4* q )
    {
        // pre-transform control points for fast evaluation
        p[0] = ( q[2] + q[0] ) / 6 + ( 4 / 6.0f ) * q[1];
        p[1] = q[2] - q[0];
        p[2] = q[2] - q[1];
        p[3] = q[3] - q[1];
    }
    // Inverse of initializeFromBSpline: recover the B-spline control points.
    __device__ __forceinline__ void export2BSpline( float4 bs[4] ) const
    {
        // inverse of initializeFromBSpline
        bs[0] = p[0] + ( 4 * p[2] - 5 * p[1] ) / 6;
        bs[1] = p[0] + ( p[1] - 2 * p[2] ) / 6;
        bs[2] = p[0] + ( p[1] + 4 * p[2] ) / 6;
        bs[3] = p[0] + p[3] + ( p[1] - 2 * p[2] ) / 6;
    }
    // Blending weights for p[1], p[2], p[3] at parameter u
    // (p[0] always contributes with weight 1).
    __device__ __forceinline__ static float3 terms( float u )
    {
        float uu = u * u;
        float u3 = ( 1 / 6.0f ) * uu * u;
        return make_float3( u3 + 0.5f * ( u - uu ), uu - 4 * u3, u3 );
    }
    __device__ __forceinline__ float3 position3( float u ) const
    {
        float3 q = terms( u );
        return (float3&)p[0] + q.x * (float3&)p[1] + q.y * (float3&)p[2] + q.z * (float3&)p[3];
    }
    __device__ __forceinline__ float4 position4( float u ) const
    {
        float3 q = terms( u );
        return p[0] + q.x * p[1] + q.y * p[2] + q.z * p[3];
    }
    // Radius at u (cubic polynomial, evaluated in nested/Horner form).
    __device__ __forceinline__ float radius( float u ) const
    {
        return p[0].w + u * ( p[1].w / 2 + u * ( ( p[2].w - p[1].w / 2 ) + u * ( p[1].w - 4 * p[2].w + p[3].w ) / 6 ) );
    }
    // Radius extrema over [u1, u2]: the derivative of the cubic radius is
    // proportional to the quadratic  a + 2 b u - c u^2  (a, b, c below);
    // its roots are u = ( b +/- sqrt( b^2 + a c ) ) / c, clamped to [u1, u2].
    __device__ __forceinline__ float min_radius( float u1, float u2 ) const
    {
        // a + 2 b u - c u^2
        float a    = p[1].w;
        float b    = 2 * p[2].w - p[1].w;
        float c    = 4 * p[2].w - p[1].w - p[3].w;
        float rmin = fminf( radius( u1 ), radius( u2 ) );
        if( fabsf( c ) < 1e-5f )
        {
            // Near-degenerate quadratic: treat the derivative as linear.
            float root1 = clamp( -0.5f * a / b, u1, u2 );
            return fminf( rmin, radius( root1 ) );
        }
        else
        {
            float det = b * b + a * c;
            det       = det <= 0.0f ? 0.0f : sqrt( det );
            float root1 = clamp( ( b + det ) / c, u1, u2 );
            float root2 = clamp( ( b - det ) / c, u1, u2 );
            return fminf( rmin, fminf( radius( root1 ), radius( root2 ) ) );
        }
    }
    __device__ __forceinline__ float max_radius( float u1, float u2 ) const
    {
        if( !p[1].w && !p[2].w && !p[3].w )
            return p[0].w; // a quick bypass for constant width
        // a + 2 b u - c u^2
        float a    = p[1].w;
        float b    = 2 * p[2].w - p[1].w;
        float c    = 4 * p[2].w - p[1].w - p[3].w;
        float rmax = fmaxf( radius( u1 ), radius( u2 ) );
        if( fabsf( c ) < 1e-5f )
        {
            // Near-degenerate quadratic: treat the derivative as linear.
            float root1 = clamp( -0.5f * a / b, u1, u2 );
            return fmaxf( rmax, radius( root1 ) );
        }
        else
        {
            float det = b * b + a * c;
            det       = det <= 0.0f ? 0.0f : sqrt( det );
            float root1 = clamp( ( b + det ) / c, u1, u2 );
            float root2 = clamp( ( b - det ) / c, u1, u2 );
            return fmaxf( rmax, fmaxf( radius( root1 ), radius( root2 ) ) );
        }
    }
    // First derivative (Bernstein-like form in v = 1 - u).
    __device__ __forceinline__ float3 velocity3( float u ) const
    {
        // adjust u to avoid problems with triple knots.
        if( u == 0 )
            u = 0.000001f;
        if( u == 1 )
            u = 0.999999f;
        float v = 1 - u;
        return 0.5f * v * v * (float3&)p[1] + 2 * v * u * (float3&)p[2] + 0.5f * u * u * (float3&)p[3];
    }
    __device__ __forceinline__ float4 velocity4( float u ) const
    {
        // adjust u to avoid problems with triple knots.
        if( u == 0 )
            u = 0.000001f;
        if( u == 1 )
            u = 0.999999f;
        float v = 1 - u;
        return 0.5f * v * v * p[1] + 2 * v * u * p[2] + 0.5f * u * u * p[3];
    }
    // Second derivative (linear in u).
    __device__ __forceinline__ float3 acceleration3( float u ) const { return make_float3( acceleration4( u ) ); }
    __device__ __forceinline__ float4 acceleration4( float u ) const
    {
        return 2 * p[2] - p[1] + ( p[1] - 4 * p[2] + p[3] ) * u;
    }
    // Rate of change of the radius along u (same weights as velocity4().w,
    // but without the endpoint epsilon adjustment).
    __device__ __forceinline__ float derivative_of_radius( float u ) const
    {
        float v = 1 - u;
        return 0.5f * v * v * p[1].w + 2 * v * u * p[2].w + 0.5f * u * u * p[3].w;
    }
    float4 p[4]; // pre-transformed "control points" for fast evaluation
};
// Compute curve primitive surface normal in object space.
//
// Template parameters:
// CurveType - A B-Spline evaluator class.
// type - 0 ~ cylindrical approximation (correct if radius' == 0)
// 1 ~ conic approximation (correct if curve'' == 0)
// other ~ the bona fide surface normal
//
// Parameters:
// bc - A B-Spline evaluator object.
// u - segment parameter of hit-point.
// ps - hit-point on curve's surface in object space; usually
// computed like this.
// float3 ps = ray_orig + t_hit * ray_dir;
// the resulting point is slightly offset away from the
// surface. For this reason (Warning!) ps gets modified by this
// method, projecting it onto the surface
// in case it is not already on it. (See also inline
// comments.)
//
template <typename CurveType, int type = 2>
__device__ __forceinline__ float3 surfaceNormal( const CurveType& bc, float u, float3& ps )
{
    float3 normal;
    if( u == 0.0f )
    {
        // At the segment start the cap is flat: the normal is the negated tangent.
        normal = -bc.velocity3( 0 ); // special handling for flat endcaps
    }
    else if( u == 1.0f )
    {
        // At the segment end the cap is flat: the normal is the tangent.
        normal = bc.velocity3( 1 ); // special handling for flat endcaps
    }
    else
    {
        // ps is a point that is near the curve's offset surface,
        // usually ray.origin + ray.direction * rayt.
        // We will push it exactly to the surface by projecting it to the plane(p,d).
        // The function derivation:
        // we (implicitly) transform the curve into coordinate system
        // {p, o1 = normalize(ps - p), o2 = normalize(curve'(t)), o3 = o1 x o2} in which
        // curve'(t) = (0, length(d), 0); ps = (r, 0, 0);
        float4 p4 = bc.position4( u );
        float3 p  = make_float3( p4 );
        float  r  = p4.w; // == length(ps - p) if ps is already on the surface
        float4 d4 = bc.velocity4( u );
        float3 d  = make_float3( d4 );
        float  dr = d4.w;
        float  dd = dot( d, d );
        float3 o1 = ps - p;               // dot(modified_o1, d) == 0 by design:
        o1 -= ( dot( o1, d ) / dd ) * d;  // first, project ps to the plane(p,d)
        o1 *= r / length( o1 );           // and then drop it to the surface
        ps = p + o1;                      // fine-tuning the hit point
        if( type == 0 )
        {
            normal = o1; // cylindrical approximation
        }
        else
        {
            if( type != 1 )
            {
                // Full surface normal: fold in the curvature (second derivative) term.
                dd -= dot( bc.acceleration3( u ), o1 );
            }
            normal = dd * o1 - ( dr * r ) * d;
        }
    }
    // Only the direction of the accumulated normal is meaningful.
    return normalize( normal );
}
// Overload for linear segments: endpoints are treated as ROUND endcaps
// (the generic template above treats u == 0 / u == 1 as flat caps), and the
// curvature term is never needed since a linear curve has zero acceleration.
template <int type = 1>
__device__ __forceinline__ float3 surfaceNormal( const LinearBSplineSegment& bc, float u, float3& ps )
{
    float3 normal;
    if( u == 0.0f )
    {
        normal = ps - (float3&)(bc.p[0]); // special handling for round endcaps
    }
    else if( u >= 1.0f )
    {
        // reconstruct second control point (Note: the interpolator pre-transforms
        // the control-points to speed up repeated evaluation.
        const float3 p1 = (float3&)(bc.p[1]) + (float3&)(bc.p[0]);
        normal = ps - p1; // special handling for round endcaps
    }
    else
    {
        // ps is a point that is near the curve's offset surface,
        // usually ray.origin + ray.direction * rayt.
        // We will push it exactly to the surface by projecting it to the plane(p,d).
        // The function derivation:
        // we (implicitly) transform the curve into coordinate system
        // {p, o1 = normalize(ps - p), o2 = normalize(curve'(t)), o3 = o1 x o2} in which
        // curve'(t) = (0, length(d), 0); ps = (r, 0, 0);
        float4 p4 = bc.position4( u );
        float3 p  = make_float3( p4 );
        float  r  = p4.w; // == length(ps - p) if ps is already on the surface
        float4 d4 = bc.velocity4( u );
        float3 d  = make_float3( d4 );
        float  dr = d4.w;
        float  dd = dot( d, d );
        float3 o1 = ps - p;               // dot(modified_o1, d) == 0 by design:
        o1 -= ( dot( o1, d ) / dd ) * d;  // first, project ps to the plane(p,d)
        o1 *= r / length( o1 );           // and then drop it to the surface
        ps = p + o1;                      // fine-tuning the hit point
        if( type == 0 )
        {
            normal = o1; // cylindrical approximation
        }
        else
        {
            normal = dd * o1 - ( dr * r ) * d;
        }
    }
    // Only the direction of the accumulated normal is meaningful.
    return normalize( normal );
}
// Compute curve primitive tangent in object space.
//
// Template parameters:
// CurveType - A B-Spline evaluator class.
//
// Parameters:
// bc - A B-Spline evaluator object.
// u - segment parameter of tangent location on curve.
//
template <typename CurveType>
__device__ __forceinline__ float3 curveTangent( const CurveType& bc, float u )
{
    // The curve's first derivative gives the (unnormalized) tangent direction.
    const float3 d = bc.velocity3( u );
    return normalize( d );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/helpers.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <vector_types.h>
#include <sutil/vec_math.h>
// Apply the sRGB transfer function per channel: linear values below
// 0.0031308 use the linear segment (12.92 * x), larger values use the
// gamma segment (1.055 * x^(1/2.4) - 0.055).
__forceinline__ __device__ float3 toSRGB( const float3& c )
{
    const float invGamma = 1.0f / 2.4f;
    const float r = c.x < 0.0031308f ? 12.92f * c.x : 1.055f * powf( c.x, invGamma ) - 0.055f;
    const float g = c.y < 0.0031308f ? 12.92f * c.y : 1.055f * powf( c.y, invGamma ) - 0.055f;
    const float b = c.z < 0.0031308f ? 12.92f * c.z : 1.055f * powf( c.z, invGamma ) - 0.055f;
    return make_float3( r, g, b );
}
//__forceinline__ __device__ float dequantizeUnsigned8Bits( const unsigned char i )
//{
// enum { N = (1 << 8) - 1 };
// return min((float)i / (float)N), 1.f)
//}
// Map x in [0, 1] to an 8-bit value in [0, 255].
__forceinline__ __device__ unsigned char quantizeUnsigned8Bits( float x )
{
    x = clamp( x, 0.0f, 1.0f );
    enum { N = (1 << 8) - 1, Np1 = (1 << 8) };  // N = 255, Np1 = 256
    // Scale by 256 (so every bucket gets an equal-width input interval),
    // then clamp to 255 so that x == 1.0 still maps to 255.
    return (unsigned char)min((unsigned int)(x * (float)Np1), (unsigned int)N);
}
// Convert a linear RGB color to an 8-bit RGBA pixel (alpha forced to 255),
// applying the sRGB transfer function before quantization.
__forceinline__ __device__ uchar4 make_color( const float3& c )
{
    // first apply gamma, then convert to unsigned char
    float3 srgb = toSRGB( clamp( c, 0.0f, 1.0f ) );
    return make_uchar4( quantizeUnsigned8Bits( srgb.x ), quantizeUnsigned8Bits( srgb.y ), quantizeUnsigned8Bits( srgb.z ), 255u );
}
// float4 overload: the input alpha channel is discarded; output alpha is 255.
__forceinline__ __device__ uchar4 make_color( const float4& c )
{
    const float3 rgb = make_float3( c.x, c.y, c.z );
    return make_color( rgb );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/random.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
// Hash two 32-bit values with N rounds of the TEA ("Tiny Encryption
// Algorithm") mixing function and return the mixed v0. Deterministic for a
// given (val0, val1, N); used e.g. to derive per-pixel RNG seeds.
template<unsigned int N>
static __host__ __device__ __inline__ unsigned int tea( unsigned int val0, unsigned int val1 )
{
  unsigned int v0 = val0;
  unsigned int v1 = val1;
  unsigned int s0 = 0;
  for( unsigned int n = 0; n < N; n++ )
  {
    // One TEA round: advance the key schedule, then cross-mix v0 and v1.
    s0 += 0x9e3779b9;
    v0 += ((v1<<4)+0xa341316c)^(v1+s0)^((v1>>5)+0xc8013ea4);
    v1 += ((v0<<4)+0xad90777d)^(v0+s0)^((v0>>5)+0x7e95761e);
  }
  return v0;
}
// Generate random unsigned int in [0, 2^24).
// Linear congruential generator; advances `prev` in place and returns its
// low 24 bits.
static __host__ __device__ __inline__ unsigned int lcg(unsigned int &prev)
{
  const unsigned int LCG_A = 1664525u;
  const unsigned int LCG_C = 1013904223u;
  prev = (LCG_A * prev + LCG_C);
  return prev & 0x00FFFFFF;
}
// Alternate small-modulus LCG: advances `prev` in place (state stays in
// [0, 134456)) and returns the new state.
static __host__ __device__ __inline__ unsigned int lcg2(unsigned int &prev)
{
  const unsigned int next = (prev*8121 + 28411) % 134456;
  prev = next;
  return next;
}
// Generate random float in [0, 1)
static __host__ __device__ __inline__ float rnd(unsigned int &prev)
{
  // Advance the LCG, then scale its 24-bit output into the unit interval.
  const unsigned int r = lcg(prev);
  return (float) r / (float) 0x01000000;
}
// Derive a per-frame seed by XOR-ing a base seed with the frame index.
static __host__ __device__ __inline__ unsigned int rot_seed( unsigned int seed, unsigned int frame )
{
  return seed ^ frame;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/sphere.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <sutil/vec_math.h>
#include "sphere.h"
// Pack a float3 into three 32-bit attribute registers (bit-preserving).
#define float3_as_ints( u ) float_as_int( u.x ), float_as_int( u.y ), float_as_int( u.z )
// Custom intersection program for an analytic sphere. Solves the ray/sphere
// quadratic and reports up to two candidate hits; each report carries the
// object-space surface normal (3 ints) and the sphere radius (1 int) as
// attributes (sphere::NUM_ATTRIBUTE_VALUES == 4).
extern "C" __global__ void __intersection__sphere()
{
    const sphere::SphereHitGroupData* hit_group_data = reinterpret_cast<sphere::SphereHitGroupData*>( optixGetSbtDataPointer() );
    // Work with the ray origin relative to the sphere center and a
    // normalized direction D; l converts unit-direction distances back to
    // the caller's ray parameterization.
    const float3 ray_orig = optixGetWorldRayOrigin();
    const float3 ray_dir  = optixGetWorldRayDirection();
    const float  ray_tmin = optixGetRayTmin();
    const float  ray_tmax = optixGetRayTmax();
    const float3 O      = ray_orig - hit_group_data->sphere.center;
    const float  l      = 1.0f / length( ray_dir );
    const float3 D      = ray_dir * l;
    const float  radius = hit_group_data->sphere.radius;
    // Quadratic t^2 + 2 b t + c = 0 for |O + t D| == radius.
    float b    = dot( O, D );
    float c    = dot( O, O ) - radius * radius;
    float disc = b * b - c;
    if( disc > 0.0f )
    {
        float sdisc        = sqrtf( disc );
        float root1        = ( -b - sdisc );
        float root11       = 0.0f;
        bool  check_second = true;
        // When the first root is large compared to the radius, re-solve the
        // quadratic from a point near that root and accumulate the small
        // correction root11 — presumably to recover float precision lost on
        // distant hits (TODO confirm rationale against SDK docs).
        const bool do_refine = fabsf( root1 ) > ( 10.0f * radius );
        if( do_refine )
        {
            // refine root1
            float3 O1 = O + root1 * D;
            b         = dot( O1, D );
            c         = dot( O1, O1 ) - radius * radius;
            disc      = b * b - c;
            if( disc > 0.0f )
            {
                sdisc  = sqrtf( disc );
                root11 = ( -b - sdisc );
            }
        }
        float  t;
        float3 normal;
        t = ( root1 + root11 ) * l;
        if( t > ray_tmin && t < ray_tmax )
        {
            normal = ( O + ( root1 + root11 ) * D ) / radius;
            // optixReportIntersection returns false if the hit was rejected;
            // in that case the farther root may still be a valid hit.
            if( optixReportIntersection( t, 0, float3_as_ints( normal ), float_as_int( radius ) ) )
                check_second = false;
        }
        if( check_second )
        {
            // Second (far) root; when refined, b/c describe the re-based
            // quadratic, so shift back by root1.
            float root2 = ( -b + sdisc ) + ( do_refine ? root1 : 0 );
            t           = root2 * l;
            normal      = ( O + root2 * D ) / radius;
            if( t > ray_tmin && t < ray_tmax )
                optixReportIntersection( t, 0, float3_as_ints( normal ), float_as_int( radius ) );
        }
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/sphere.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include "GeometryData.h"
namespace sphere {
// Number of 32-bit attribute registers reported by the sphere intersection
// program (normal x/y/z plus radius; see sphere.cu).
const unsigned int NUM_ATTRIBUTE_VALUES = 4u;
// SBT record payload for the sphere hit group.
struct SphereHitGroupData
{
    GeometryData::Sphere sphere;
};
}  // namespace sphere
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/util.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#ifndef __CUDACC_RTC__
#include <stdio.h>
#endif
// Debug helper: execute the statement that follows only for launch pixel
// (x_, y_).  Example:  if_pixel( 10, 20 ) printf( "here\n" );
// Note: declares launch_idx__ in the enclosing scope, so use at most once
// per scope.
// Fix: removed the stray trailing line-continuation '\' after the if(...)
// line, which spliced the following '#define print_pixel' directive into
// this macro's replacement list (ill-formed preprocessing).
#define if_pixel( x_, y_ )                                 \
    const uint3 launch_idx__ = optixGetLaunchIndex();      \
    if( launch_idx__.x == (x_) && launch_idx__.y == (y_) )

// Debug helper: printf only for launch pixel (x_, y_).
// NOTE(review): the trailing ';' after while(0) defeats the usual
// do { } while(0) semicolon-swallowing idiom, but callers may rely on it,
// so it is kept unchanged.
#define print_pixel( x_, y_, str, ... )                    \
    do                                                     \
    {                                                      \
        const uint3 launch_idx = optixGetLaunchIndex();    \
        if( launch_idx.x == (x_) && launch_idx.y == (y_) ) \
        {                                                  \
            printf( str, __VA_ARGS__ );                    \
        }                                                  \
    } while(0);
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/whitted.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <cuda/LocalGeometry.h>
#include <cuda/helpers.h>
#include <cuda/random.h>
#include <sutil/vec_math.h>
#include "whitted_cuda.h"
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Ray-generation program: traces one jittered radiance sample per pixel
// through a pinhole camera (eye, U, V, W) and accumulates the result over
// subframes into accum_buffer, writing the tonemapped color to frame_buffer.
extern "C" __global__ void __raygen__pinhole()
{
    const uint3  launch_idx     = optixGetLaunchIndex();
    const uint3  launch_dims    = optixGetLaunchDimensions();
    const float3 eye            = whitted::params.eye;
    const float3 U              = whitted::params.U;
    const float3 V              = whitted::params.V;
    const float3 W              = whitted::params.W;
    const int    subframe_index = whitted::params.subframe_index;
    //
    // Generate camera ray
    //
    // Per-pixel, per-subframe RNG seed for the subpixel jitter.
    unsigned int seed = tea<4>( launch_idx.y * launch_dims.x + launch_idx.x, subframe_index );
    // The center of each pixel is at fraction (0.5,0.5); later subframes
    // jitter the sample position for progressive anti-aliasing.
    const float2 subpixel_jitter =
        subframe_index == 0 ? make_float2( 0.5f, 0.5f ) : make_float2( rnd( seed ), rnd( seed ) );
    // Map the pixel coordinate into normalized device coordinates [-1, 1]^2.
    const float2 d =
        2.0f
            * make_float2( ( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
                           ( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y ) )
        - 1.0f;
    const float3 ray_direction = normalize( d.x * U + d.y * V + W );
    const float3 ray_origin    = eye;
    //
    // Trace camera ray
    //
    whitted::PayloadRadiance payload;
    payload.result     = make_float3( 0.0f );
    payload.importance = 1.0f;
    payload.depth      = 0;  // fix: depth is declared int; was initialized with 0.0f
    traceRadiance( whitted::params.handle, ray_origin, ray_direction,
                   0.01f,  // tmin       // TODO: smarter offset
                   1e16f,  // tmax
                   &payload );
    //
    // Update results
    // TODO: timview mode
    //
    // Running average over subframes: new = lerp(old, sample, 1/(n+1)).
    const unsigned int image_index = launch_idx.y * launch_dims.x + launch_idx.x;
    float3             accum_color = payload.result;
    if( subframe_index > 0 )
    {
        const float  a                = 1.0f / static_cast<float>( subframe_index + 1 );
        const float3 accum_color_prev = make_float3( whitted::params.accum_buffer[image_index] );
        accum_color                   = lerp( accum_color_prev, accum_color, a );
    }
    whitted::params.accum_buffer[image_index] = make_float4( accum_color, 1.0f );
    whitted::params.frame_buffer[image_index] = make_color( accum_color );
}
// Miss program: rays that hit nothing return the constant miss color.
extern "C" __global__ void __miss__constant_radiance()
{
    whitted::setPayloadResult( whitted::params.miss_color );
}
// Closest-hit program for occlusion (shadow) rays: any hit marks the ray
// as occluded.
extern "C" __global__ void __closesthit__occlusion()
{
    whitted::setPayloadOcclusion( true );
}
// Closest-hit program for radiance rays: shades the hit point with a
// metallic-roughness material (Schlick Fresnel, GGX distribution), summing
// direct lighting from point lights (with shadow rays) and ambient lights.
extern "C" __global__ void __closesthit__radiance()
{
    const whitted::HitGroupData* hit_group_data = reinterpret_cast<whitted::HitGroupData*>( optixGetSbtDataPointer() );
    const LocalGeometry          geom           = getLocalGeometry( hit_group_data->geometry_data );
    //
    // Retrieve material data
    //
    // Base color, optionally modulated by the (sRGB) base-color texture.
    float3 base_color = make_float3( hit_group_data->material_data.pbr.base_color );
    if( hit_group_data->material_data.pbr.base_color_tex )
        base_color *= whitted::linearize(
            make_float3( tex2D<float4>( hit_group_data->material_data.pbr.base_color_tex, geom.UV.x, geom.UV.y ) ) );
    float  metallic  = hit_group_data->material_data.pbr.metallic;
    float  roughness = hit_group_data->material_data.pbr.roughness;
    float4 mr_tex    = make_float4( 1.0f );
    if( hit_group_data->material_data.pbr.metallic_roughness_tex )
        // MR tex is (occlusion, roughness, metallic )
        mr_tex = tex2D<float4>( hit_group_data->material_data.pbr.metallic_roughness_tex, geom.UV.x, geom.UV.y );
    roughness *= mr_tex.y;
    metallic *= mr_tex.z;
    //
    // Convert to material params
    //
    // F0 is the dielectric normal-incidence reflectance; metals instead
    // reflect with their base color and have no diffuse component.
    const float  F0         = 0.04f;
    const float3 diff_color = base_color * ( 1.0f - F0 ) * ( 1.0f - metallic );
    const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
    const float  alpha      = roughness * roughness;
    //
    // compute direct lighting
    //
    // Shading normal: geometric normal, perturbed by the tangent-space
    // normal map when one is present.
    float3 N = geom.N;
    if( hit_group_data->material_data.pbr.normal_tex )
    {
        const float4 NN =
            2.0f * tex2D<float4>( hit_group_data->material_data.pbr.normal_tex, geom.UV.x, geom.UV.y ) - make_float4( 1.0f );
        N = normalize( NN.x * normalize( geom.dpdu ) + NN.y * normalize( geom.dpdv ) + NN.z * geom.N );
    }
    float3 result = make_float3( 0.0f );
    for( int i = 0; i < whitted::params.lights.count; ++i )
    {
        Light light = whitted::params.lights[i];
        if( light.type == Light::Type::POINT )
        {
            // TODO: optimize
            // Standard microfacet geometry terms: L = to light, V = to eye,
            // H = half vector.
            const float  L_dist  = length( light.point.position - geom.P );
            const float3 L       = ( light.point.position - geom.P ) / L_dist;
            const float3 V       = -normalize( optixGetWorldRayDirection() );
            const float3 H       = normalize( L + V );
            const float  N_dot_L = dot( N, L );
            const float  N_dot_V = dot( N, V );
            const float  N_dot_H = dot( N, H );
            const float  V_dot_H = dot( V, H );
            if( N_dot_L > 0.0f && N_dot_V > 0.0f )
            {
                // Shadow ray toward the light, with small epsilons to avoid
                // self-intersection at both ends.
                const float tmin     = 0.001f;           // TODO
                const float tmax     = L_dist - 0.001f;  // TODO
                const bool  occluded = whitted::traceOcclusion( whitted::params.handle, geom.P, L, tmin, tmax );
                if( !occluded )
                {
                    // Cook-Torrance style BRDF: Fresnel (Schlick) * visibility * GGX
                    // for specular, energy-conserving Lambert for diffuse.
                    const float3 F     = whitted::schlick( spec_color, V_dot_H );
                    const float  G_vis = whitted::vis( N_dot_L, N_dot_V, alpha );
                    const float  D     = whitted::ggxNormal( N_dot_H, alpha );
                    const float3 diff = ( 1.0f - F ) * diff_color / M_PIf;
                    const float3 spec = F * G_vis * D;
                    result += light.point.color * light.point.intensity * N_dot_L * ( diff + spec );
                }
            }
        }
        else if( light.type == Light::Type::AMBIENT )
        {
            result += light.ambient.color * base_color;
        }
    }
    whitted::setPayloadResult( result );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/whitted.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <vector_types.h>
#include <cuda/BufferView.h>
#include <cuda/GeometryData.h>
#include <cuda/Light.h>
#include <cuda/MaterialData.h>
namespace whitted
{
// Number of 32-bit payload registers the radiance ray type occupies
// (3 for the RGB result + 1 for the recursion depth; see traceRadiance
// in whitted_cuda.h, which packs/unpacks u0..u3).
const unsigned int NUM_PAYLOAD_VALUES = 4u;
// Per-hit-group SBT record payload: geometry to intersect plus the
// material used to shade it.
struct HitGroupData
{
    GeometryData geometry_data;
    MaterialData material_data;
};
// Ray types traced by this pipeline; the enum values double as SBT
// offset/stride arguments to optixTrace.
enum RayType
{
    RAY_TYPE_RADIANCE  = 0,
    RAY_TYPE_OCCLUSION = 1,
    RAY_TYPE_COUNT = 2
};
// Launch parameters copied to the device once per optixLaunch.
struct LaunchParams
{
    unsigned int             width;           // output image width in pixels
    unsigned int             height;          // output image height in pixels
    unsigned int             subframe_index;  // progressive-accumulation frame counter
    float4*                  accum_buffer;    // running HDR accumulation (one float4 per pixel)
    uchar4*                  frame_buffer;    // final 8-bit output for display
    int                      max_depth;       // maximum recursion depth for reflections/refractions
    float3                   eye;             // camera position
    float3                   U;               // camera frame: right axis (scaled by half-extent)
    float3                   V;               // camera frame: up axis
    float3                   W;               // camera frame: view direction
    BufferView<Light>        lights;          // scene lights (point/ambient; see Light.h)
    float3                   miss_color;      // background color returned by the miss program
    OptixTraversableHandle   handle;          // top-level traversable to trace against
};
// Radiance-ray payload as seen by device code.
// NOTE(review): 'importance' is never read back by traceRadiance in this
// chunk -- only result and depth travel through the payload registers.
struct PayloadRadiance
{
    float3 result;
    float  importance;
    int    depth;
};
// Occlusion rays carry no data; a hit is signalled via payload register 0.
struct PayloadOcclusion
{
};
} // end namespace whitted
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/cuda/whitted_cuda.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <sutil/vec_math.h>
#include "whitted.h"
namespace whitted {
extern "C" {
// Pipeline launch parameters, filled by optixLaunch from the host side.
// Declared extern "C" so the host can locate it by the unmangled name
// "params" (see pipelineLaunchParamsVariableName in the host code).
__constant__ whitted::LaunchParams params;
}
//------------------------------------------------------------------------------
//
// GGX/smith shading helpers
// TODO: move into header so can be shared by path tracer and bespoke renderers
//
//------------------------------------------------------------------------------
// Schlick approximation of the Fresnel reflectance term.
__device__ __forceinline__ float3 schlick( const float3 spec_color, const float V_dot_H )
{
    const float weight = powf( 1.0f - V_dot_H, 5.0f );
    return spec_color + ( make_float3( 1.0f ) - spec_color ) * weight;
}
// Smith-style visibility (shadowing/masking) term used with the GGX NDF.
__device__ __forceinline__ float vis( const float N_dot_L, const float N_dot_V, const float alpha )
{
    const float a2       = alpha*alpha;
    const float lambda_v = N_dot_L * sqrtf( N_dot_V*N_dot_V * ( 1.0f - a2 ) + a2 );
    const float lambda_l = N_dot_V * sqrtf( N_dot_L*N_dot_L * ( 1.0f - a2 ) + a2 );
    return 2.0f * N_dot_L * N_dot_V / ( lambda_v + lambda_l );
}
// GGX (Trowbridge-Reitz) normal distribution function D(h).
__device__ __forceinline__ float ggxNormal( const float N_dot_H, const float alpha )
{
    const float a2    = alpha*alpha;
    const float nh2   = N_dot_H*N_dot_H;
    const float denom = nh2*( a2 - 1.0f ) + 1.0f;
    return a2/( M_PIf*denom*denom );
}
// Convert a gamma-encoded color to linear space with a per-channel 2.2 power.
__device__ __forceinline__ float3 linearize( float3 c )
{
    return make_float3(
        powf( c.x, 2.2f ),
        powf( c.y, 2.2f ),
        powf( c.z, 2.2f ) );
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Trace a radiance ray and unpack the result into 'payload'.
// The float3 result travels bit-cast through payload registers u0..u2 and
// the recursion depth through u3 (matches setPayloadResult below, which
// writes the registers with float_as_int on the hit/miss side).
// NOTE(review): payload->importance is not transported -- only result and
// depth come back from the trace.
static __forceinline__ __device__ void traceRadiance(
        OptixTraversableHandle      handle,
        float3                      ray_origin,
        float3                      ray_direction,
        float                       tmin,
        float                       tmax,
        whitted::PayloadRadiance*   payload
        )
{
    unsigned int u0=0, u1=0, u2=0, u3=0;
    optixTrace(
            handle,
            ray_origin, ray_direction,
            tmin,
            tmax,
            0.0f,                     // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_NONE,
            whitted::RAY_TYPE_RADIANCE,  // SBT offset
            whitted::RAY_TYPE_COUNT,     // SBT stride
            whitted::RAY_TYPE_RADIANCE,  // missSBTIndex
            u0, u1, u2, u3 );
    // Reinterpret the raw register bits back into floats (no conversion).
    payload->result.x = __int_as_float( u0 );
    payload->result.y = __int_as_float( u1 );
    payload->result.z = __int_as_float( u2 );
    payload->depth    = u3;
}
// Trace a shadow ray; returns true if any geometry blocks the segment
// [tmin, tmax] along ray_direction. Uses a single payload register which
// the occlusion closest-hit program sets via setPayloadOcclusion; the ray
// terminates on the first hit since only a boolean answer is needed.
static __forceinline__ __device__ bool traceOcclusion(
        OptixTraversableHandle handle,
        float3                 ray_origin,
        float3                 ray_direction,
        float                  tmin,
        float                  tmax
        )
{
    unsigned int occluded = 0u;
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f,                     // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
            whitted::RAY_TYPE_OCCLUSION,  // SBT offset
            whitted::RAY_TYPE_COUNT,      // SBT stride
            whitted::RAY_TYPE_OCCLUSION,  // missSBTIndex
            occluded );
    return occluded;
}
// Store an RGB radiance result into payload registers 0..2 (bit-cast, no
// conversion). Counterpart of the __int_as_float unpacking in traceRadiance.
__forceinline__ __device__ void setPayloadResult( float3 p )
{
    optixSetPayload_0( float_as_int( p.x ) );
    optixSetPayload_1( float_as_int( p.y ) );
    optixSetPayload_2( float_as_int( p.z ) );
}
// Signal an occlusion hit through payload register 0 (read by traceOcclusion).
__forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
    optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
} // namespace whitted
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixBoundValues/optixBoundValues.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This sample shows how to apply launch parameter specialization.
// That technique allows to replace loads from a given range of the launch parameters
// with a fixed value at compile time. Compiler optimization passes use those constant
// values which may result in improved optimization results.
// The sample demonstrates the usage of the OptixModuleCompileBoundValueEntry struct
// and the OptixModuleCompileOptions::boundValues field.
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Camera.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/Trackball.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <optix_stack_size.h>
#include <GLFW/glfw3.h>
#include "optixBoundValues.h"
#include <array>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
// Window / framebuffer state shared between GLFW callbacks and the main loop.
bool resize_dirty = false;   // framebuffer needs reallocation on next update
bool minimized    = false;   // window is iconified; keep last resolution
// Camera state
bool             camera_changed = true;  // forces accumulation restart
sutil::Camera    camera;
sutil::Trackball trackball;
// Mouse state
int32_t mouse_button = -1;   // button currently held, or -1 when none
int32_t samples_per_launch = 16;
// The number of light samples is specified in the launch parameters and can
// be specialized to a fixed value at compile time.
// Note that when changing the number of light samples at runtime (PLUS/MINUS
// keys) with specialization enabled, the closest-hit module must be
// recompiled or loaded from the cache.
unsigned int light_samples = 1;
bool specialize = true;      // bake light_samples into the module at compile time
//------------------------------------------------------------------------------
//
// Local types
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Generic SBT record: the OptiX-managed header followed by user data T,
// aligned as the SBT requires.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};
typedef Record<RayGenData>   RayGenRecord;
typedef Record<MissData>     MissRecord;
typedef Record<HitGroupData> HitGroupRecord;
// 16-byte vertex (position + padding) matching the GAS vertex stride below.
struct Vertex
{
    float x, y, z, pad;
};
// Indexed triangle helper (padded to 16 bytes).
struct IndexedTriangle
{
    uint32_t v1, v2, v3, pad;
};
// 3x4 row-major instance transform.
struct Instance
{
    float transform[12];
};
// All OptiX/CUDA objects owned by this sample, passed by reference between
// the setup and render functions and reachable from GLFW callbacks via the
// window user pointer.
struct PathTracerState
{
    OptixDeviceContext context = 0;
    OptixTraversableHandle         gas_handle               = 0;   // Traversable handle for triangle AS
    CUdeviceptr                    d_gas_output_buffer      = 0;   // Triangle AS memory
    CUdeviceptr                    d_vertices               = 0;
    OptixModule                    ptx_module               = 0;   // raygen/miss/occlusion programs
    OptixModule                    ptx_module_radiance      = 0;   // radiance CH, possibly specialized
    OptixPipelineCompileOptions    pipeline_compile_options = {};
    OptixPipeline                  pipeline                 = 0;
    OptixProgramGroup              raygen_prog_group        = 0;
    OptixProgramGroup              radiance_miss_group      = 0;
    OptixProgramGroup              occlusion_miss_group     = 0;
    OptixProgramGroup              radiance_hit_group       = 0;
    OptixProgramGroup              occlusion_hit_group      = 0;
    CUstream                       stream                   = 0;
    Params                         params;                        // host-side copy of launch params
    Params*                        d_params;                      // device-side copy, updated per launch
    OptixShaderBindingTable        sbt                      = {};
};
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
// Cornell-box style scene: 32 triangles shaded by 4 materials.
const int32_t TRIANGLE_COUNT = 32;
const int32_t MAT_COUNT      = 4;
// Triangle soup, 3 vertices per triangle; 'pad' keeps a 16-byte stride
// (vertexStrideInBytes = sizeof(Vertex) in buildMeshAccel).
const static std::array<Vertex, TRIANGLE_COUNT* 3> g_vertices =
{ {
    // Floor -- white lambert
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {    0.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,    0.0f,    0.0f, 0.0f },
    // Ceiling -- white lambert
    {    0.0f,  548.8f,    0.0f, 0.0f },
    {  556.0f,  548.8f,    0.0f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,  548.8f,    0.0f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },
    // Back wall -- white lambert
    {    0.0f,    0.0f,  559.2f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },
    // Right wall -- green lambert
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {    0.0f,  548.8f,    0.0f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,    0.0f,  559.2f, 0.0f },
    // Left wall -- red lambert
    {  556.0f,    0.0f,    0.0f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,    0.0f,    0.0f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,  548.8f,    0.0f, 0.0f },
    // Short block -- white lambert
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {  242.0f,  165.0f,  274.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {  242.0f,  165.0f,  274.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  290.0f,    0.0f,  114.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  240.0f,  165.0f,  272.0f, 0.0f },
    {  290.0f,    0.0f,  114.0f, 0.0f },
    {  240.0f,  165.0f,  272.0f, 0.0f },
    {  240.0f,    0.0f,  272.0f, 0.0f },
    {  130.0f,    0.0f,   65.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  130.0f,    0.0f,   65.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  290.0f,    0.0f,  114.0f, 0.0f },
    {   82.0f,    0.0f,  225.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {   82.0f,    0.0f,  225.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {  130.0f,    0.0f,   65.0f, 0.0f },
    {  240.0f,    0.0f,  272.0f, 0.0f },
    {  240.0f,  165.0f,  272.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {  240.0f,    0.0f,  272.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {   82.0f,    0.0f,  225.0f, 0.0f },
    // Tall block -- white lambert
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  314.0f,  330.0f,  455.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  314.0f,  330.0f,  455.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  423.0f,    0.0f,  247.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  423.0f,    0.0f,  247.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  472.0f,    0.0f,  406.0f, 0.0f },
    {  472.0f,    0.0f,  406.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  314.0f,  330.0f,  456.0f, 0.0f },
    {  472.0f,    0.0f,  406.0f, 0.0f },
    {  314.0f,  330.0f,  456.0f, 0.0f },
    {  314.0f,    0.0f,  456.0f, 0.0f },
    {  314.0f,    0.0f,  456.0f, 0.0f },
    {  314.0f,  330.0f,  456.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  314.0f,    0.0f,  456.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  265.0f,    0.0f,  296.0f, 0.0f },
    {  265.0f,    0.0f,  296.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  265.0f,    0.0f,  296.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  423.0f,    0.0f,  247.0f, 0.0f },
    // Ceiling light -- emmissive
    {  343.0f,  548.6f,  227.0f, 0.0f },
    {  213.0f,  548.6f,  227.0f, 0.0f },
    {  213.0f,  548.6f,  332.0f, 0.0f },
    {  343.0f,  548.6f,  227.0f, 0.0f },
    {  213.0f,  548.6f,  332.0f, 0.0f },
    {  343.0f,  548.6f,  332.0f, 0.0f }
} };
// One material index per triangle; indexes into the two color arrays below
// and selects the SBT record during the GAS build (sbtIndexOffsetBuffer).
static std::array<uint32_t, TRIANGLE_COUNT> g_mat_indices = {{
    0, 0,                          // Floor         -- white lambert
    0, 0,                          // Ceiling       -- white lambert
    0, 0,                          // Back wall     -- white lambert
    1, 1,                          // Right wall    -- green lambert
    2, 2,                          // Left wall     -- red lambert
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // Short block   -- white lambert
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // Tall block    -- white lambert
    3, 3                           // Ceiling light -- emmissive
}};
// Per-material emitted radiance; only material 3 (the ceiling light) emits.
const std::array<float3, MAT_COUNT> g_emission_colors =
{ {
    {  0.0f,  0.0f,  0.0f },
    {  0.0f,  0.0f,  0.0f },
    {  0.0f,  0.0f,  0.0f },
    { 15.0f, 15.0f,  5.0f }
} };
// Per-material diffuse albedo.
const std::array<float3, MAT_COUNT> g_diffuse_colors =
{ {
    { 0.80f, 0.80f, 0.80f },
    { 0.05f, 0.80f, 0.05f },
    { 0.80f, 0.05f, 0.05f },
    { 0.50f, 0.00f, 0.00f }
} };
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// Remember which button is held and start trackball tracking on press;
// forget the button on release.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    double xpos = 0.0, ypos = 0.0;
    glfwGetCursorPos( window, &xpos, &ypos );

    if( action != GLFW_PRESS )
    {
        mouse_button = -1;
        return;
    }

    mouse_button = button;
    trackball.startTracking( static_cast<int>( xpos ), static_cast<int>( ypos ) );
}
// Orbit (left button) or pan/rotate around the eye (right button) while a
// mouse button is held. The original duplicated the updateTracking call and
// camera invalidation in both branches; only the view mode differs, so it is
// selected up front and the shared code appears once.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    if( mouse_button != GLFW_MOUSE_BUTTON_LEFT && mouse_button != GLFW_MOUSE_BUTTON_RIGHT )
        return;

    Params& params = static_cast<PathTracerState*>( glfwGetWindowUserPointer( window ) )->params;

    trackball.setViewMode( mouse_button == GLFW_MOUSE_BUTTON_LEFT
                               ? sutil::Trackball::LookAtFixed
                               : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), params.width, params.height );
    camera_changed = true;
}
// Track window resizes: update the requested render resolution and mark the
// output buffers dirty. Ignored while minimized so rendering continues at
// the last real resolution.
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    if( minimized )
        return;

    // Output dimensions must be at least 1 in both x and y.
    sutil::ensureMinimumSize( res_x, res_y );

    PathTracerState* state = static_cast<PathTracerState*>( glfwGetWindowUserPointer( window ) );
    state->params.width    = res_x;
    state->params.height   = res_y;
    camera_changed         = true;
    resize_dirty           = true;
}
// Record whether the window is currently iconified (see windowSizeCallback).
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = iconified > 0;
}
void updatePipeline( PathTracerState& state );
// Keyboard handling:
//   Q / ESC : quit
//   G       : toggle UI draw (placeholder)
//   S       : toggle launch-parameter specialization and rebuild the pipeline
//
// Fix: in the original, the G and S branches were chained off the
// `action == GLFW_PRESS` test with `else if`, so they fired on GLFW_RELEASE
// and on every GLFW_REPEAT -- holding 'S' repeatedly retoggled specialization
// and triggered pipeline rebuilds. All keys are now handled on the initial
// press only.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    if( action != GLFW_PRESS )
        return;

    if( key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE )
    {
        glfwSetWindowShouldClose( window, true );
    }
    else if( key == GLFW_KEY_G )
    {
        // toggle UI draw
    }
    else if( key == GLFW_KEY_S )
    {
        specialize = !specialize;
        updatePipeline( *static_cast<PathTracerState*>( glfwGetWindowUserPointer( window ) ) );
    }
}
// '+' / '-' adjust the number of light samples (never below 1). When
// specialization is active the value is baked into the closest-hit module,
// so the pipeline must be rebuilt for the change to take effect.
static void charCallback( GLFWwindow* window, unsigned int codepoint )
{
    bool changed = false;
    switch( codepoint )
    {
        case '+':
            ++light_samples;
            changed = true;
            break;
        case '-':
            if( light_samples > 1 )
            {
                --light_samples;
                changed = true;
            }
            break;
        default:
            break;
    }

    if( changed && specialize )
        updatePipeline( *static_cast<PathTracerState*>( glfwGetWindowUserPointer( window ) ) );
}
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
if( trackball.wheelEvent( (int)yscroll ) )
camera_changed = true;
}
//------------------------------------------------------------------------------
//
// Helper functions
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Print command-line help to stderr and terminate.
// Fix: the --no-specialize option was documented with a literal "..."
// placeholder; give it a real description matching what the flag does
// (disable compile-time launch-parameter specialization).
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n";
    std::cerr << "Options: --file | -f <filename> File for image output\n";
    std::cerr << " --launch-samples | -s Number of samples per pixel per launch (default 16)\n";
    std::cerr << " --light-samples | -l Number of radiance samples (default 1)\n";
    std::cerr << " --no-specialize Disable launch parameter specialization\n";
    std::cerr << " --no-gl-interop Disable GL interop for display\n";
    std::cerr << " --dim=<width>x<height> Set image dimensions; defaults to 768x768\n";
    std::cerr << " --help | -h Print this usage message\n";
    exit( 0 );
}
// One-time setup of the launch parameters: allocates the accumulation
// buffer, describes the ceiling area light, and creates the stream plus the
// device-side copy of Params that launchSubframe refreshes every frame.
void initLaunchParams( PathTracerState& state )
{
    // Progressive accumulation buffer: one float4 per pixel.
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &state.params.accum_buffer ),
        state.params.width * state.params.height * sizeof( float4 )
        ) );
    state.params.frame_buffer = nullptr;  // Will be set when output buffer is mapped

    state.params.samples_per_launch = samples_per_launch;
    state.params.light_samples      = light_samples;
    state.params.subframe_index     = 0u;

    // Parallelogram area light just below the ceiling, spanned by v1 x v2;
    // values match the "Ceiling light" triangles in g_vertices.
    state.params.light.emission = make_float3( 15.0f, 15.0f, 5.0f );
    state.params.light.corner   = make_float3( 343.0f, 548.5f, 227.0f );
    state.params.light.v1       = make_float3( 0.0f, 0.0f, 105.0f );
    state.params.light.v2       = make_float3( -130.0f, 0.0f, 0.0f );
    state.params.light.normal   = normalize( cross( state.params.light.v1, state.params.light.v2 ) );
    state.params.handle         = state.gas_handle;

    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params ), sizeof( Params ) ) );
}
// Push the current camera state into the launch parameters, but only when
// the camera actually moved since the last frame.
void handleCameraUpdate( Params& params )
{
    if( !camera_changed )
        return;
    camera_changed = false;

    const float aspect = static_cast<float>( params.width ) / static_cast<float>( params.height );
    camera.setAspectRatio( aspect );
    params.eye = camera.eye();
    camera.UVWFrame( params.U, params.V, params.W );
}
// Resize the display buffer and reallocate the accumulation buffer when the
// window size changed.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    if( !resize_dirty )
        return;
    resize_dirty = false;

    output_buffer.resize( params.width, params.height );

    // The accumulation buffer must track the new framebuffer size.
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( params.accum_buffer ) ) );
    const size_t accum_bytes = params.width * params.height * sizeof( float4 );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &params.accum_buffer ), accum_bytes ) );
}
// Per-frame state sync: restart progressive accumulation if the view or the
// window size changed, then apply camera and resize updates.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    // Check the dirty flags before the handlers below clear them.
    const bool restart_accumulation = camera_changed || resize_dirty;
    if( restart_accumulation )
        params.subframe_index = 0;

    params.light_samples = light_samples;
    handleCameraUpdate( params );
    handleResize( output_buffer, params );
}
// Render one subframe: map the GL-interop output buffer, upload the current
// launch parameters asynchronously on the render stream, launch the
// pipeline, then unmap and synchronize.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, PathTracerState& state )
{
    // Launch
    uchar4* result_buffer_data = output_buffer.map();
    state.params.frame_buffer = result_buffer_data;
    // Params upload and optixLaunch share state.stream, so the launch sees
    // the fresh parameters without an intermediate sync.
    CUDA_CHECK( cudaMemcpyAsync(
        reinterpret_cast<void*>( state.d_params ),
        &state.params, sizeof( Params ),
        cudaMemcpyHostToDevice, state.stream
        ) );
    OPTIX_CHECK( optixLaunch(
        state.pipeline,
        state.stream,
        reinterpret_cast<CUdeviceptr>( state.d_params ),
        sizeof( Params ),
        &state.sbt,
        state.params.width,   // launch width
        state.params.height,  // launch height
        1                     // launch depth
        ) );
    output_buffer.unmap();
    CUDA_SYNC_CHECK();
}
// Blit the rendered output buffer to the window via GLDisplay.
void displaySubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, sutil::GLDisplay& gl_display, GLFWwindow* window )
{
    // Query the framebuffer resolution; on HiDPI displays it can differ
    // from the logical window size.
    int framebuf_res_x = 0;
    int framebuf_res_y = 0;
    glfwGetFramebufferSize( window, &framebuf_res_x, &framebuf_res_y );

    gl_display.display( output_buffer.width(),
                        output_buffer.height(),
                        framebuf_res_x,
                        framebuf_res_y,
                        output_buffer.getPBO() );
}
// OptiX device-context log callback: writes "[level][tag]: message" to
// stderr (registered in createContext with logCallbackLevel 4).
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::cerr << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << "\n";
}
void initCameraState()
{
camera.setEye( make_float3( 278.0f, 273.0f, -900.0f ) );
camera.setLookat( make_float3( 278.0f, 273.0f, 330.0f ) );
camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
camera.setFovY( 35.0f );
camera_changed = true;
trackball.setCamera( &camera );
trackball.setMoveSpeed( 10.0f );
trackball.setReferenceFrame(
make_float3( 1.0f, 0.0f, 0.0f ),
make_float3( 0.0f, 0.0f, 1.0f ),
make_float3( 0.0f, 1.0f, 0.0f )
);
trackball.setGimbalLock( true );
}
// Initialize CUDA and create the OptiX device context on the current CUDA
// context, wiring up the log callback.
void createContext( PathTracerState& state )
{
    // Initialize CUDA (cudaFree(0) forces runtime/context creation).
    CUDA_CHECK( cudaFree( 0 ) );
    OptixDeviceContext context;
    CUcontext cu_ctx = 0;  // zero means take the current context
    OPTIX_CHECK( optixInit() );
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &context_log_cb;
    options.logCallbackLevel = 4;
#ifdef DEBUG
    // Enables validation mode for OptiX:
    // Enables all debug exceptions during optix launches and stops on first exception.
    // Enables verification of launch parameter specialization values with the current launch parameter values.
    options.validationMode = OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL;
#endif
    OPTIX_CHECK( optixDeviceContextCreate( cu_ctx, &options, &context ) );
    state.context = context;
}
// Upload the scene geometry and build the (compacted) triangle GAS.
// Populates state.d_vertices, state.gas_handle and state.d_gas_output_buffer.
void buildMeshAccel( PathTracerState& state )
{
    //
    // copy mesh data to device
    //
    const size_t vertices_size_in_bytes = g_vertices.size() * sizeof( Vertex );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_vertices ), vertices_size_in_bytes ) );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( state.d_vertices ),
        g_vertices.data(), vertices_size_in_bytes,
        cudaMemcpyHostToDevice
        ) );
    // Per-triangle material indices select one of the MAT_COUNT SBT records.
    CUdeviceptr  d_mat_indices             = 0;
    const size_t mat_indices_size_in_bytes = g_mat_indices.size() * sizeof( uint32_t );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_mat_indices ), mat_indices_size_in_bytes ) );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( d_mat_indices ),
        g_mat_indices.data(),
        mat_indices_size_in_bytes,
        cudaMemcpyHostToDevice
        ) );
    //
    // Build triangle GAS
    //
    uint32_t triangle_input_flags[MAT_COUNT] =  // One per SBT record for this build input
    {
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT
    };
    OptixBuildInput triangle_input                           = {};
    triangle_input.type                                      = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
    triangle_input.triangleArray.vertexFormat                = OPTIX_VERTEX_FORMAT_FLOAT3;
    triangle_input.triangleArray.vertexStrideInBytes         = sizeof( Vertex );
    triangle_input.triangleArray.numVertices                 = static_cast<uint32_t>( g_vertices.size() );
    triangle_input.triangleArray.vertexBuffers               = &state.d_vertices;
    triangle_input.triangleArray.flags                       = triangle_input_flags;
    triangle_input.triangleArray.numSbtRecords               = MAT_COUNT;
    triangle_input.triangleArray.sbtIndexOffsetBuffer        = d_mat_indices;
    triangle_input.triangleArray.sbtIndexOffsetSizeInBytes   = sizeof( uint32_t );
    triangle_input.triangleArray.sbtIndexOffsetStrideInBytes = sizeof( uint32_t );
    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags             = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
    accel_options.operation              = OPTIX_BUILD_OPERATION_BUILD;
    OptixAccelBufferSizes gas_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage(
        state.context,
        &accel_options,
        &triangle_input,
        1,  // num_build_inputs
        &gas_buffer_sizes
        ) );
    CUdeviceptr d_temp_buffer;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer ), gas_buffer_sizes.tempSizeInBytes ) );
    // non-compacted output; the compacted size (a size_t) is emitted into the
    // 8 bytes after the 8-byte-aligned end of the GAS output.
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    size_t      compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ),
        compactedSizeOffset + 8
        ) );
    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type               = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result             = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );
    OPTIX_CHECK( optixAccelBuild(
        state.context,
        0,  // CUDA stream
        &accel_options,
        &triangle_input,
        1,  // num build inputs
        d_temp_buffer,
        gas_buffer_sizes.tempSizeInBytes,
        d_buffer_temp_output_gas_and_compacted_size,
        gas_buffer_sizes.outputSizeInBytes,
        &state.gas_handle,
        &emitProperty,  // emitted property list
        1               // num emitted properties
        ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_mat_indices ) ) );
    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, (void*)emitProperty.result, sizeof(size_t), cudaMemcpyDeviceToHost ) );
    // Compact only if it actually saves memory.
    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_gas_output_buffer ), compacted_gas_size ) );
        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( state.context, 0, state.gas_handle, state.d_gas_output_buffer, compacted_gas_size, &state.gas_handle ) );
        CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
    }
    else
    {
        state.d_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
    }
}
// Compile the module holding only the radiance closest-hit program
// (optixBoundValues_ch.cu). When 'specialize' is on, the launch-parameter
// field 'light_samples' is bound to the current host value at compile time
// via OptixModuleCompileBoundValueEntry, letting the compiler treat it as a
// constant. This module is kept separate from the others so it can be
// recompiled alone when the value changes (see updatePipeline callers).
void createRadianceModule( PathTracerState& state )
{
    OptixModuleCompileOptions module_compile_options = {};
    OptixModuleCompileBoundValueEntry boundValue = {};
    if( specialize )
    {
        boundValue.pipelineParamOffsetInBytes = offsetof( Params, light_samples );
        boundValue.sizeInBytes                = sizeof( Params::light_samples );
        boundValue.boundValuePtr              = &light_samples;  // global -- outlives the compile call
        boundValue.annotation                 = "light_samples";
        module_compile_options.numBoundValues = 1;
        module_compile_options.boundValues    = &boundValue;
    }
    size_t      inputSize = 0;
    const char* input     = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixBoundValues_ch.cu", inputSize );
    char   log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
        state.context,
        &module_compile_options,
        &state.pipeline_compile_options,
        input,
        inputSize,
        log,
        &sizeof_log,
        &state.ptx_module_radiance
        ) );
}
// Set up the pipeline compile options shared by all modules, compile the
// main module (raygen/miss/occlusion programs from optixBoundValues.cu),
// then build the separately-specializable radiance module.
void createModule( PathTracerState& state )
{
    OptixModuleCompileOptions module_compile_options = {};
    state.pipeline_compile_options.usesMotionBlur        = false;
    state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS;
    state.pipeline_compile_options.numPayloadValues      = 2;
    state.pipeline_compile_options.numAttributeValues    = 2;
#ifdef DEBUG  // Enables debug exceptions during optix launches. This may incur significant performance cost and should only be done during development.
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_DEBUG | OPTIX_EXCEPTION_FLAG_TRACE_DEPTH | OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
#else
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
#endif
    // Must match the __constant__ variable name in the device code.
    state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";
    size_t      inputSize = 0;
    const char* input     = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixBoundValues.cu", inputSize );
    char   log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
        state.context,
        &module_compile_options,
        &state.pipeline_compile_options,
        input,
        inputSize,
        log,
        &sizeof_log,
        &state.ptx_module
        ) );
    createRadianceModule( state );
}
// Create the hit group for radiance rays from the (possibly specialized)
// radiance module.
void createRadianceProgramGroup( PathTracerState& state )
{
    char   log[2048];
    size_t sizeof_log = sizeof( log );

    OptixProgramGroupOptions program_group_options = {};

    OptixProgramGroupDesc hit_prog_group_desc        = {};
    hit_prog_group_desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hit_prog_group_desc.hitgroup.moduleCH            = state.ptx_module_radiance;
    hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__radiance";

    OPTIX_CHECK_LOG( optixProgramGroupCreate(
        state.context,
        &hit_prog_group_desc,
        1,  // num program groups
        &program_group_options,
        log,
        &sizeof_log,
        &state.radiance_hit_group
        ) );
}
// Create all program groups: raygen, radiance miss, null occlusion miss,
// occlusion hit group, and (via createRadianceProgramGroup) the radiance
// hit group.
// Fix: the occlusion hit-group creation used OPTIX_CHECK even though it
// passes the log buffer; switched to OPTIX_CHECK_LOG for consistency with
// every sibling call so compile diagnostics are reported the same way.
void createProgramGroups( PathTracerState& state )
{
    OptixProgramGroupOptions program_group_options = {};

    char   log[2048];
    size_t sizeof_log = sizeof( log );

    {
        // Ray generation program.
        OptixProgramGroupDesc raygen_prog_group_desc    = {};
        raygen_prog_group_desc.kind                     = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
        raygen_prog_group_desc.raygen.module            = state.ptx_module;
        raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";

        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &raygen_prog_group_desc,
            1,  // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.raygen_prog_group
            ) );
    }

    {
        // Miss program for radiance rays.
        OptixProgramGroupDesc miss_prog_group_desc  = {};
        miss_prog_group_desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
        miss_prog_group_desc.miss.module            = state.ptx_module;
        miss_prog_group_desc.miss.entryFunctionName = "__miss__radiance";
        sizeof_log                                  = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &miss_prog_group_desc,
            1,  // num program groups
            &program_group_options,
            log, &sizeof_log,
            &state.radiance_miss_group
            ) );

        memset( &miss_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
        miss_prog_group_desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
        miss_prog_group_desc.miss.module            = nullptr;  // NULL miss program for occlusion rays
        miss_prog_group_desc.miss.entryFunctionName = nullptr;
        sizeof_log                                  = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &miss_prog_group_desc,
            1,  // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.occlusion_miss_group
            ) );
    }

    {
        // Hit group for occlusion rays (closest-hit only; anyhit disabled
        // via the geometry flags in buildMeshAccel).
        OptixProgramGroupDesc hit_prog_group_desc        = {};
        hit_prog_group_desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
        hit_prog_group_desc.hitgroup.moduleCH            = state.ptx_module;
        hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__occlusion";
        sizeof_log                                       = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context,
            &hit_prog_group_desc,
            1,  // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.occlusion_hit_group
            ) );
    }

    createRadianceProgramGroup( state );
}
// Link all program groups into the pipeline and size its stacks.
void createPipeline( PathTracerState& state )
{
    OptixProgramGroup program_groups[] =
    {
        state.raygen_prog_group,
        state.radiance_miss_group,
        state.occlusion_miss_group,
        state.radiance_hit_group,
        state.occlusion_hit_group
    };
    OptixPipelineLinkOptions pipeline_link_options = {};
    // maxTraceDepth = 2: a radiance ray may spawn an occlusion ray.
    pipeline_link_options.maxTraceDepth = 2;
    // NOTE(review): full debug info is requested unconditionally here, which
    // can cost performance; acceptable for a sample.
    pipeline_link_options.debugLevel    = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
    char   log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate(
        state.context,
        &state.pipeline_compile_options,
        &pipeline_link_options,
        program_groups,
        sizeof( program_groups ) / sizeof( program_groups[0] ),
        log,
        &sizeof_log,
        &state.pipeline
        ) );
    // We need to specify the max traversal depth. Calculate the stack sizes, so we can specify all
    // parameters to optixPipelineSetStackSize.
    OptixStackSizes stack_sizes = {};
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.raygen_prog_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.radiance_miss_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.occlusion_miss_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.radiance_hit_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.occlusion_hit_group, &stack_sizes ) );
    uint32_t max_trace_depth = 2;
    uint32_t max_cc_depth    = 0;  // no continuation callables
    uint32_t max_dc_depth    = 0;  // no direct callables
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes(
        &stack_sizes,
        max_trace_depth,
        max_cc_depth,
        max_dc_depth,
        &direct_callable_stack_size_from_traversal,
        &direct_callable_stack_size_from_state,
        &continuation_stack_size
        ) );
    // Single-GAS scene (see traversableGraphFlags in createModule).
    const uint32_t max_traversal_depth = 1;
    OPTIX_CHECK( optixPipelineSetStackSize(
        state.pipeline,
        direct_callable_stack_size_from_traversal,
        direct_callable_stack_size_from_state,
        continuation_stack_size,
        max_traversal_depth
        ) );
}
// Reserve device memory for the three SBT record arrays and record their
// strides/counts.  The record contents are written later by fillSBT() and
// fillHitGroupSBT().
void allocateSBT( PathTracerState& state )
{
    const size_t rg_size = sizeof( RayGenRecord );
    const size_t ms_size = sizeof( MissRecord );
    const size_t hg_size = sizeof( HitGroupRecord );

    // Single raygen record.
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.sbt.raygenRecord ), rg_size ) );

    // One miss record per ray type.
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.sbt.missRecordBase ), ms_size * RAY_TYPE_COUNT ) );
    state.sbt.missRecordStrideInBytes = static_cast<uint32_t>( ms_size );
    state.sbt.missRecordCount         = RAY_TYPE_COUNT;

    // One hit-group record per (material, ray type) pair.
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &state.sbt.hitgroupRecordBase ),
        hg_size * RAY_TYPE_COUNT * MAT_COUNT
    ) );
    state.sbt.hitgroupRecordStrideInBytes = static_cast<uint32_t>( hg_size );
    state.sbt.hitgroupRecordCount         = RAY_TYPE_COUNT * MAT_COUNT;
}
// Write the raygen and miss records into the device buffers reserved by
// allocateSBT().  Hit-group records are written separately by fillHitGroupSBT().
void fillSBT( PathTracerState& state )
{
    const size_t raygen_record_size = sizeof( RayGenRecord );
    RayGenRecord rg_sbt = {};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &rg_sbt ) );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>(state.sbt.raygenRecord),
        &rg_sbt,
        raygen_record_size,
        cudaMemcpyHostToDevice
    ) );

    // One miss record per ray type.  Size the host array from RAY_TYPE_COUNT
    // (was a hard-coded 2) so it cannot fall out of sync with the device
    // allocation in allocateSBT() and the copy size below.
    const size_t miss_record_size = sizeof( MissRecord );
    MissRecord ms_sbt[RAY_TYPE_COUNT];
    OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_miss_group, &ms_sbt[0] ) );
    ms_sbt[0].data.bg_color = make_float4( 0.0f );
    OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_miss_group, &ms_sbt[1] ) );
    ms_sbt[1].data.bg_color = make_float4( 0.0f );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>(state.sbt.missRecordBase),
        ms_sbt,
        miss_record_size*RAY_TYPE_COUNT,
        cudaMemcpyHostToDevice
    ) );
}
// Write the hit-group SBT records: for each material there are RAY_TYPE_COUNT
// consecutive records (radiance first, then occlusion), matching the SBT
// stride/offset used by optixTrace in the device code.
void fillHitGroupSBT( PathTracerState& state )
{
    const size_t hitgroup_record_size = sizeof( HitGroupRecord );
    HitGroupRecord hitgroup_records[RAY_TYPE_COUNT * MAT_COUNT];
    for( int i = 0; i < MAT_COUNT; ++i )
    {
        {
            const int sbt_idx = i * RAY_TYPE_COUNT + 0; // SBT for radiance ray-type for ith material
            OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_hit_group, &hitgroup_records[sbt_idx] ) );
            hitgroup_records[sbt_idx].data.emission_color = g_emission_colors[i];
            hitgroup_records[sbt_idx].data.diffuse_color = g_diffuse_colors[i];
            hitgroup_records[sbt_idx].data.vertices = reinterpret_cast<float4*>(state.d_vertices);
        }
        {
            const int sbt_idx = i * RAY_TYPE_COUNT + 1; // SBT for occlusion ray-type for ith material
            // Occlusion hits carry no data; zero the record before packing the header.
            memset( &hitgroup_records[sbt_idx], 0, hitgroup_record_size );
            OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_hit_group, &hitgroup_records[sbt_idx] ) );
        }
    }
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>(state.sbt.hitgroupRecordBase),
        hitgroup_records,
        hitgroup_record_size*RAY_TYPE_COUNT*MAT_COUNT,
        cudaMemcpyHostToDevice
    ) );
}
// Allocate and fully populate the shader binding table.
void createSBT( PathTracerState& state )
{
    allocateSBT( state );
    fillSBT( state );
    fillHitGroupSBT( state );
}
// Rebuild only the radiance-shading parts of the pipeline: destroy the old
// pipeline, radiance hit group and radiance module, recompile them, relink the
// pipeline, and rewrite the hit-group SBT records so their headers match the
// new program group.
void updatePipeline( PathTracerState& state )
{
    // destroy old stuff
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_hit_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module_radiance ) );
    createRadianceModule( state );
    createRadianceProgramGroup( state );
    createPipeline( state );
    fillHitGroupSBT( state );
}
// Release all OptiX objects and device allocations held by the state.
// Order: pipeline, program groups, modules, context, then raw device buffers.
void cleanupState( PathTracerState& state )
{
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_miss_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_hit_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_hit_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_miss_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module_radiance ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_vertices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.params.accum_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_params ) ) );
    // NOTE(review): state.stream is created in initLaunchParams but never
    // destroyed here — presumably released at process exit; confirm.
}
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
// Overlay the current light-sample count and specialization toggle state in
// the ImGui frame for the given window.
void displaySpecializationInfo( GLFWwindow* window )
{
    static char display_text[256];
    sutil::beginFrameImGui();
    // Use snprintf instead of sprintf so a change to the format string can
    // never overrun the fixed-size buffer.
    snprintf( display_text, sizeof( display_text ),
             "light samples [+/-]: %d\n"
             "specialization [S] : %s\n", light_samples, (specialize ? "on" : "off") );
    Params& params = static_cast<PathTracerState*>(glfwGetWindowUserPointer( window ))->params;
    sutil::displayText( display_text, 10.0f, (float)params.height - 50.f );
    sutil::endFrameImGui();
}
// Entry point: parse options, build the OptiX scene/pipeline/SBT, then either
// run an interactive GLFW render loop or render a fixed number of subframes to
// an image file when --file is given.
int main( int argc, char* argv[] )
{
    PathTracerState state;
    state.params.width = 768;
    state.params.height = 768;
    sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;
    //
    // Parse command line options
    //
    std::string outfile;
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--no-gl-interop" )
        {
            output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
        }
        else if( arg == "--no-specialize" )
        {
            specialize = false;
        }
        else if( arg == "--file" || arg == "-f" )
        {
            // Options below take a value argument; reject a trailing flag.
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outfile = argv[++i];
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            int w, h;
            sutil::parseDimensions( dims_arg.c_str(), w, h );
            state.params.width = w;
            state.params.height = h;
        }
        else if( arg == "--launch-samples" || arg == "-s" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            samples_per_launch = atoi( argv[++i] );
        }
        else if( arg == "--light-samples" || arg == "-l" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            light_samples = atoi( argv[++i] );
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    try
    {
        initCameraState();
        //
        // Set up OptiX state
        //
        createContext( state );
        buildMeshAccel( state );
        createModule( state );
        createProgramGroups( state );
        createPipeline( state );
        createSBT( state );
        initLaunchParams( state );
        if( outfile.empty() )
        {
            // Interactive mode: open a window and install input callbacks.
            GLFWwindow* window = sutil::initUI( "optixBoundValues", state.params.width, state.params.height );
            glfwSetMouseButtonCallback( window, mouseButtonCallback );
            glfwSetCursorPosCallback( window, cursorPosCallback );
            glfwSetWindowSizeCallback( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback( window, keyCallback );
            glfwSetCharCallback( window, charCallback );
            glfwSetScrollCallback( window, scrollCallback );
            glfwSetWindowUserPointer( window, &state );
            //
            // Render loop
            //
            {
                sutil::CUDAOutputBuffer<uchar4> output_buffer(
                    output_buffer_type,
                    state.params.width,
                    state.params.height
                );
                output_buffer.setStream( state.stream );
                sutil::GLDisplay gl_display;
                std::chrono::duration<double> state_update_time( 0.0 );
                std::chrono::duration<double> render_time( 0.0 );
                std::chrono::duration<double> display_time( 0.0 );
                do
                {
                    // Each iteration: poll input, update state, launch one
                    // subframe, display it, and accumulate per-phase timings.
                    auto t0 = std::chrono::steady_clock::now();
                    glfwPollEvents();
                    updateState( output_buffer, state.params );
                    auto t1 = std::chrono::steady_clock::now();
                    state_update_time += t1 - t0;
                    t0 = t1;
                    launchSubframe( output_buffer, state );
                    t1 = std::chrono::steady_clock::now();
                    render_time += t1 - t0;
                    t0 = t1;
                    displaySubframe( output_buffer, gl_display, window );
                    t1 = std::chrono::steady_clock::now();
                    display_time += t1 - t0;
                    sutil::displayStats( state_update_time, render_time, display_time );
                    displaySpecializationInfo( window );
                    glfwSwapBuffers( window );
                    ++state.params.subframe_index;
                } while( !glfwWindowShouldClose( window ) );
                CUDA_SYNC_CHECK();
            }
            sutil::cleanupUI( window );
        }
        else
        {
            // File-output mode: render one subframe and save it.
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                sutil::initGLFW(); // For GL context
                sutil::initGL();
            }
            sutil::CUDAOutputBuffer<uchar4> output_buffer(
                output_buffer_type,
                state.params.width,
                state.params.height
            );
            handleCameraUpdate( state.params );
            handleResize( output_buffer, state.params );
            launchSubframe( output_buffer, state );
            sutil::ImageBuffer buffer;
            buffer.data = output_buffer.getHostPointer();
            buffer.width = output_buffer.width();
            buffer.height = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            sutil::saveImage( outfile.c_str(), buffer, false );
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                glfwTerminate();
            }
        }
        cleanupState( state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixBoundValues/optixBoundValues.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixBoundValues.h"
#include "random.h"
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Store the occlusion result in payload register 0 of the current ray
// (read back by traceOcclusion on the caller's side).
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
    optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
// Trace a radiance ray.  The per-ray data pointer is split across payload
// registers 0/1 so the closest-hit/miss programs can write results directly
// into the caller's RadiancePRD.
static __forceinline__ __device__ void traceRadiance(
    OptixTraversableHandle handle,
    float3 ray_origin,
    float3 ray_direction,
    float tmin,
    float tmax,
    RadiancePRD* prd
)
{
    // TODO: deduce stride from num ray-types passed in params
    unsigned int u0, u1;
    packPointer( prd, u0, u1 );
    optixTrace(
        handle,
        ray_origin,
        ray_direction,
        tmin,
        tmax,
        0.0f, // rayTime
        OptixVisibilityMask( 1 ),
        OPTIX_RAY_FLAG_NONE,
        RAY_TYPE_RADIANCE, // SBT offset
        RAY_TYPE_COUNT, // SBT stride
        RAY_TYPE_RADIANCE, // missSBTIndex
        u0, u1 );
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Ray-generation program: for each pixel, traces samples_per_launch jittered
// camera paths (depth-limited to 4 segments), averages them, and blends the
// result into the running accumulation buffer.
extern "C" __global__ void __raygen__rg()
{
    const int w = params.width;
    const int h = params.height;
    const float3 eye = params.eye;
    const float3 U = params.U;
    const float3 V = params.V;
    const float3 W = params.W;
    const uint3 idx = optixGetLaunchIndex();
    const int subframe_index = params.subframe_index;
    // Per-pixel, per-subframe RNG seed.
    unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index );
    float3 result = make_float3( 0.0f );
    // NOTE(review): do/while with --i assumes samples_per_launch >= 1;
    // a value of 0 would wrap — confirm host guards against it.
    int i = params.samples_per_launch;
    do
    {
        // Jitter within the pixel for anti-aliasing; map to [-1,1] NDC.
        const float2 subpixel_jitter = make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );
        const float2 d = 2.0f * make_float2(
            ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
            ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
        ) - 1.0f;
        float3 ray_direction = normalize(d.x*U + d.y*V + W);
        float3 ray_origin = eye;
        RadiancePRD prd;
        prd.emitted = make_float3(0.f);
        prd.radiance = make_float3(0.f);
        prd.attenuation = make_float3(1.f);
        prd.countEmitted = true;
        prd.done = false;
        prd.seed = seed;
        int depth = 0;
        for( ;; )
        {
            traceRadiance(
                params.handle,
                ray_origin,
                ray_direction,
                0.01f, // tmin // TODO: smarter offset
                1e16f, // tmax
                &prd );
            result += prd.emitted;
            result += prd.radiance * prd.attenuation;
            if( prd.done || depth >= 3 ) // TODO RR, variable for depth
                break;
            // Continue the path from the hit point in the sampled direction.
            ray_origin = prd.origin;
            ray_direction = prd.direction;
            ++depth;
        }
    }
    while( --i );
    const uint3 launch_index = optixGetLaunchIndex();
    const unsigned int image_index = launch_index.y * params.width + launch_index.x;
    float3 accum_color = result / static_cast<float>( params.samples_per_launch );
    if( subframe_index > 0 )
    {
        // Progressive refinement: running average over subframes.
        const float a = 1.0f / static_cast<float>( subframe_index+1 );
        const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
        accum_color = lerp( accum_color_prev, accum_color, a );
    }
    params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
    params.frame_buffer[ image_index ] = make_color ( accum_color );
}
// Miss program for radiance rays: return the background color from the SBT
// miss record and terminate the path.
extern "C" __global__ void __miss__radiance()
{
    MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    RadiancePRD* prd = getPRD();
    prd->radiance = make_float3( rt_data->bg_color );
    prd->done = true;
}
// Closest-hit program for occlusion (shadow) rays: any hit means the light
// sample is blocked.
extern "C" __global__ void __closesthit__occlusion()
{
    setPayloadOcclusion( true );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixBoundValues/optixBoundValues.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Ray types double as SBT offsets/miss indices in optixTrace calls.
enum RayType
{
    RAY_TYPE_RADIANCE = 0,   // shading rays
    RAY_TYPE_OCCLUSION = 1,  // shadow rays
    RAY_TYPE_COUNT
};
// Area light defined by a corner point and two edge vectors.
struct ParallelogramLight
{
    float3 corner;
    float3 v1, v2;   // edge vectors spanning the parallelogram
    float3 normal;
    float3 emission; // radiant emission color
};
// Launch parameters shared between host and device (uploaded each launch).
struct Params
{
    unsigned int subframe_index;    // 0-based progressive-refinement frame counter
    float4* accum_buffer;           // running HDR average, one float4 per pixel
    uchar4* frame_buffer;           // display buffer, one uchar4 per pixel
    unsigned int width;
    unsigned int height;
    unsigned int samples_per_launch;
    unsigned int light_samples;     // shadow-ray samples per shading point
    float3 eye;                     // camera position
    float3 U;                       // camera frame basis vectors
    float3 V;
    float3 W;
    ParallelogramLight light; // TODO: make light list
    OptixTraversableHandle handle;  // top-level traversable for optixTrace
};
// Per-record SBT data payloads.
struct RayGenData
{
};
struct MissData
{
    float4 bg_color;  // background radiance returned on miss
};
struct HitGroupData
{
    float3 emission_color;
    float3 diffuse_color;
    float4* vertices;  // triangle vertex buffer (3 consecutive entries per primitive)
};
#if defined( __CUDACC__ )
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
// Per-ray data for radiance rays; a pointer to this struct is passed through
// two payload registers (see packPointer/getPRD).
struct RadiancePRD
{
    // TODO: move some state directly into payload registers?
    float3 emitted;      // emission picked up at the current hit
    float3 radiance;     // direct lighting estimate at the current hit
    float3 attenuation;  // accumulated path throughput
    float3 origin;       // next path segment origin (hit point)
    float3 direction;    // next path segment direction
    unsigned int seed;   // RNG state carried along the path
    int countEmitted;    // whether emission should be counted at the next hit
    int done;            // path termination flag (set on miss)
    int pad;
};
// Orthonormal basis around a given normal, used to rotate hemisphere samples
// from local (tangent) space into world space.
struct Onb
{
    __forceinline__ __device__ Onb( const float3& normal )
    {
        m_normal = normal;
        // Pick the binormal from the larger normal component to avoid a
        // degenerate cross product.
        if( fabs( m_normal.x ) > fabs( m_normal.z ) )
        {
            m_binormal.x = -m_normal.y;
            m_binormal.y = m_normal.x;
            m_binormal.z = 0;
        }
        else
        {
            m_binormal.x = 0;
            m_binormal.y = -m_normal.z;
            m_binormal.z = m_normal.y;
        }
        m_binormal = normalize( m_binormal );
        m_tangent = cross( m_binormal, m_normal );
    }
    // Express a local-frame vector p (x=tangent, y=binormal, z=normal
    // coefficients) in world space, in place.
    __forceinline__ __device__ void inverse_transform( float3& p ) const
    {
        p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal;
    }
    float3 m_tangent;
    float3 m_binormal;
    float3 m_normal;
};
// Reassemble a 64-bit pointer from the two 32-bit payload halves produced by
// packPointer (i0 = high word, i1 = low word).
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
    const unsigned long long uptr = static_cast<unsigned long long>(i0) << 32 | i1;
    void* ptr = reinterpret_cast<void*>(uptr);
    return ptr;
}
// Split a 64-bit pointer into two 32-bit words (i0 = high, i1 = low) so it
// fits into a pair of ray payload registers.
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
    const unsigned long long uptr = reinterpret_cast<unsigned long long>(ptr);
    i0 = uptr >> 32;
    i1 = uptr & 0x00000000ffffffff;
}
// Recover the caller's RadiancePRD pointer from payload registers 0/1
// (the counterpart of packPointer in traceRadiance).
static __forceinline__ __device__ RadiancePRD* getPRD()
{
    const unsigned int u0 = optixGetPayload_0();
    const unsigned int u1 = optixGetPayload_1();
    return reinterpret_cast<RadiancePRD*>(unpackPointer( u0, u1 ));
}
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixBoundValues/optixBoundValues_ch.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixBoundValues.h"
#include "random.h"
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Map two uniform samples in [0,1) to a cosine-weighted direction on the +z
// unit hemisphere: sample a unit disk, then project up onto the sphere.
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
    // Uniform point on the unit disk.
    const float disk_r   = sqrtf( u1 );
    const float disk_phi = 2.0f*M_PIf * u2;
    const float x = disk_r * cosf( disk_phi );
    const float y = disk_r * sinf( disk_phi );

    // Lift onto the hemisphere; clamp guards tiny negative values from
    // floating-point round-off.
    p.x = x;
    p.y = y;
    p.z = sqrtf( fmaxf( 0.0f, 1.0f - x*x - y*y ) );
}
// Trace a shadow ray and return true if any geometry blocks the segment
// [tmin, tmax].  Uses terminate-on-first-hit since only a boolean is needed;
// the result comes back through payload register 0 (see setPayloadOcclusion).
static __forceinline__ __device__ bool traceOcclusion(
    OptixTraversableHandle handle,
    float3 ray_origin,
    float3 ray_direction,
    float tmin,
    float tmax
)
{
    unsigned int occluded = 0u;
    optixTrace(
        handle,
        ray_origin,
        ray_direction,
        tmin,
        tmax,
        0.0f, // rayTime
        OptixVisibilityMask( 1 ),
        OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
        RAY_TYPE_OCCLUSION, // SBT offset
        RAY_TYPE_COUNT, // SBT stride
        RAY_TYPE_OCCLUSION, // missSBTIndex
        occluded );
    return occluded;
}
// Closest-hit program for radiance rays: records emission, samples a
// cosine-weighted bounce direction for the next path segment, and estimates
// direct lighting from the parallelogram light with params.light_samples
// shadow rays.
extern "C" __global__ void __closesthit__radiance()
{
    HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
    const int prim_idx = optixGetPrimitiveIndex();
    const float3 ray_dir = optixGetWorldRayDirection();
    // Reconstruct the geometric normal from the triangle's vertices.
    const int vert_idx_offset = prim_idx*3;
    const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] );
    const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] );
    const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] );
    const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) );
    // Flip the normal to face the incoming ray.
    const float3 N = faceforward( N_0, -ray_dir, N_0 );
    const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir;
    RadiancePRD* prd = getPRD();
    // Count emission only on the first hit of a path (avoids double counting
    // with the direct-light estimate below).
    if( prd->countEmitted )
        prd->emitted = rt_data->emission_color;
    else
        prd->emitted = make_float3( 0.0f );
    unsigned int seed = prd->seed;
    {
        // Sample the next bounce: cosine-weighted direction about N.
        const float z1 = rnd(seed);
        const float z2 = rnd(seed);
        float3 w_in;
        cosine_sample_hemisphere( z1, z2, w_in );
        Onb onb( N );
        onb.inverse_transform( w_in );
        prd->direction = w_in;
        prd->origin = P;
        prd->attenuation *= rt_data->diffuse_color;
        prd->countEmitted = false;
    }
    // Next-event estimation: average light_samples area-light samples.
    float3 result = make_float3( 0.0f );
    for( int i = 0; i < params.light_samples; ++i )
    {
        const float z1 = rnd( seed );
        const float z2 = rnd( seed );
        prd->seed = seed;
        ParallelogramLight light = params.light;
        const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2;
        // Calculate properties of light sample (for area based pdf)
        const float Ldist = length( light_pos - P );
        const float3 L = normalize( light_pos - P );
        const float nDl = dot( N, L );
        const float LnDl = -dot( light.normal, L );
        float weight = 0.0f;
        // Only light samples on the front side of both surfaces contribute.
        if( nDl > 0.0f && LnDl > 0.0f )
        {
            const bool occluded = traceOcclusion(
                params.handle,
                P,
                L,
                0.01f, // tmin
                Ldist - 0.01f // tmax
            );
            if( !occluded )
            {
                // Area-to-solid-angle conversion for the area light pdf.
                const float A = length( cross( light.v1, light.v2 ) );
                weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist);
                result += (light.emission * weight);
            }
        }
    }
    prd->radiance += ( result / params.light_samples );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCallablePrograms/optixCallablePrograms.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Camera.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/Trackball.h>
#include <sutil/sutil.h>
#include <GLFW/glfw3.h>
#include <cstring>
#include <iomanip>
#include <cuda/whitted.h>
#include "optixCallablePrograms.h"
//------------------------------------------------------------------------------
//
// Globals
//
//------------------------------------------------------------------------------
bool resize_dirty = false;
bool minimized = false;
// Camera state
bool camera_changed = true;
sutil::Camera camera;
sutil::Trackball trackball;
// Shading state
bool shading_changed = false;
unsigned int dc_index = 0;
// Mouse state
int32_t mouse_button = -1;
//------------------------------------------------------------------------------
//
// Local types
//
//------------------------------------------------------------------------------
// Generic SBT record: the OptiX-managed header followed by user data T.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};
// Concrete record types for each SBT section of this sample.
typedef Record<EmptyData> RayGenRecord;
typedef Record<EmptyData> MissRecord;
typedef Record<HitGroupData> HitGroupRecord;
typedef Record<EmptyData> CallablesRecord;
// All per-run OptiX/CUDA objects for the callable-programs sample, grouped so
// setup/teardown helpers can pass a single handle around.
struct CallableProgramsState
{
    OptixDeviceContext context = 0;
    OptixTraversableHandle gas_handle = 0;       // handle of the built GAS
    CUdeviceptr d_gas_output_buffer = 0;         // device memory backing the GAS
    OptixModule camera_module = 0;
    OptixModule geometry_module = 0;
    OptixModule shading_module = 0;
    OptixProgramGroup raygen_prog_group = 0;
    OptixProgramGroup miss_prog_group = 0;
    OptixProgramGroup hitgroup_prog_group = 0;
    OptixProgramGroup callable_prog_groups[3] = {};  // one per selectable shading program
    OptixPipeline pipeline = 0;
    OptixPipelineCompileOptions pipeline_compile_options = {};
    CUstream stream = 0;
    whitted::LaunchParams params = {};
    whitted::LaunchParams* d_params = 0;  // device-side copy of params
    OptixShaderBindingTable sbt = {};
};
//------------------------------------------------------------------------------
//
// Geometry data
//
//------------------------------------------------------------------------------
// The single unit sphere at the origin rendered by this sample.
const GeometryData::Sphere g_sphere = {
    {0.f, 0.f, 0.f}, // center
    1.0f // radius
};
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// GLFW mouse-button handler: on press, remember which button is held and
// start a trackball drag at the cursor position; on release, clear the
// held-button state.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    double cursor_x, cursor_y;
    glfwGetCursorPos( window, &cursor_x, &cursor_y );

    if( action != GLFW_PRESS )
    {
        mouse_button = -1;
        return;
    }

    mouse_button = button;
    trackball.startTracking( static_cast<int>( cursor_x ), static_cast<int>( cursor_y ) );
}
// GLFW cursor-motion handler: while a mouse button is held, drag the
// trackball.  Left button orbits (look-at fixed); right button moves the
// view with the eye fixed.  Other/no buttons are ignored.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    const bool left  = ( mouse_button == GLFW_MOUSE_BUTTON_LEFT );
    const bool right = ( mouse_button == GLFW_MOUSE_BUTTON_RIGHT );
    if( !left && !right )
        return;

    whitted::LaunchParams* params = static_cast<whitted::LaunchParams*>( glfwGetWindowUserPointer( window ) );
    trackball.setViewMode( left ? sutil::Trackball::LookAtFixed : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), params->width, params->height );
    camera_changed = true;
}
// GLFW resize handler: propagate the new size into the launch params and mark
// the camera/output buffers dirty for the next frame.
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    // Keep rendering at the current resolution when the window is minimized.
    if( minimized )
        return;
    // Output dimensions must be at least 1 in both x and y.
    sutil::ensureMinimumSize( res_x, res_y );
    whitted::LaunchParams* params = static_cast<whitted::LaunchParams*>( glfwGetWindowUserPointer( window ) );
    params->width = res_x;
    params->height = res_y;
    camera_changed = true;
    resize_dirty = true;
}
// GLFW iconify handler: track minimized state so resize events can be ignored
// while the window is minimized.
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = ( iconified > 0 );
}
// GLFW key handler: Q/ESC closes the window; SPACE cycles through the three
// direct-callable shading programs.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    if( action == GLFW_PRESS )
    {
        if( key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE )
        {
            glfwSetWindowShouldClose( window, true );
        }
        else if( key == GLFW_KEY_SPACE )
        {
            // Fix: this branch previously hung off the outer
            // if( action == GLFW_PRESS ), so the shading program cycled on
            // key *release* and on autorepeat instead of on press.
            shading_changed = true;
            dc_index        = ( dc_index + 1 ) % 3;
        }
    }
}
// GLFW scroll handler: forward vertical wheel motion to the trackball and
// mark the camera dirty when the trackball consumed the event.
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
    const bool handled = trackball.wheelEvent( static_cast<int>( yscroll ) );
    if( handled )
        camera_changed = true;
}
//------------------------------------------------------------------------------
//
// Helper functions
//
//------------------------------------------------------------------------------
// Print command-line usage to stderr and terminate the process.
// NOTE(review): exits with status 0 even when reached via an unknown-option
// path; callers treat it as terminal either way.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n";
    std::cerr << "Options: --file | -f <filename> File for image output\n";
    std::cerr << " --no-gl-interop Disable GL interop for display\n";
    std::cerr << " --dim=<width>x<height> Set image dimensions; defaults to 768x768\n";
    std::cerr << " --help | -h Print this usage message\n";
    exit( 0 );
}
// Allocate device-side launch resources (accumulation buffer, light list,
// params copy), populate the scene lights, and create the render stream.
// Requires state.gas_handle to be built already.
void initLaunchParams( CallableProgramsState& state )
{
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.params.accum_buffer ),
                            state.params.width * state.params.height * sizeof( float4 ) ) );
    state.params.frame_buffer = nullptr; // Will be set when output buffer is mapped
    state.params.subframe_index = 0u;
    // Set ambient light color and point light position
    std::vector<Light> lights( 2 );
    lights[0].type = Light::Type::AMBIENT;
    lights[0].ambient.color = make_float3( 0.4f, 0.4f, 0.4f );
    lights[1].type = Light::Type::POINT;
    lights[1].point.color = make_float3( 1.0f, 1.0f, 1.0f );
    lights[1].point.intensity = 1.0f;
    lights[1].point.position = make_float3( 10.0f, 10.0f, -10.0f );
    lights[1].point.falloff = Light::Falloff::QUADRATIC;
    // Upload the light list to the device.
    state.params.lights.count = static_cast<unsigned int>( lights.size() );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.params.lights.data ), lights.size() * sizeof( Light ) ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( state.params.lights.data ), lights.data(),
                            lights.size() * sizeof( Light ), cudaMemcpyHostToDevice ) );
    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params ), sizeof( whitted::LaunchParams ) ) );
    state.params.handle = state.gas_handle;
}
// Writes the sphere's axis-aligned bounding box into `result`, which is
// interpreted as an OptixAabb (minX, minY, minZ, maxX, maxY, maxZ).
static void sphere_bound( float3 center, float radius, float result[6] )
{
    const float3 lo = center - radius;
    const float3 hi = center + radius;

    OptixAabb* aabb = reinterpret_cast<OptixAabb*>( result );
    aabb->minX      = lo.x;
    aabb->minY      = lo.y;
    aabb->minZ      = lo.z;
    aabb->maxX      = hi.x;
    aabb->maxY      = hi.y;
    aabb->maxZ      = hi.z;
}
// Builds a GAS from a single build input, emitting the compacted size during
// the build and compacting afterwards when that actually saves memory. On
// return gas_handle and d_gas_output_buffer refer to the final (possibly
// compacted) GAS; the caller owns d_gas_output_buffer.
static void buildGas( const CallableProgramsState& state,
                      const OptixAccelBuildOptions& accel_options,
                      const OptixBuildInput& build_input,
                      OptixTraversableHandle& gas_handle,
                      CUdeviceptr& d_gas_output_buffer )
{
    OptixAccelBufferSizes gas_buffer_sizes;
    CUdeviceptr d_temp_buffer_gas;
    OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &accel_options, &build_input, 1, &gas_buffer_sizes ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer_gas ), gas_buffer_sizes.tempSizeInBytes ) );
    // non-compacted output and size of compacted GAS
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    // 8-byte-align the trailing size_t slot that receives the compacted size.
    size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ), compactedSizeOffset + 8 ) );
    // Ask the build to write the compacted size at the end of the output buffer.
    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );
    OPTIX_CHECK( optixAccelBuild( state.context, 0, &accel_options, &build_input, 1, d_temp_buffer_gas,
                                  gas_buffer_sizes.tempSizeInBytes, d_buffer_temp_output_gas_and_compacted_size,
                                  gas_buffer_sizes.outputSizeInBytes, &gas_handle, &emitProperty, 1 ) );
    CUDA_CHECK( cudaFree( (void*)d_temp_buffer_gas ) );
    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, (void*)emitProperty.result, sizeof( size_t ), cudaMemcpyDeviceToHost ) );
    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        // Compaction saves memory: copy into a right-sized buffer, then free
        // the oversized build output.
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_gas_output_buffer ), compacted_gas_size ) );
        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( state.context, 0, gas_handle, d_gas_output_buffer, compacted_gas_size, &gas_handle ) );
        CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
    }
    else
    {
        // No saving: keep the build output buffer as the final GAS storage.
        d_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
    }
}
// Builds the sample's single-sphere GAS: computes the sphere's AABB on the
// host, uploads it, and runs a compacting build over one custom primitive
// with any-hit disabled. Fills state.gas_handle / state.d_gas_output_buffer.
void createGeometry( CallableProgramsState& state )
{
    //
    // Build Custom Primitive for Sphere
    //
    // Load AABB into device memory
    OptixAabb aabb;
    CUdeviceptr d_aabb;
    sphere_bound( g_sphere.center, g_sphere.radius, reinterpret_cast<float*>( &aabb ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_aabb ), sizeof( OptixAabb ) ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_aabb ), &aabb, sizeof( OptixAabb ), cudaMemcpyHostToDevice ) );
    // Setup AABB build input: one primitive mapped to SBT record 0,
    // any-hit disabled for performance.
    uint32_t aabb_input_flags[1] = {OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT};
    const uint32_t sbt_index[1] = {0};
    CUdeviceptr d_sbt_index;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_sbt_index ), sizeof( uint32_t ) ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_sbt_index ), sbt_index, sizeof( uint32_t ), cudaMemcpyHostToDevice ) );
    OptixBuildInput aabb_input = {};
    aabb_input.type = OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES;
    aabb_input.customPrimitiveArray.aabbBuffers = &d_aabb;
    aabb_input.customPrimitiveArray.flags = aabb_input_flags;
    aabb_input.customPrimitiveArray.numSbtRecords = 1;
    aabb_input.customPrimitiveArray.numPrimitives = 1;
    aabb_input.customPrimitiveArray.sbtIndexOffsetBuffer = d_sbt_index;
    aabb_input.customPrimitiveArray.sbtIndexOffsetSizeInBytes = sizeof( uint32_t );
    aabb_input.customPrimitiveArray.primitiveIndexOffset = 0;
    OptixAccelBuildOptions accel_options = {
        OPTIX_BUILD_FLAG_ALLOW_COMPACTION, // buildFlags
        OPTIX_BUILD_OPERATION_BUILD // operation
    };
    buildGas( state, accel_options, aabb_input, state.gas_handle, state.d_gas_output_buffer );
    // Input buffers are only needed during the build; free them now.
    CUDA_CHECK( cudaFree( (void*)d_aabb ) );
    CUDA_CHECK( cudaFree( (void*)d_sbt_index ) );
}
// Compiles the three CUDA sources used by this sample into OptiX modules:
// the camera/raygen code ("whitted.cu"), the sphere intersection code
// ("sphere.cu"), and the shading code with the callable programs
// ("optixCallablePrograms.cu"). Deduplicates the previously triplicated
// compile scope via a local helper.
void createModules( CallableProgramsState& state )
{
    OptixModuleCompileOptions module_compile_options = {};

    char   log[2048];
    size_t sizeof_log = sizeof( log );

    // Compile one embedded source file into `module`. sizeof_log is reset
    // before each call as a defensive measure: optixModuleCreateFromPTX
    // writes the actual log length back through it, and whether
    // OPTIX_CHECK_LOG restores it is SDK-version dependent.
    auto createModule = [&]( const char* sampleName, const char* sampleDir, const char* sourceFile, OptixModule* module ) {
        size_t      inputSize = 0;
        const char* input     = sutil::getInputData( sampleName, sampleDir, sourceFile, inputSize );
        sizeof_log            = sizeof( log );
        OPTIX_CHECK_LOG( optixModuleCreateFromPTX( state.context, &module_compile_options, &state.pipeline_compile_options,
                                                   input, inputSize, log, &sizeof_log, module ) );
    };

    createModule( nullptr, nullptr, "whitted.cu", &state.camera_module );
    createModule( nullptr, nullptr, "sphere.cu", &state.geometry_module );
    createModule( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixCallablePrograms.cu", &state.shading_module );
}
// Creates the ray-generation program group (the pinhole camera from the
// camera module), appends it to `program_groups`, and stores it on `state`.
static void createCameraProgram( CallableProgramsState& state, std::vector<OptixProgramGroup>& program_groups )
{
    OptixProgramGroupOptions raygen_options = {};
    OptixProgramGroupDesc    raygen_desc    = {};
    raygen_desc.kind                        = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
    raygen_desc.raygen.module               = state.camera_module;
    raygen_desc.raygen.entryFunctionName    = "__raygen__pinhole";

    char   log[2048];
    size_t sizeof_log = sizeof( log );
    OptixProgramGroup raygen_group;
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &raygen_desc, 1, &raygen_options, log,
                                              &sizeof_log, &raygen_group ) );

    program_groups.push_back( raygen_group );
    state.raygen_prog_group = raygen_group;
}
// Creates the sphere hit group (intersection from the geometry module,
// radiance closest-hit from the shading module, no any-hit) and the three
// callable program groups the closest-hit/miss programs dispatch through.
// Group 0 bundles the phong DC with the background CC; groups 1 and 2 are
// the alternative DC shaders.
static void createSphereProgram( CallableProgramsState& state, std::vector<OptixProgramGroup>& program_groups )
{
    OptixProgramGroup hitgroup_prog_group;
    OptixProgramGroupOptions hitgroup_prog_group_options = {};
    OptixProgramGroupDesc hitgroup_prog_group_desc = {};
    // Fix: this statement was previously terminated with ',' (comma
    // operator) instead of ';' — a typo that happened to compile.
    hitgroup_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hitgroup_prog_group_desc.hitgroup.moduleIS = state.geometry_module;
    hitgroup_prog_group_desc.hitgroup.entryFunctionNameIS = "__intersection__sphere";
    hitgroup_prog_group_desc.hitgroup.moduleCH = state.shading_module;
    hitgroup_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__radiance";
    hitgroup_prog_group_desc.hitgroup.moduleAH = nullptr;
    hitgroup_prog_group_desc.hitgroup.entryFunctionNameAH = nullptr;

    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &hitgroup_prog_group_desc, 1, &hitgroup_prog_group_options,
                                              log, &sizeof_log, &hitgroup_prog_group ) );
    program_groups.push_back( hitgroup_prog_group );
    state.hitgroup_prog_group = hitgroup_prog_group;

    // Callable programs. Record order must match the dc_index values used at
    // shading time (0 = phong, 1 = checkered, 2 = normal).
    OptixProgramGroupOptions callable_prog_group_options = {};
    OptixProgramGroupDesc callable_prog_group_descs[3] = {};
    callable_prog_group_descs[0].kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
    callable_prog_group_descs[0].callables.moduleDC = state.shading_module;
    callable_prog_group_descs[0].callables.entryFunctionNameDC = "__direct_callable__phong_shade";
    callable_prog_group_descs[0].callables.moduleCC = state.shading_module;
    callable_prog_group_descs[0].callables.entryFunctionNameCC = "__continuation_callable__raydir_shade";
    callable_prog_group_descs[1].kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
    callable_prog_group_descs[1].callables.moduleDC = state.shading_module;
    callable_prog_group_descs[1].callables.entryFunctionNameDC = "__direct_callable__checkered_shade";
    callable_prog_group_descs[2].kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
    callable_prog_group_descs[2].callables.moduleDC = state.shading_module;
    callable_prog_group_descs[2].callables.entryFunctionNameDC = "__direct_callable__normal_shade";

    // Defensive reset: the earlier OPTIX_CHECK_LOG call may have overwritten
    // sizeof_log with the returned log length (SDK-version dependent).
    sizeof_log = sizeof( log );
    OPTIX_CHECK( optixProgramGroupCreate( state.context, callable_prog_group_descs, 3, &callable_prog_group_options,
                                          log, &sizeof_log, state.callable_prog_groups ) );
    program_groups.push_back( state.callable_prog_groups[0] );
    program_groups.push_back( state.callable_prog_groups[1] );
    program_groups.push_back( state.callable_prog_groups[2] );
}
// Creates the miss program group; the miss program delegates background
// shading to a continuation callable in the shading module.
static void createMissProgram( CallableProgramsState& state, std::vector<OptixProgramGroup>& program_groups )
{
    OptixProgramGroupOptions miss_options = {};
    OptixProgramGroupDesc    miss_desc    = {};
    miss_desc.kind                        = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_desc.miss.module                 = state.shading_module;
    miss_desc.miss.entryFunctionName      = "__miss__raydir_shade";

    char   log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &miss_desc, 1, &miss_options, log,
                                              &sizeof_log, &state.miss_prog_group ) );

    program_groups.push_back( state.miss_prog_group );
}
// Compiles the modules, creates all program groups, links them into the
// pipeline, and sizes the pipeline stacks for depth-1 trace/CC/DC usage.
void createPipeline( CallableProgramsState& state )
{
    const uint32_t max_trace_depth = 1;
    const uint32_t max_cc_depth = 1;
    const uint32_t max_dc_depth = 1;
    const uint32_t max_traversal_depth = 1;
    std::vector<OptixProgramGroup> program_groups;
    state.pipeline_compile_options = {
        false, // usesMotionBlur
        OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS, // traversableGraphFlags
        whitted::NUM_PAYLOAD_VALUES, // numPayloadValues
        sphere::NUM_ATTRIBUTE_VALUES, // numAttributeValues
        OPTIX_EXCEPTION_FLAG_NONE, // exceptionFlags
        "params" // pipelineLaunchParamsVariableName
    };
    // Prepare program groups
    createModules( state );
    createCameraProgram( state, program_groups );
    createSphereProgram( state, program_groups );
    createMissProgram( state, program_groups );
    // Link program groups to pipeline
    OptixPipelineLinkOptions pipeline_link_options = {
        max_trace_depth, // maxTraceDepth
        OPTIX_COMPILE_DEBUG_LEVEL_FULL // debugLevel
    };
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate( state.context, &state.pipeline_compile_options, &pipeline_link_options,
                                          program_groups.data(), static_cast<unsigned int>( program_groups.size() ),
                                          log, &sizeof_log, &state.pipeline ) );
    // Accumulate per-group stack requirements, then configure conservative
    // stack sizes for the whole pipeline.
    OptixStackSizes stack_sizes = {};
    for( auto& prog_group : program_groups )
    {
        OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
    }
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth, max_cc_depth, max_dc_depth, &direct_callable_stack_size_from_traversal,
                                             &direct_callable_stack_size_from_state, &continuation_stack_size ) );
    OPTIX_CHECK( optixPipelineSetStackSize( state.pipeline, direct_callable_stack_size_from_traversal,
                                            direct_callable_stack_size_from_state, continuation_stack_size, max_traversal_depth ) );
}
// Update the dc_index in HitGroupData so that the closest hit program invokes the correct DC for shading
void syncDCShaderIndexToSbt( CallableProgramsState& state )
{
    // Re-pack the record header on the host, then patch only the dc_index
    // field of the hit-group record already resident on the device.
    HitGroupRecord hitgroup_record;
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_group, &hitgroup_record ) );
    hitgroup_record.data.dc_index = dc_index;
    // NOTE(review): the destination offset assumes dc_index is laid out
    // immediately after the record header and the sphere data with no
    // padding in between — confirm against HitGroupRecord's layout.
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase
                                                     + ( sizeof( hitgroup_record.header ) + sizeof( GeometryData::Sphere ) ) ),
                            &hitgroup_record.data.dc_index, sizeof( unsigned int ), cudaMemcpyHostToDevice ) );
}
// Builds the shader binding table: one raygen record, one miss record, one
// hit-group record (sphere + initial dc_index), and three callable records
// whose order matches the dc_index values used at shading time.
void createSBT( CallableProgramsState& state )
{
    // Raygen program record
    {
        RayGenRecord raygen_record;
        OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &raygen_record ) );
        CUdeviceptr d_raygen_record;
        size_t sizeof_raygen_record = sizeof( RayGenRecord );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_raygen_record ), sizeof_raygen_record ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_raygen_record ), &raygen_record, sizeof_raygen_record, cudaMemcpyHostToDevice ) );
        state.sbt.raygenRecord = d_raygen_record;
    }
    // Miss program record
    {
        MissRecord miss_record;
        OPTIX_CHECK( optixSbtRecordPackHeader( state.miss_prog_group, &miss_record ) );
        CUdeviceptr d_miss_record;
        size_t sizeof_miss_record = sizeof( MissRecord );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_miss_record ), sizeof_miss_record ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_miss_record ), &miss_record, sizeof_miss_record, cudaMemcpyHostToDevice ) );
        state.sbt.missRecordBase = d_miss_record;
        state.sbt.missRecordCount = 1;
        state.sbt.missRecordStrideInBytes = static_cast<uint32_t>( sizeof_miss_record );
    }
    // Hitgroup program record: carries the sphere geometry and the initial
    // shading DC index (later patched in place by syncDCShaderIndexToSbt).
    {
        HitGroupRecord hitgroup_record;
        OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_group, &hitgroup_record ) );
        hitgroup_record.data.sphere = g_sphere;
        hitgroup_record.data.dc_index = dc_index;
        CUdeviceptr d_hitgroup_record;
        size_t sizeof_hitgroup_record = sizeof( HitGroupRecord );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_hitgroup_record ), sizeof_hitgroup_record ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_hitgroup_record ), &hitgroup_record, sizeof_hitgroup_record,
                                cudaMemcpyHostToDevice ) );
        state.sbt.hitgroupRecordBase = d_hitgroup_record;
        state.sbt.hitgroupRecordCount = 1;
        state.sbt.hitgroupRecordStrideInBytes = static_cast<uint32_t>( sizeof_hitgroup_record );
    }
    // Callables program record: indices 0..2 correspond to the three
    // callable program groups created in createSphereProgram.
    {
        CallablesRecord callable_records[3];
        OPTIX_CHECK( optixSbtRecordPackHeader( state.callable_prog_groups[0], &callable_records[0] ) );
        OPTIX_CHECK( optixSbtRecordPackHeader( state.callable_prog_groups[1], &callable_records[1] ) );
        OPTIX_CHECK( optixSbtRecordPackHeader( state.callable_prog_groups[2], &callable_records[2] ) );
        CUdeviceptr d_callable_records;
        size_t sizeof_callable_record = sizeof( CallablesRecord );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_callable_records ), sizeof_callable_record * 3 ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_callable_records ), callable_records,
                                sizeof_callable_record * 3, cudaMemcpyHostToDevice ) );
        state.sbt.callablesRecordBase = d_callable_records;
        state.sbt.callablesRecordCount = 3;
        state.sbt.callablesRecordStrideInBytes = static_cast<unsigned int>( sizeof_callable_record );
    }
}
// OptiX log callback: emits "[level][tag]: message" to stderr, with the
// level right-aligned to 2 columns and the tag to 12.
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::cerr << "[" << std::setw( 2 ) << level << "]";
    std::cerr << "[" << std::setw( 12 ) << tag << "]";
    std::cerr << ": " << message << "\n";
}
// Initializes CUDA and OptiX, then creates the device context on the current
// CUDA context with logging routed to context_log_cb at verbosity 4.
void createContext( CallableProgramsState& state )
{
    // A no-op CUDA call forces runtime initialization of the primary context.
    CUDA_CHECK( cudaFree( 0 ) );

    OPTIX_CHECK( optixInit() );

    OptixDeviceContextOptions options = {};
    options.logCallbackFunction       = &context_log_cb;
    options.logCallbackLevel          = 4;

    CUcontext cu_ctx = 0;  // zero means take the current context
    OptixDeviceContext context;
    OPTIX_CHECK( optixDeviceContextCreate( cu_ctx, &options, &context ) );

    state.context = context;
}
//
// Handle updates
//
// Places the camera 3 units in front of the origin looking at it, and wires
// the trackball controls to it (gimbal-locked, world-Y up).
void initCameraState()
{
    camera.setEye( make_float3( 0.0f, 0.0f, -3.0f ) );
    camera.setLookat( make_float3( 0.0f, 0.0f, 0.0f ) );
    camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
    camera.setFovY( 60.0f );
    camera_changed = true;
    trackball.setCamera( &camera );
    trackball.setMoveSpeed( 10.0f );
    // Reference frame: latitudinal axis X, longitudinal axis Z, up axis Y.
    trackball.setReferenceFrame( make_float3( 1.0f, 0.0f, 0.0f ), make_float3( 0.0f, 0.0f, 1.0f ), make_float3( 0.0f, 1.0f, 0.0f ) );
    trackball.setGimbalLock( true );
}
// Refreshes the camera-derived launch parameters (eye position and the UVW
// frame), but only when the camera actually moved since the last frame.
void handleCameraUpdate( CallableProgramsState& state )
{
    if( camera_changed )
    {
        camera_changed = false;

        // Keep the projection consistent with the current output resolution.
        camera.setAspectRatio( static_cast<float>( state.params.width ) / static_cast<float>( state.params.height ) );
        state.params.eye = camera.eye();
        camera.UVWFrame( state.params.U, state.params.V, state.params.W );
    }
}
// Resizes the display and accumulation buffers after a window-size change;
// does nothing when no resize is pending.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, whitted::LaunchParams& params )
{
    if( resize_dirty )
    {
        resize_dirty = false;

        output_buffer.resize( params.width, params.height );

        // The accumulation buffer must match the new resolution: drop the
        // old allocation and create a fresh float4-per-pixel buffer.
        CUDA_CHECK( cudaFree( reinterpret_cast<void*>( params.accum_buffer ) ) );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &params.accum_buffer ),
                                params.width * params.height * sizeof( float4 ) ) );
    }
}
// Pushes the newly selected direct-callable index into the SBT when the
// shading mode changed; no-op otherwise.
void handleShading( CallableProgramsState& state )
{
    if( shading_changed )
    {
        shading_changed = false;
        syncDCShaderIndexToSbt( state );
    }
}
// Applies pending interaction changes before a launch. Any camera move,
// resize, or shading switch restarts the progressive accumulation.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, CallableProgramsState& state )
{
    const bool dirty = camera_changed || resize_dirty || shading_changed;
    if( dirty )
        state.params.subframe_index = 0;

    handleCameraUpdate( state );
    handleResize( output_buffer, state.params );
    handleShading( state );
}
// Maps the output buffer, asynchronously copies the current launch params to
// the device on the state's stream, launches one full-resolution subframe,
// then unmaps and synchronizes before returning.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, CallableProgramsState& state )
{
    // Launch
    uchar4* result_buffer_data = output_buffer.map();
    state.params.frame_buffer = result_buffer_data;
    CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( state.d_params ), &state.params,
                                 sizeof( whitted::LaunchParams ), cudaMemcpyHostToDevice, state.stream ) );
    OPTIX_CHECK( optixLaunch( state.pipeline, state.stream, reinterpret_cast<CUdeviceptr>( state.d_params ),
                              sizeof( whitted::LaunchParams ), &state.sbt,
                              state.params.width, // launch width
                              state.params.height, // launch height
                              1 // launch depth
                              ) );
    output_buffer.unmap();
    CUDA_SYNC_CHECK();
}
// Blits the rendered CUDA output buffer to the window via its PBO. The
// framebuffer size is queried each frame since it may differ from the
// window size on HiDPI displays.
void displaySubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, sutil::GLDisplay& gl_display, GLFWwindow* window )
{
    int fb_width  = 0;
    int fb_height = 0;
    glfwGetFramebufferSize( window, &fb_width, &fb_height );

    gl_display.display( output_buffer.width(), output_buffer.height(), fb_width, fb_height, output_buffer.getPBO() );
}
// Destroys all OptiX objects and frees all device allocations created by the
// create*/init* functions above: pipeline before program groups, program
// groups before modules, modules before the context; CUDA buffers last.
void cleanupState( CallableProgramsState& state )
{
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.hitgroup_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.callable_prog_groups[0] ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.callable_prog_groups[1] ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.callable_prog_groups[2] ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.miss_prog_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.shading_module ) );
    OPTIX_CHECK( optixModuleDestroy( state.geometry_module ) );
    OPTIX_CHECK( optixModuleDestroy( state.camera_module ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );
    CUDA_CHECK( cudaStreamDestroy( state.stream ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.callablesRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.params.accum_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.params.lights.data ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_params ) ) );
}
// Entry point: parses options, builds the OptiX state, then either runs the
// interactive GLFW render loop or renders a single frame to an image file
// (when --file/-f is given).
int main( int argc, char* argv[] )
{
    CallableProgramsState state;
    state.params.width = 768;
    state.params.height = 768;
    sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;
    //
    // Parse command line options
    //
    std::string outfile;
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--no-gl-interop" )
        {
            output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
        }
        else if( arg == "--file" || arg == "-f" )
        {
            // --file requires a following filename argument.
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outfile = argv[++i];
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            int w, h;
            sutil::parseDimensions( dims_arg.c_str(), w, h );
            state.params.width = w;
            state.params.height = h;
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    try
    {
        initCameraState();
        //
        // Set up OptiX state
        //
        createContext( state );
        createGeometry( state );
        createPipeline( state );
        createSBT( state );
        initLaunchParams( state );
        //
        // Render loop
        //
        if( outfile.empty() )
        {
            // Interactive mode: create the window and register input callbacks.
            GLFWwindow* window = sutil::initUI( "optixCallablePrograms", state.params.width, state.params.height );
            glfwSetMouseButtonCallback( window, mouseButtonCallback );
            glfwSetCursorPosCallback( window, cursorPosCallback );
            glfwSetWindowSizeCallback( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback( window, keyCallback );
            glfwSetScrollCallback( window, scrollCallback );
            glfwSetWindowUserPointer( window, &state.params );
            {
                // output_buffer needs to be destroyed before cleanupUI is called
                sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, state.params.width, state.params.height );
                output_buffer.setStream( state.stream );
                sutil::GLDisplay gl_display;
                // Accumulated per-phase timings shown in the stats overlay.
                std::chrono::duration<double> state_update_time( 0.0 );
                std::chrono::duration<double> render_time( 0.0 );
                std::chrono::duration<double> display_time( 0.0 );
                do
                {
                    auto t0 = std::chrono::steady_clock::now();
                    glfwPollEvents();
                    updateState( output_buffer, state );
                    auto t1 = std::chrono::steady_clock::now();
                    state_update_time += t1 - t0;
                    t0 = t1;
                    launchSubframe( output_buffer, state );
                    t1 = std::chrono::steady_clock::now();
                    render_time += t1 - t0;
                    t0 = t1;
                    displaySubframe( output_buffer, gl_display, window );
                    t1 = std::chrono::steady_clock::now();
                    display_time += t1 - t0;
                    sutil::displayStats( state_update_time, render_time, display_time );
                    glfwSwapBuffers( window );
                    // Each subframe refines the progressive accumulation.
                    ++state.params.subframe_index;
                } while( !glfwWindowShouldClose( window ) );
            }
            sutil::cleanupUI( window );
        }
        else
        {
            // Offline mode: render one frame and write it to `outfile`.
            // A GL context is still required when using GL interop buffers.
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                sutil::initGLFW(); // For GL context
                sutil::initGL();
            }
            sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, state.params.width, state.params.height );
            handleCameraUpdate( state );
            handleResize( output_buffer, state.params );
            handleShading( state );
            launchSubframe( output_buffer, state );
            sutil::ImageBuffer buffer;
            buffer.data = output_buffer.getHostPointer();
            buffer.width = output_buffer.width();
            buffer.height = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            sutil::saveImage( outfile.c_str(), buffer, false );
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                glfwTerminate();
            }
        }
        cleanupState( state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCallablePrograms/optixCallablePrograms.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <cuda/whitted_cuda.h>
#include "optixCallablePrograms.h"
// Direct callables for shading
// Direct callable: Lambertian diffuse + Blinn specular shading over the
// global light list, with a hard-coded material (Ka/Kd/Ks and exponent).
extern "C" __device__ float3 __direct_callable__phong_shade( float3 hit_point, float3 ray_dir, float3 normal )
{
    float3 Ka = {0.2f, 0.5f, 0.5f};
    float3 Kd = {0.2f, 0.7f, 0.8f};
    float3 Ks = {0.9f, 0.9f, 0.9f};
    float phong_exp = 64.0f;
    float3 result = make_float3( 0.0f );
    for( int i = 0; i < whitted::params.lights.count; ++i )
    {
        Light light = whitted::params.lights[i];
        if( light.type == Light::Type::POINT )
        {
            // compute direct lighting
            float Ldist = length( light.point.position - hit_point );
            float3 L = normalize( light.point.position - hit_point );
            float nDl = dot( normal, L );
            // NOTE(review): nDl is not clamped to >= 0, so lights behind the
            // surface subtract color; Ldist is computed but unused and no
            // shadow ray is traced — confirm this simplified model is intended.
            result += Kd * nDl * light.point.color;
            // Blinn half-vector specular term, applied only when facing the light.
            float3 H = normalize( L - ray_dir );
            float nDh = dot( normal, H );
            if( nDh > 0 )
            {
                float power = pow( nDh, phong_exp );
                result += Ks * power * light.point.color;
            }
        }
        else if( light.type == Light::Type::AMBIENT )
        {
            // ambient contribution
            result += Ka * light.ambient.color;
        }
    }
    return result;
}
// Direct callable: procedural checkerboard in spherical coordinates
// (polar angle a, azimuth b), cells alternated via XOR of the two band
// tests, brightness modulated by |N . ray_dir|.
extern "C" __device__ float3 __direct_callable__checkered_shade( float3 hit_point, float3 ray_dir, float3 normal )
{
    float3 result;
    // |cos| of the angle between the normal and the viewing ray.
    float value = dot( normal, ray_dir );
    if( value < 0 )
    {
        value *= -1;
    }
    // NOTE(review): normalizing hit_point to get the "sphere normal" assumes
    // the sphere is centered at the world origin — confirm against g_sphere.
    float3 sphere_normal = normalize( hit_point );
    float a = acos( sphere_normal.y );
    float b = atan2( sphere_normal.x, sphere_normal.z ) + M_PIf;
    // lights[0] is the ambient light set up in initLaunchParams.
    Light::Ambient light = whitted::params.lights[0].ambient;
    if( ( fmod( a, M_PIf / 8 ) < M_PIf / 16 ) ^ ( fmod( b, M_PIf / 4 ) < M_PIf / 8 ) )
    {
        result = light.color + ( value * make_float3( 0.0f ) );
    }
    else
    {
        result = light.color + ( value * make_float3( 1.0f ) );
    }
    return clamp( result, 0.0f, 1.0f );
}
// Direct callable: visualize the surface normal by remapping each component
// of the unit vector from [-1,1] to the displayable [0,1] range.
extern "C" __device__ float3 __direct_callable__normal_shade( float3 hit_point, float3 ray_dir, float3 normal )
{
    const float3 n = normalize( normal );
    return n * 0.5f + 0.5f;
}
// Closest hit
// Closest hit
extern "C" __global__ void __closesthit__radiance()
{
    const HitGroupData* hitgroup_data = reinterpret_cast<HitGroupData*>( optixGetSbtDataPointer() );
    // Reconstruct the world-space hit point from the ray and hit distance.
    const float3 ray_orig = optixGetWorldRayOrigin();
    const float3 ray_dir = optixGetWorldRayDirection();
    const float ray_t = optixGetRayTmax();
    float3 hit_point = ray_orig + ray_t * ray_dir;
    // The intersection program reports the object-space normal through
    // attribute registers 0..2 (float bits stored as ints).
    float3 object_normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ),
                                        int_as_float( optixGetAttribute_2() ) );
    float3 world_normal = normalize( optixTransformNormalFromObjectToWorldSpace( object_normal ) );
    // Flip the normal to face the viewer.
    float3 ffnormal = faceforward( world_normal, -ray_dir, world_normal );
    // Use a direct callable to set the result
    float3 result = optixDirectCall<float3, float3, float3, float3>( hitgroup_data->dc_index, hit_point, ray_dir, ffnormal );
    whitted::setPayloadResult( result );
}
// Continuation callable for background
// Continuation callable: background color derived from the normalized ray
// direction, remapped from [-1,1] to [0,1].
extern "C" __device__ float3 __continuation_callable__raydir_shade( float3 ray_dir )
{
    const float3 dir = normalize( ray_dir );
    return dir * 0.5f + 0.5f;
}
// Miss
// Miss program: rays that hit nothing are shaded by continuation callable 0
// (the ray-direction gradient background).
extern "C" __global__ void __miss__raydir_shade()
{
    const float3 ray_dir = optixGetWorldRayDirection();
    const float3 result  = optixContinuationCall<float3, float3>( 0, ray_dir );
    whitted::setPayloadResult( result );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCallablePrograms/optixCallablePrograms.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <cuda/sphere.h>
// Placeholder SBT record payload for programs that carry no per-record data.
struct EmptyData
{
};
// Hit-group SBT record payload: the sphere geometry plus the index of the
// direct callable the closest-hit program invokes to shade it.
struct HitGroupData : sphere::SphereHitGroupData
{
    unsigned int dc_index;  // index into the callables SBT records
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCompileWithTasks/optixCompileWithTasks.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <CompileWithTasks.h>
#include <sutil/Exception.h>
using namespace optix::CompileWithTasks;
// OptiX log callback: formats "[level][tag]: message" (level right-aligned
// to 2 columns, tag to 12) and writes the assembled line to stderr.
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */)
{
    std::ostringstream line;
    line << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << "\n";
    std::cerr << line.str();
}
// Globals shared by the compile-benchmark helpers below.
OptixDeviceContext s_context = 0;                         // created in SetUp(), destroyed in TearDown()
OptixDeviceContextOptions s_options = {};                 // context options (log callback + level), filled in SetUp()
OptixModuleCompileOptions s_moduleCompileOptions = {};    // default module compile options
OptixPipelineCompileOptions s_pipelineCompileOptions = {};  // default pipeline compile options
unsigned int s_defaultLogLevel = 4;                       // verbosity restored after timed runs
OptixTaskExecutePool g_pool;                              // worker pool used by the task-based compile path
// Initializes CUDA and OptiX and creates the global device context on the
// current CUDA context, with logging routed to context_log_cb.
static void SetUp()
{
    // A no-op CUDA call forces runtime/primary-context initialization.
    CUDA_CHECK( cudaFree( 0 ) );
    void* handle;
    // NOTE(review): the library handle is never stored or closed — confirm
    // that keeping it for the process lifetime is intended.
    OPTIX_CHECK( optixInitWithHandle( &handle ) );
    s_options.logCallbackFunction = &context_log_cb;
    s_options.logCallbackLevel = s_defaultLogLevel;
    CUcontext cuCtx = 0; // zero means take the current context
    OPTIX_CHECK( optixDeviceContextCreate( cuCtx, &s_options, &s_context ) );
}
// Changes the log verbosity of the global context; keeps context_log_cb as the sink.
static void SetLoggingLevel( unsigned int level )
{
    OPTIX_CHECK( optixDeviceContextSetLogCallback( s_context, &context_log_cb, 0, level ) );
}
// Destroys the global OptiX device context created by SetUp().
static void TearDown()
{
    OPTIX_CHECK( optixDeviceContextDestroy( s_context ) );
}
// Reads the whole file as raw bytes and returns it as a string.
// Exits the process with status 1 if the file cannot be opened.
std::string readPTXFile( const std::string& filename )
{
    std::ifstream ptxStream( filename.c_str(), std::ios::binary );
    if( !ptxStream )
    {
        std::cerr << "ERROR: Failed to open PTX file '" << filename << "'\n";
        exit( 1 );
    }
    // Slurp the stream buffer in one shot.
    std::ostringstream contents;
    contents << ptxStream.rdbuf();
    return contents.str();
}
// Simple wall-clock stopwatch: starts measuring on construction.
struct Timer
{
    Timer() : m_start( m_clock.now() ) {}

    // Seconds elapsed since construction.
    double elapsed() const
    {
        return std::chrono::duration<double>( m_clock.now() - m_start ).count();
    }

    // Streams the elapsed time in seconds.
    friend std::ostream& operator<<( std::ostream& out, const Timer& timer ) { return out << timer.elapsed(); }

    std::chrono::high_resolution_clock             m_clock;
    std::chrono::high_resolution_clock::time_point m_start;
};
void compileModule( const std::string& ptx, int numIters = 1 )
{
OptixModule ptxModule;
Timer overallTimer;
for( int i = 0; i < numIters; ++i )
{
Timer iterTimer;
OPTIX_CHECK( optixModuleCreateFromPTX( s_context, &s_moduleCompileOptions, &s_pipelineCompileOptions,
ptx.c_str(), ptx.size(), 0, 0, &ptxModule ) );
if( i == 0 )
SetLoggingLevel( 0 );
std::cout << "iter[" << i << "] duration = " << iterTimer << " seconds\n";
}
double seconds = overallTimer.elapsed();
SetLoggingLevel( s_defaultLogLevel );
std::cout << "over all time " << seconds << " seconds, per iter average = " << seconds / numIters << "\n";
std::cout << "Successfully compiled\n";
}
void compileModuleWithTasks( const std::string& ptx, int numIters = 1 )
{
OptixModule ptxModule;
Timer overallTimer;
for( int i = 0; i < numIters; ++i )
{
Timer iterTimer;
OptixTask firstTask;
OPTIX_CHECK( optixModuleCreateFromPTXWithTasks( s_context, &s_moduleCompileOptions, &s_pipelineCompileOptions,
ptx.c_str(), ptx.size(), 0, 0, &ptxModule, &firstTask ) );
OPTIX_CHECK( g_pool.executeTaskAndWait( ptxModule, firstTask ) );
if( i == 0 )
SetLoggingLevel( 0 );
std::cout << "iter[" << i << "] duration = " << iterTimer << " seconds\n";
}
double seconds = overallTimer.elapsed();
SetLoggingLevel( s_defaultLogLevel );
std::cout << "over all time " << seconds << " seconds, per iter average = " << seconds / numIters << "\n";
std::cout << "Successfully compiled\n";
}
// Prints the command-line usage of this tool and, unless doExit is false,
// terminates the process with exit status 1.
//
// Fix: the "-nt" line claimed a default of 1, but main() initializes
// numThreads to 2; the usage text now matches the code.
void printUsageAndExit( const std::string& argv0, bool doExit = true )
{
    // These provide a rudimentary set of options and are by no means exhaustive to the
    // set of compile options available to optixModuleCreateFromPTX.
    std::cerr << "\nUsage  : " << argv0 << " [options] <input_file.ptx>\n"
              << "App options:\n"
              << "  -h   | --help                    Print this usage message\n"
              << "  -na  | --num-attributes <N>      Number of attribute values (up to 8, default 2)\n"
              << "  -npv | --num-payload-values <N>  Number of payload values (up to "
              << OPTIX_COMPILE_DEFAULT_MAX_PAYLOAD_VALUE_COUNT << ", default 2)\n"
              << "  -npt | --num-payload-types <N>   Number of payload types (up to "
              << OPTIX_COMPILE_DEFAULT_MAX_PAYLOAD_TYPE_COUNT << ", default 1)\n"
              << "  -ni  | --num-iters <N>           Number of iterations to compile. > 1 disables disk cache (default 1)\n"
              << "  -dt  | --disable-tasks           Disable compilation with tasks (default enabled)\n"
              << "  -nt  | --num-threads <N>         Number of threads (default 2)\n"
              << "  -mt  | --max-num-tasks <N>       Maximum number of additional tasks (default 2)\n"
              << std::endl;
    if( doExit )
        exit( 1 );
}
// Entry point: parses command-line options, compiles the given PTX file either
// with the blocking API or the task-based API, and reports timings.
//
// NOTE(review): option parsing is order-dependent — "-npt" snapshots the
// numPayloadValues current at the moment it is parsed, so passing "-npv" after
// "-npt" has no effect on the payload types; confirm this is intended.
int main( int argc, char** argv )
{
    bool        useTasks    = true;
    int         numThreads  = 2;
    int         maxNumTasks = 2;
    int         numIters    = 1;
    std::string filename;

    std::vector<OptixPayloadType> types;
    std::vector<unsigned int>     defaultPayloadSemantics;

    if( argc < 2 )
    {
        std::cerr << "\nERROR: No input PTX file provided for compilation\n";
        printUsageAndExit( argv[0] );
    }

    for( int i = 1; i < argc; ++i )
    {
        std::string arg( argv[i] );
        if( arg == "-h" || arg == "--help" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "-na" || arg == "--num-attributes" )
        {
            if( i >= argc-1 )
                printUsageAndExit( argv[0] );
            s_pipelineCompileOptions.numAttributeValues = atoi( argv[++i] );
        }
        else if( arg == "-npv" || arg == "--num-payload-values" )
        {
            if( i >= argc-1 )
                printUsageAndExit( argv[0] );
            s_pipelineCompileOptions.numPayloadValues = atoi( argv[++i] );
        }
        else if( arg == "-npt" || arg == "--num-payload-types" )
        {
            if( i >= argc-1 )
                printUsageAndExit( argv[0] );
            int numTypes = atoi( argv[++i] );
            types.resize( numTypes, {} );
            // Give every payload value full read/write semantics in all program types.
            defaultPayloadSemantics.resize( s_pipelineCompileOptions.numPayloadValues, 0 );
            for( unsigned int& payloadSemantic : defaultPayloadSemantics )
            {
                payloadSemantic = OPTIX_PAYLOAD_SEMANTICS_TRACE_CALLER_READ_WRITE
                    | OPTIX_PAYLOAD_SEMANTICS_CH_READ_WRITE | OPTIX_PAYLOAD_SEMANTICS_MS_READ_WRITE
                    | OPTIX_PAYLOAD_SEMANTICS_AH_READ_WRITE | OPTIX_PAYLOAD_SEMANTICS_IS_READ_WRITE;
            }
            // All declared payload types share the same default semantics array.
            for( OptixPayloadType& type : types )
            {
                type.numPayloadValues = static_cast<unsigned int>( defaultPayloadSemantics.size() );
                type.payloadSemantics = defaultPayloadSemantics.data();
            }
            // When explicit payload types are used, the pipeline-level payload
            // value count must be zero; the types are set on the module options.
            s_pipelineCompileOptions.numPayloadValues = 0;
            s_moduleCompileOptions.numPayloadTypes    = numTypes;
            s_moduleCompileOptions.payloadTypes       = types.data();
        }
        else if( arg == "-ni" || arg == "--num-iters" )
        {
            if( i >= argc-1 )
                printUsageAndExit( argv[0] );
            numIters = atoi( argv[++i] );
        }
        else if( arg == "-dt" || arg == "--disable-tasks" )
        {
            useTasks = false;
        }
        else if( arg == "-nt" || arg == "--num-threads" )
        {
            if( i >= argc-1 )
                printUsageAndExit( argv[0] );
            numThreads = atoi( argv[++i] );
        }
        else if( arg == "-mt" || arg == "--max-num-tasks" )
        {
            if( i >= argc-1 )
                printUsageAndExit( argv[0] );
            maxNumTasks = atoi( argv[++i] );
        }
        else
        {
            // Any non-option argument is taken as the input PTX file; the last one wins.
            filename = arg;
        }
    }

    SetUp();
    // Disable the disk cache when benchmarking repeated compiles, so every
    // iteration does real work instead of a cache hit.
    if( numIters > 1 )
        optixDeviceContextSetCacheEnabled( s_context, 0 );
    std::string ptx = readPTXFile( filename );
    if( useTasks )
    {
        g_pool.m_threadPool.startPool( numThreads );
        g_pool.m_maxNumAdditionalTasks = maxNumTasks;
        compileModuleWithTasks( ptx, numIters );
        g_pool.m_threadPool.terminate();
    }
    else
        compileModule( ptx, numIters );
    TearDown();

    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCurves/optixCurves.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <cuda_runtime.h>
#include <sampleConfig.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Exception.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include "optixCurves.h"
#include <array>
#include <cassert>
#include <iomanip>
#include <iostream>
#include <string>
#include <cmath>
#include <sutil/Camera.h>
#include <sutil/Trackball.h>
// A shader binding table record: the OptiX-mandated, correctly aligned header
// (filled by optixSbtRecordPackHeader) followed by the program's user data.
template <typename T>
struct SbtRecord
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};

// Concrete record types for each program kind used by this sample.
typedef SbtRecord<RayGenData>   RayGenSbtRecord;
typedef SbtRecord<MissData>     MissSbtRecord;
typedef SbtRecord<HitGroupData> HitGroupSbtRecord;
// Sets up a fixed camera looking at the origin from (0,0,2) with a 45-degree
// vertical FOV and the aspect ratio of the output image.
// NOTE(review): the up vector {0,1,3} is neither unit-length nor axis-aligned —
// presumably an intentional tilt; confirm against the rendered output.
void configureCamera( sutil::Camera& cam, const uint32_t width, const uint32_t height )
{
    cam.setEye( {0.0f, 0.0f, 2.0f} );
    cam.setLookat( {0.0f, 0.0f, 0.0f} );
    cam.setUp( {0.0f, 1.0f, 3.0f} );
    cam.setFovY( 45.0f );
    cam.setAspectRatio( (float)width / (float)height );
}
// OptiX log callback: echoes runtime messages to stderr as
// "[<level:2>][<tag:12>]: <message>".
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::cerr << '[' << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << '\n';
}
// Prints this sample's command-line usage to stderr and exits with status 1.
//
// Fix: removed the stray period in the degree-3 description
// ("Cubic b-spline/flat. caps" -> "Cubic b-spline/flat caps") so it matches
// the degree-2 wording.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage  : " << argv0 << " [options]\n";
    std::cerr << "Options: --file | -f <filename>      Specify file for image output\n";
    std::cerr << "         --help | -h                 Print this usage message\n";
    std::cerr << "         --dim=<width>x<height>      Set image dimensions; defaults to 512x384\n";
    std::cerr << "         --deg  | -d <deg>           Specify polynomial degree of curve (default 3)\n";
    std::cerr << "                                     Valid options:\n";
    std::cerr << "                                     1 - Linear curve segments/round caps,\n";
    std::cerr << "                                     2 - Quadratic b-spline/flat caps,\n";
    std::cerr << "                                     3 - Cubic b-spline/flat caps\n";
    std::cerr << "         --rad  | -r <rad>           Specify radius of curve (default 0.4)\n";
    std::cerr << "         --mot  | -m                 Render with motion blur\n";
    exit( 1 );
}
// optixCurves sample entry point: builds a single curve primitive (linear,
// quadratic or cubic b-spline, optionally with 6 motion keys), compiles a
// minimal raygen/miss/closest-hit pipeline, launches one frame and either
// displays it in a window or writes it to a file.
//
// Fixes relative to the original:
//  - d_widths and the segment-index buffer are now freed after the accel
//    build (only d_vertices was freed before; the other two leaked).
//  - repaired the mis-encoded "&params" argument in the cudaMemcpy of the
//    launch parameters.
//  - renamed the misspelled local d_segementIndices -> d_segmentIndices.
int main( int argc, char* argv[] )
{
    //
    // Command-line parameter parsing
    //
    std::string outfile;
    int         width       = 1024;
    int         height      = 768;
    int         degree      = 3;
    float       radius      = 0.4f;
    bool        motion_blur = false;

    for( int i = 1; i < argc; ++i )
    {
        const std::string arg( argv[i] );
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--file" || arg == "-f" )
        {
            if( i < argc - 1 )
            {
                outfile = argv[++i];
            }
            else
            {
                printUsageAndExit( argv[0] );
            }
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            sutil::parseDimensions( dims_arg.c_str(), width, height );
        }
        else if( arg == "-d" || arg == "--deg" )
        {
            if( i < argc - 1 )
            {
                degree = atoi( argv[++i] );
                if( 0 >= degree || degree > 3 )
                {
                    std::cerr << "Curve degree must be in {1, 2, 3}.\n\n";
                    printUsageAndExit( argv[0] );
                }
            }
            else
            {
                printUsageAndExit( argv[0] );
            }
        }
        else if( arg == "-r" || arg == "--rad" )
        {
            if( i < argc - 1 )
            {
                radius = static_cast<float>( atof( argv[++i] ) );
            }
            else
            {
                printUsageAndExit( argv[0] );
            }
        }
        else if( arg == "-m" || arg == "--mot" )
        {
            motion_blur = true;
        }
        else
        {
            std::cerr << "Unknown option '" << arg << "'\n";
            printUsageAndExit( argv[0] );
        }
    }

    try
    {
        char log[2048];  // For error reporting from OptiX creation functions

        //
        // Initialize CUDA and create OptiX context
        //
        OptixDeviceContext context = nullptr;
        {
            // Initialize CUDA
            CUDA_CHECK( cudaFree( 0 ) );

            // Initialize the OptiX API, loading all API entry points
            OPTIX_CHECK( optixInit() );

            // Specify context options
            OptixDeviceContextOptions options = {};
            options.logCallbackFunction       = &context_log_cb;
            options.logCallbackLevel          = 4;

            // Associate a CUDA context (and therefore a specific GPU) with this
            // device context
            CUcontext cuCtx = 0;  // zero means take the current context
            OPTIX_CHECK( optixDeviceContextCreate( cuCtx, &options, &context ) );
        }

        //
        // accel handling
        //
        OptixTraversableHandle gas_handle;
        CUdeviceptr            d_gas_output_buffer;
        {
            // Number of motion keys
            const int NUM_KEYS = 6;

            // Use default options for simplicity.  In a real use case we would want to
            // enable compaction, etc
            OptixAccelBuildOptions accel_options = {};
            accel_options.buildFlags             = OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS;
            accel_options.operation              = OPTIX_BUILD_OPERATION_BUILD;
            if( motion_blur )
            {
                accel_options.motionOptions.numKeys   = NUM_KEYS;
                accel_options.motionOptions.timeBegin = 0.0f;
                accel_options.motionOptions.timeEnd   = 1.0f;
                accel_options.motionOptions.flags     = OPTIX_MOTION_FLAG_NONE;
            }

            // Curve build input: simple list of three/four vertices per motion key.
            // The y-coordinates are scaled by a cosine of the key index so the
            // curve sways over the motion interval.
            std::vector<float3> vertices;
            std::vector<float>  widths;
            SUTIL_ASSERT( radius > 0.0 );
            for( int i = 0; i < NUM_KEYS; ++i )
            {
                // move the y-coordinates based on cosine
                const float c = cosf( i / static_cast<float>( NUM_KEYS ) * 2.0f * static_cast<float>( M_PI ) );
                switch( degree )
                {
                    case 1: {
                        vertices.push_back( make_float3( -0.25f, -0.25f * c, 0.0f ) );
                        widths.push_back( 0.3f );
                        vertices.push_back( make_float3( 0.25f, 0.25f * c, 0.0f ) );
                        widths.push_back( radius );
                    } break;
                    case 2: {
                        vertices.push_back( make_float3( -1.5f, -2.0f * c, 0.0f ) );
                        widths.push_back( .01f );
                        vertices.push_back( make_float3( 0.0f, 1.0f * c, 0.0f ) );
                        widths.push_back( radius );
                        vertices.push_back( make_float3( 1.5f, -2.0f * c, 0.0f ) );
                        widths.push_back( .01f );
                    } break;
                    case 3: {
                        vertices.push_back( make_float3( -1.5f, -3.5f * c, 0.0f ) );
                        widths.push_back( .01f );
                        vertices.push_back( make_float3( -1.0f, 0.5f * c, 0.0f ) );
                        widths.push_back( radius );
                        vertices.push_back( make_float3( 1.0f, 0.5f * c, 0.0f ) );
                        widths.push_back( radius );
                        vertices.push_back( make_float3( 1.5f, -3.5f * c, 0.0f ) );
                        widths.push_back( .01f );
                    } break;
                    default:
                        SUTIL_ASSERT_MSG( false, "Curve degree must be in {1, 2, 3}." );
                }
            }

            const size_t vertices_size = sizeof( float3 ) * vertices.size();
            CUdeviceptr  d_vertices    = 0;
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_vertices ), vertices_size ) );
            CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_vertices ), vertices.data(), vertices_size, cudaMemcpyHostToDevice ) );
            const size_t widthsSize = sizeof( float ) * widths.size();
            CUdeviceptr  d_widths   = 0;
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_widths ), widthsSize ) );
            CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_widths ), widths.data(), widthsSize, cudaMemcpyHostToDevice ) );

            // One vertex/width buffer pointer per motion key.
            CUdeviceptr vertexBufferPointers[NUM_KEYS];
            CUdeviceptr widthBufferPointers[NUM_KEYS];
            for( int i = 0; i < NUM_KEYS; ++i )
            {
                vertexBufferPointers[i] = d_vertices + i * ( degree + 1 ) * sizeof( float3 );
                widthBufferPointers[i]  = d_widths + i * ( degree + 1 ) * sizeof( float );
            }

            // Curve build input: with a single segment the index array
            // contains index of first vertex.
            const std::array<int, 1> segmentIndices     = {0};
            const size_t             segmentIndicesSize = sizeof( int ) * segmentIndices.size();
            CUdeviceptr              d_segmentIndices   = 0;
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_segmentIndices ), segmentIndicesSize ) );
            CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_segmentIndices ), segmentIndices.data(),
                                    segmentIndicesSize, cudaMemcpyHostToDevice ) );

            // Curve build input.
            OptixBuildInput curve_input = {};
            curve_input.type            = OPTIX_BUILD_INPUT_TYPE_CURVES;
            switch( degree )
            {
                case 1:
                    curve_input.curveArray.curveType = OPTIX_PRIMITIVE_TYPE_ROUND_LINEAR;
                    break;
                case 2:
                    curve_input.curveArray.curveType = OPTIX_PRIMITIVE_TYPE_ROUND_QUADRATIC_BSPLINE;
                    break;
                case 3:
                    curve_input.curveArray.curveType = OPTIX_PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE;
                    break;
            }
            curve_input.curveArray.numPrimitives        = 1;
            curve_input.curveArray.vertexBuffers        = vertexBufferPointers;
            curve_input.curveArray.numVertices          = static_cast<uint32_t>( vertices.size() );
            curve_input.curveArray.vertexStrideInBytes  = sizeof( float3 );
            curve_input.curveArray.widthBuffers         = widthBufferPointers;
            curve_input.curveArray.widthStrideInBytes   = sizeof( float );
            curve_input.curveArray.normalBuffers        = 0;
            curve_input.curveArray.normalStrideInBytes  = 0;
            curve_input.curveArray.indexBuffer          = d_segmentIndices;
            curve_input.curveArray.indexStrideInBytes   = sizeof( int );
            curve_input.curveArray.flag                 = OPTIX_GEOMETRY_FLAG_NONE;
            curve_input.curveArray.primitiveIndexOffset = 0;

            OptixAccelBufferSizes gas_buffer_sizes;
            OPTIX_CHECK( optixAccelComputeMemoryUsage( context, &accel_options, &curve_input,
                                                       1,  // Number of build inputs
                                                       &gas_buffer_sizes ) );
            CUdeviceptr d_temp_buffer_gas;
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer_gas ), gas_buffer_sizes.tempSizeInBytes ) );
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_gas_output_buffer ), gas_buffer_sizes.outputSizeInBytes ) );

            OPTIX_CHECK( optixAccelBuild( context, 0,  // CUDA stream
                                          &accel_options, &curve_input,
                                          1,  // num build inputs
                                          d_temp_buffer_gas, gas_buffer_sizes.tempSizeInBytes, d_gas_output_buffer,
                                          gas_buffer_sizes.outputSizeInBytes, &gas_handle,
                                          nullptr,  // emitted property list
                                          0 ) );    // num emitted properties

            // We can now free the scratch space buffer used during build and the
            // curve inputs (vertices, widths, segment indices), since they are
            // not needed by our trivial shading method.
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer_gas ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_vertices ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_widths ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_segmentIndices ) ) );
        }

        //
        // Create modules
        //
        OptixModule                 shading_module           = nullptr;
        OptixModule                 geometry_module          = nullptr;
        OptixPipelineCompileOptions pipeline_compile_options = {};
        {
            OptixModuleCompileOptions module_compile_options = {};
            module_compile_options.maxRegisterCount          = OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
            module_compile_options.optLevel                  = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
            module_compile_options.debugLevel                = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;

            pipeline_compile_options.usesMotionBlur        = motion_blur;  // enable motion-blur in pipeline
            pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS;
            pipeline_compile_options.numPayloadValues      = 3;
            pipeline_compile_options.numAttributeValues    = 1;
#ifdef DEBUG  // Enables debug exceptions during optix launches. This may incur significant performance cost and should only be done during development.
            pipeline_compile_options.exceptionFlags =
                OPTIX_EXCEPTION_FLAG_DEBUG | OPTIX_EXCEPTION_FLAG_TRACE_DEPTH | OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
#else
            pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
#endif
            pipeline_compile_options.pipelineLaunchParamsVariableName = "params";
            switch( degree )
            {
                case 1:
                    pipeline_compile_options.usesPrimitiveTypeFlags = OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_LINEAR;
                    break;
                case 2:
                    pipeline_compile_options.usesPrimitiveTypeFlags = OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_QUADRATIC_BSPLINE;
                    break;
                case 3:
                    pipeline_compile_options.usesPrimitiveTypeFlags = OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_CUBIC_BSPLINE;
                    break;
            }

            size_t      inputSize = 0;
            const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixCurves.cu", inputSize );
            size_t      sizeof_log = sizeof( log );

            OPTIX_CHECK_LOG( optixModuleCreateFromPTX( context, &module_compile_options, &pipeline_compile_options,
                                                       input, inputSize, log, &sizeof_log, &shading_module ) );

            // Built-in intersection module matching the chosen curve degree.
            OptixBuiltinISOptions builtinISOptions = {};
            switch( degree )
            {
                case 1:
                    builtinISOptions.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_LINEAR;
                    break;
                case 2:
                    builtinISOptions.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_QUADRATIC_BSPLINE;
                    break;
                case 3:
                    builtinISOptions.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE;
                    break;
            }
            builtinISOptions.usesMotionBlur = motion_blur;  // enable motion-blur for built-in intersector
            OPTIX_CHECK( optixBuiltinISModuleGet( context, &module_compile_options, &pipeline_compile_options,
                                                  &builtinISOptions, &geometry_module ) );
        }

        //
        // Create program groups
        //
        OptixProgramGroup raygen_prog_group   = nullptr;
        OptixProgramGroup miss_prog_group     = nullptr;
        OptixProgramGroup hitgroup_prog_group = nullptr;
        {
            OptixProgramGroupOptions program_group_options = {};  // Initialize to zeros

            OptixProgramGroupDesc raygen_prog_group_desc = {};  //
            raygen_prog_group_desc.kind                  = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
            raygen_prog_group_desc.raygen.module         = shading_module;
            if( motion_blur )
            {
                raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__motion_blur";
            }
            else
            {
                raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__basic";
            }
            size_t sizeof_log = sizeof( log );
            OPTIX_CHECK_LOG( optixProgramGroupCreate( context, &raygen_prog_group_desc,
                                                      1,  // num program groups
                                                      &program_group_options, log, &sizeof_log, &raygen_prog_group ) );

            OptixProgramGroupDesc miss_prog_group_desc  = {};
            miss_prog_group_desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
            miss_prog_group_desc.miss.module            = shading_module;
            miss_prog_group_desc.miss.entryFunctionName = "__miss__ms";
            sizeof_log                                  = sizeof( log );
            OPTIX_CHECK_LOG( optixProgramGroupCreate( context, &miss_prog_group_desc,
                                                      1,  // num program groups
                                                      &program_group_options, log, &sizeof_log, &miss_prog_group ) );

            OptixProgramGroupDesc hitgroup_prog_group_desc        = {};
            hitgroup_prog_group_desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
            hitgroup_prog_group_desc.hitgroup.moduleCH            = shading_module;
            hitgroup_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__ch";
            hitgroup_prog_group_desc.hitgroup.moduleIS            = geometry_module;
            hitgroup_prog_group_desc.hitgroup.entryFunctionNameIS = 0;  // automatically supplied for built-in module
            sizeof_log                                            = sizeof( log );
            OPTIX_CHECK_LOG( optixProgramGroupCreate( context, &hitgroup_prog_group_desc,
                                                      1,  // num program groups
                                                      &program_group_options, log, &sizeof_log, &hitgroup_prog_group ) );
        }

        //
        // Link pipeline
        //
        OptixPipeline pipeline = nullptr;
        {
            const uint32_t    max_trace_depth  = 1;
            OptixProgramGroup program_groups[] = {raygen_prog_group, miss_prog_group, hitgroup_prog_group};

            OptixPipelineLinkOptions pipeline_link_options = {};
            pipeline_link_options.maxTraceDepth            = max_trace_depth;
            pipeline_link_options.debugLevel               = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
            size_t sizeof_log                              = sizeof( log );
            OPTIX_CHECK_LOG( optixPipelineCreate( context, &pipeline_compile_options, &pipeline_link_options,
                                                  program_groups, sizeof( program_groups ) / sizeof( program_groups[0] ),
                                                  log, &sizeof_log, &pipeline ) );

            OptixStackSizes stack_sizes = {};
            for( auto& prog_group : program_groups )
            {
                OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
            }

            uint32_t direct_callable_stack_size_from_traversal;
            uint32_t direct_callable_stack_size_from_state;
            uint32_t continuation_stack_size;
            OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
                                                     0,  // maxCCDepth
                                                     0,  // maxDCDEpth
                                                     &direct_callable_stack_size_from_traversal,
                                                     &direct_callable_stack_size_from_state, &continuation_stack_size ) );
            OPTIX_CHECK( optixPipelineSetStackSize( pipeline, direct_callable_stack_size_from_traversal,
                                                    direct_callable_stack_size_from_state, continuation_stack_size,
                                                    1  // maxTraversableDepth
                                                    ) );
        }

        //
        // Set up shader binding table
        //
        OptixShaderBindingTable sbt = {};
        {
            CUdeviceptr  raygen_record;
            const size_t raygen_record_size = sizeof( RayGenSbtRecord );
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &raygen_record ), raygen_record_size ) );
            RayGenSbtRecord rg_sbt;
            OPTIX_CHECK( optixSbtRecordPackHeader( raygen_prog_group, &rg_sbt ) );
            CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( raygen_record ), &rg_sbt, raygen_record_size, cudaMemcpyHostToDevice ) );

            CUdeviceptr miss_record;
            size_t      miss_record_size = sizeof( MissSbtRecord );
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &miss_record ), miss_record_size ) );
            MissSbtRecord ms_sbt;
            ms_sbt.data = {0.0f, 0.2f, 0.6f};  // background color (blue)
            OPTIX_CHECK( optixSbtRecordPackHeader( miss_prog_group, &ms_sbt ) );
            CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( miss_record ), &ms_sbt, miss_record_size, cudaMemcpyHostToDevice ) );

            CUdeviceptr hitgroup_record;
            size_t      hitgroup_record_size = sizeof( HitGroupSbtRecord );
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &hitgroup_record ), hitgroup_record_size ) );
            HitGroupSbtRecord hg_sbt;
            OPTIX_CHECK( optixSbtRecordPackHeader( hitgroup_prog_group, &hg_sbt ) );
            CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( hitgroup_record ), &hg_sbt, hitgroup_record_size, cudaMemcpyHostToDevice ) );

            sbt.raygenRecord                = raygen_record;
            sbt.missRecordBase              = miss_record;
            sbt.missRecordStrideInBytes     = sizeof( MissSbtRecord );
            sbt.missRecordCount             = 1;
            sbt.hitgroupRecordBase          = hitgroup_record;
            sbt.hitgroupRecordStrideInBytes = sizeof( HitGroupSbtRecord );
            sbt.hitgroupRecordCount         = 1;
        }

        sutil::CUDAOutputBuffer<uchar4> output_buffer( sutil::CUDAOutputBufferType::CUDA_DEVICE, width, height );

        //
        // launch
        //
        {
            CUstream stream;
            CUDA_CHECK( cudaStreamCreate( &stream ) );

            sutil::Camera cam;
            configureCamera( cam, width, height );

            Params params;
            params.image        = output_buffer.map();
            params.image_width  = width;
            params.image_height = height;
            params.handle       = gas_handle;
            params.cam_eye      = cam.eye();
            cam.UVWFrame( params.cam_u, params.cam_v, params.cam_w );

            CUdeviceptr d_param;
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_param ), sizeof( Params ) ) );
            CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_param ), &params, sizeof( params ), cudaMemcpyHostToDevice ) );

            OPTIX_CHECK( optixLaunch( pipeline, stream, d_param, sizeof( Params ), &sbt, width, height, /*depth=*/1 ) );
            CUDA_SYNC_CHECK();

            output_buffer.unmap();
        }

        //
        // Display results
        //
        {
            sutil::ImageBuffer buffer;
            buffer.data         = output_buffer.getHostPointer();
            buffer.width        = width;
            buffer.height       = height;
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            if( outfile.empty() )
                sutil::displayBufferWindow( argv[0], buffer );
            else
                sutil::saveImage( outfile.c_str(), buffer, false );
        }

        //
        // Cleanup
        //
        {
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( sbt.raygenRecord ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( sbt.missRecordBase ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( sbt.hitgroupRecordBase ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_gas_output_buffer ) ) );

            OPTIX_CHECK( optixPipelineDestroy( pipeline ) );
            OPTIX_CHECK( optixProgramGroupDestroy( hitgroup_prog_group ) );
            OPTIX_CHECK( optixProgramGroupDestroy( miss_prog_group ) );
            OPTIX_CHECK( optixProgramGroupDestroy( raygen_prog_group ) );
            OPTIX_CHECK( optixModuleDestroy( shading_module ) );
            OPTIX_CHECK( optixModuleDestroy( geometry_module ) );

            OPTIX_CHECK( optixDeviceContextDestroy( context ) );
        }
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCurves/optixCurves.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixCurves.h"
#include <cuda/helpers.h>
#include <random.h>
#include <sutil/vec_math.h>
// Launch parameters, uploaded by the host before each optixLaunch.
extern "C" {
__constant__ Params params;
}
// Stores an RGB color in the three ray-payload registers, reinterpreting
// each float's bits as an unsigned int (read back with int_as_float).
static __forceinline__ __device__ void setPayload( float3 p )
{
    optixSetPayload_0( float_as_int( p.x ) );
    optixSetPayload_1( float_as_int( p.y ) );
    optixSetPayload_2( float_as_int( p.z ) );
}
// Maps a launch index to a primary camera ray: the pixel coordinate is scaled
// into [-1, 1]^2 and used to blend the camera's U/V/W basis from the launch
// parameters.  The origin is the camera eye; the direction is normalized.
static __forceinline__ __device__ void computeRay( uint3 idx, uint3 dim, float3& origin, float3& direction )
{
    const float3 U = params.cam_u;
    const float3 V = params.cam_v;
    const float3 W = params.cam_w;
    // Normalized device coordinates in [-1, 1].
    const float2 d = 2.0f * make_float2(
            static_cast<float>( idx.x ) / static_cast<float>( dim.x ),
            static_cast<float>( idx.y ) / static_cast<float>( dim.y )
            ) - 1.0f;

    origin    = params.cam_eye;
    direction = normalize( d.x * U + d.y * V + W );
}
// Ray-generation program without motion blur: traces one primary ray per
// pixel at rayTime 0 and writes the payload color to the output image.
extern "C" __global__ void __raygen__basic()
{
    // Lookup our location within the launch grid
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();

    // Map our launch idx to a screen location and create a ray from the camera
    // location through the screen
    float3 ray_origin, ray_direction;
    computeRay( idx, dim, ray_origin, ray_direction );

    // Trace the ray against our scene hierarchy
    unsigned int p0, p1, p2;
    optixTrace(
            params.handle,
            ray_origin,
            ray_direction,
            0.0f,                // Min intersection distance
            1e16f,               // Max intersection distance
            0.0f,                // rayTime -- used for motion blur
            OptixVisibilityMask( 255 ), // Specify always visible
            OPTIX_RAY_FLAG_NONE,
            0,                   // SBT offset   -- See SBT discussion
            1,                   // SBT stride   -- See SBT discussion
            0,                   // missSBTIndex -- See SBT discussion
            p0, p1, p2 );
    // Decode the RGB color written by the hit/miss programs via setPayload().
    float3 result;
    result.x = int_as_float( p0 );
    result.y = int_as_float( p1 );
    result.z = int_as_float( p2 );

    // Record results in our output raster
    params.image[idx.y * params.image_width + idx.x] = make_color( result );
}
// Ray-generation program with motion blur: traces NUM_SAMPLES rays per pixel
// at random times in [0, 1) and averages the resulting colors.
extern "C" __global__ void __raygen__motion_blur()
{
    // Lookup our location within the launch grid
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();

    // Map our launch idx to a screen location and create a ray from the camera
    // location through the screen
    float3 ray_origin, ray_direction;
    computeRay( idx, dim, ray_origin, ray_direction );

    // Trace the ray against our scene hierarchy
    unsigned int p0, p1, p2;
    const int NUM_SAMPLES = 100;
    float3 result = {};
    // NOTE(review): the first seed argument mixes idx.y with dim.y and the
    // constant dim.x (the common pattern is idx.y * dim.x + idx.x); idx.x only
    // enters through the second tea<> argument.  Seeds still differ per pixel,
    // but confirm this expression is intentional.
    unsigned int seed = tea<4>(idx.y * dim.y + dim.x, idx.x);
    for( int i = 0; i < NUM_SAMPLES; ++i )
    {
        const float ray_time = rnd(seed); // compute next random ray time in [0, 1[
        optixTrace( params.handle, ray_origin, ray_direction,
                    0.0f,                       // Min intersection distance
                    1e16f,                      // Max intersection distance
                    ray_time,                   // rayTime -- used for motion blur
                    OptixVisibilityMask( 255 ), // Specify always visible
                    OPTIX_RAY_FLAG_NONE,
                    0,  // SBT offset   -- See SBT discussion
                    1,  // SBT stride   -- See SBT discussion
                    0,  // missSBTIndex -- See SBT discussion
                    p0, p1, p2 );
        // Accumulate the decoded payload color over all time samples.
        result.x += int_as_float( p0 );
        result.y += int_as_float( p1 );
        result.z += int_as_float( p2 );
    }

    // Record results in our output raster
    params.image[idx.y * params.image_width + idx.x] = make_color( result / NUM_SAMPLES );
}
// Miss program: writes the background color from this record's SBT data
// into the ray payload.
extern "C" __global__ void __miss__ms()
{
    MissData* miss_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    setPayload( miss_data->bg_color );
}
// Closest-hit program: shades the curve by its parametric coordinate.
extern "C" __global__ void __closesthit__ch()
{
    // When built-in curve intersection is used, the curve parameter u is provided
    // by the OptiX API. The parameter's range is [0,1] over the curve segment,
    // with u=0 or u=1 only on the end caps.
    float u = optixGetCurveParameter();

    // linearly interpolate from black to orange
    setPayload( make_float3( u, u / 3.0f, 0.0f ) );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCurves/optixCurves.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Launch parameters shared between host and the optixCurves device programs.
struct Params
{
    uchar4*                image;         // Output RGBA8 raster (device pointer)
    unsigned int           image_width;   // Raster width in pixels
    unsigned int           image_height;  // Raster height in pixels
    float3                 cam_eye;       // Camera position in world space
    float3                 cam_u, cam_v, cam_w;  // Camera basis: right, up, forward
    OptixTraversableHandle handle;        // Top-level traversable to trace against
};

// SBT record payload for the raygen program (unused).
struct RayGenData
{
    // No data needed
};

// SBT record payload for the miss program.
struct MissData
{
    float3 bg_color;  // Constant background color returned on ray miss
};

// SBT record payload for the hit group (unused).
struct HitGroupData
{
    // No data needed
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCutouts/optixCutouts.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/Camera.h>
#include <sutil/Trackball.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <GLFW/glfw3.h>
#include "optixCutouts.h"
#include <array>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
// Display / window state (mutated by the GLFW callbacks below).
bool use_pbo      = true;   // Use a GL pixel buffer object for CUDA<->GL interop
bool resize_dirty = false;  // A window resize is pending; buffers need realloc
bool minimized    = false;  // Window is iconified; keep the old resolution

// Camera state
bool             camera_changed = true;  // Camera moved; restart accumulation
sutil::Camera    camera;
sutil::Trackball trackball;

// Mouse state
int2    mouse_prev_pos;
int32_t mouse_button = -1;  // Currently held GLFW mouse button, or -1 if none

int32_t samples_per_launch = 16;  // Samples per pixel accumulated per optixLaunch
//------------------------------------------------------------------------------
//
// Local types
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Generic SBT record: the OptiX-required opaque header followed by the
// program-specific payload T.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};

typedef Record<RayGenData>   RayGenRecord;
typedef Record<MissData>     MissRecord;
typedef Record<HitGroupData> HitGroupRecord;

// Triangle vertex; 'pad' makes the struct 16 bytes, matching the vertex
// stride passed to the triangle build input in buildGeomAccel().
struct Vertex
{
    float x, y, z, pad;
};

// Row-major 3x4 instance transform, as expected by OptixInstance::transform.
struct Instance
{
    float transform[12];
};
// Aggregates all OptiX/CUDA objects owned by this sample: device context,
// acceleration structures (triangle GAS, sphere GAS, IAS over both), modules,
// program groups, pipeline, stream, launch params and the SBT.
struct CutoutsState
{
    OptixDeviceContext context = 0;

    OptixTraversableHandle triangle_gas_handle         = 0;  // Traversable handle for triangle AS
    CUdeviceptr            d_triangle_gas_output_buffer = 0;  // Triangle AS memory
    CUdeviceptr            d_vertices                  = 0;   // Device copy of g_vertices
    CUdeviceptr            d_tex_coords                = 0;   // Device copy of g_tex_coords

    OptixTraversableHandle sphere_gas_handle          = 0;  // Traversable handle for sphere AS
    CUdeviceptr            d_sphere_gas_output_buffer = 0;  // Sphere AS memory

    OptixTraversableHandle ias_handle          = 0;  // Traversable handle for instance AS
    CUdeviceptr            d_ias_output_buffer = 0;  // Instance AS memory

    OptixModule                 ptx_module               = 0;  // Module built from optixCutouts.cu
    OptixModule                 sphere_module            = 0;  // Module built from sphere.cu
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixPipeline               pipeline                 = 0;

    OptixProgramGroup raygen_prog_group    = 0;
    OptixProgramGroup radiance_miss_group  = 0;
    OptixProgramGroup occlusion_miss_group = 0;
    OptixProgramGroup radiance_hit_group   = 0;
    OptixProgramGroup occlusion_hit_group  = 0;

    CUstream stream = 0;
    Params   params;      // Host-side copy of the launch parameters
    Params*  d_params;    // Device-side copy, updated each subframe

    OptixShaderBindingTable sbt = {};
};
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
// Cornell-box scene dimensions: 32 triangles over 5 materials, plus one
// custom-primitive sphere with its own single material.
const int32_t TRIANGLE_COUNT     = 32;
const int32_t TRIANGLE_MAT_COUNT = 5;
const int32_t SPHERE_COUNT       = 1;
const int32_t SPHERE_MAT_COUNT   = 1;
// Cornell-box triangle soup: TRIANGLE_COUNT*3 vertices, three per triangle,
// no index buffer. The fourth component of each Vertex is padding (stride 16
// bytes, see buildGeomAccel()). Material assignment per triangle is in
// g_mat_indices below.
const static std::array<Vertex, TRIANGLE_COUNT*3> g_vertices =
{ {
    // Floor -- white lambert
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {    0.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,    0.0f,    0.0f, 0.0f },

    // Ceiling -- white lambert
    {    0.0f,  548.8f,    0.0f, 0.0f },
    {  556.0f,  548.8f,    0.0f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,  548.8f,    0.0f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },

    // Back wall -- white lambert
    {    0.0f,    0.0f,  559.2f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },

    // Right wall -- green lambert
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {    0.0f,  548.8f,    0.0f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,    0.0f,    0.0f, 0.0f },
    {    0.0f,  548.8f,  559.2f, 0.0f },
    {    0.0f,    0.0f,  559.2f, 0.0f },

    // Left wall -- red lambert
    {  556.0f,    0.0f,    0.0f, 0.0f },
    {  556.0f,    0.0f,  559.2f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,    0.0f,    0.0f, 0.0f },
    {  556.0f,  548.8f,  559.2f, 0.0f },
    {  556.0f,  548.8f,    0.0f, 0.0f },

    // Short block -- white lambert
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {  242.0f,  165.0f,  274.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {  242.0f,  165.0f,  274.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  290.0f,    0.0f,  114.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  240.0f,  165.0f,  272.0f, 0.0f },
    {  290.0f,    0.0f,  114.0f, 0.0f },
    {  240.0f,  165.0f,  272.0f, 0.0f },
    {  240.0f,    0.0f,  272.0f, 0.0f },
    {  130.0f,    0.0f,   65.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  130.0f,    0.0f,   65.0f, 0.0f },
    {  290.0f,  165.0f,  114.0f, 0.0f },
    {  290.0f,    0.0f,  114.0f, 0.0f },
    {   82.0f,    0.0f,  225.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {   82.0f,    0.0f,  225.0f, 0.0f },
    {  130.0f,  165.0f,   65.0f, 0.0f },
    {  130.0f,    0.0f,   65.0f, 0.0f },
    {  240.0f,    0.0f,  272.0f, 0.0f },
    {  240.0f,  165.0f,  272.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {  240.0f,    0.0f,  272.0f, 0.0f },
    {   82.0f,  165.0f,  225.0f, 0.0f },
    {   82.0f,    0.0f,  225.0f, 0.0f },

    // Tall block -- white lambert
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  314.0f,  330.0f,  455.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  314.0f,  330.0f,  455.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  423.0f,    0.0f,  247.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  423.0f,    0.0f,  247.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  472.0f,    0.0f,  406.0f, 0.0f },
    {  472.0f,    0.0f,  406.0f, 0.0f },
    {  472.0f,  330.0f,  406.0f, 0.0f },
    {  314.0f,  330.0f,  456.0f, 0.0f },
    {  472.0f,    0.0f,  406.0f, 0.0f },
    {  314.0f,  330.0f,  456.0f, 0.0f },
    {  314.0f,    0.0f,  456.0f, 0.0f },
    {  314.0f,    0.0f,  456.0f, 0.0f },
    {  314.0f,  330.0f,  456.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  314.0f,    0.0f,  456.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  265.0f,    0.0f,  296.0f, 0.0f },
    {  265.0f,    0.0f,  296.0f, 0.0f },
    {  265.0f,  330.0f,  296.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  265.0f,    0.0f,  296.0f, 0.0f },
    {  423.0f,  330.0f,  247.0f, 0.0f },
    {  423.0f,    0.0f,  247.0f, 0.0f },

    // Ceiling light -- emmissive
    {  343.0f,  548.6f,  227.0f, 0.0f },
    {  213.0f,  548.6f,  227.0f, 0.0f },
    {  213.0f,  548.6f,  332.0f, 0.0f },
    {  343.0f,  548.6f,  227.0f, 0.0f },
    {  213.0f,  548.6f,  332.0f, 0.0f },
    {  343.0f,  548.6f,  332.0f, 0.0f }
} };
// Per-triangle material (SBT) index into the TRIANGLE_MAT_COUNT materials;
// copied to the device as the sbtIndexOffsetBuffer in buildGeomAccel().
// NOTE(review): unlike the sibling tables this one is not declared const —
// no visible code mutates it, so it could likely be const; confirm against
// the rest of the file before changing.
static std::array<uint32_t, TRIANGLE_COUNT> g_mat_indices =
{ {
    0, 0,                           // Floor         -- white lambert
    0, 0,                           // Ceiling       -- white lambert
    0, 0,                           // Back wall     -- white lambert
    1, 1,                           // Right wall    -- green lambert
    2, 2,                           // Left wall     -- red lambert
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4,   // Short block   -- cutout
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // Tall block    -- white lambert
    3, 3                            // Ceiling light -- emmissive
} };
// Emission per triangle material; only material 3 (the ceiling light) emits.
const std::array<float3, TRIANGLE_MAT_COUNT> g_emission_colors =
{ {
    {  0.0f,  0.0f,  0.0f },
    {  0.0f,  0.0f,  0.0f },
    {  0.0f,  0.0f,  0.0f },
    { 15.0f, 15.0f,  5.0f },
    {  0.0f,  0.0f,  0.0f }
} };
// Diffuse albedo per triangle material: white, green, red, light, cutout.
const std::array<float3, TRIANGLE_MAT_COUNT> g_diffuse_colors =
{ {
    { 0.80f, 0.80f, 0.80f },
    { 0.05f, 0.80f, 0.05f },
    { 0.80f, 0.05f, 0.05f },
    { 0.50f, 0.00f, 0.00f },
    { 0.70f, 0.25f, 0.00f }
} };
// Per-vertex texture coordinates, parallel to g_vertices.
// NB: Some UV scaling is baked into the coordinates for the short block, since
// the coordinates are used for the cutout texture (UVs in [0,8] tile it 8x8).
const std::array<float2, TRIANGLE_COUNT* 3> g_tex_coords =
{ {
    // Floor
    { 1.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 1.0f },
    { 1.0f, 0.0f }, { 0.0f, 1.0f }, { 1.0f, 1.0f },

    // Ceiling
    { 1.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 1.0f },
    { 1.0f, 0.0f }, { 0.0f, 1.0f }, { 1.0f, 1.0f },

    // Back wall
    { 1.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 1.0f },
    { 1.0f, 0.0f }, { 0.0f, 1.0f }, { 1.0f, 1.0f },

    // Right wall
    { 1.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 1.0f },
    { 1.0f, 0.0f }, { 0.0f, 1.0f }, { 1.0f, 1.0f },

    // Left wall
    { 1.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 1.0f },
    { 1.0f, 0.0f }, { 0.0f, 1.0f }, { 1.0f, 1.0f },

    // Short Block
    { 8.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 8.0f }, { 8.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 8.0f }, { 8.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 8.0f }, { 8.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 8.0f }, { 8.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 8.0f },
    { 8.0f, 0.0f }, { 0.0f, 8.0f }, { 8.0f, 8.0f },

    // Tall Block
    { 1.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 1.0f },
    { 1.0f, 0.0f }, { 0.0f, 1.0f }, { 1.0f, 1.0f },
    { 1.0f, 0.0f }, { 1.0f, 1.0f }, { 0.0f, 1.0f },
    { 0.0f, 1.0f }, { 1.0f, 0.0f }, { 1.0f, 1.0f },
    { 1.0f, 0.0f }, { 1.0f, 1.0f }, { 0.0f, 1.0f },
    { 0.0f, 1.0f }, { 1.0f, 0.0f }, { 1.0f, 1.0f },
    { 1.0f, 0.0f }, { 1.0f, 1.0f }, { 0.0f, 1.0f },
    { 0.0f, 1.0f }, { 1.0f, 0.0f }, { 1.0f, 1.0f },
    { 1.0f, 0.0f }, { 1.0f, 1.0f }, { 0.0f, 1.0f },
    { 0.0f, 1.0f }, { 1.0f, 0.0f }, { 1.0f, 1.0f },

    // Ceiling light
    { 1.0f, 0.0f }, { 0.0f, 0.0f }, { 0.0f, 1.0f },
    { 1.0f, 0.0f }, { 0.0f, 1.0f }, { 1.0f, 1.0f }
} };
// Custom-primitive sphere: center (410, 90, 110), radius 90; non-emissive blue.
const GeometryData::Sphere g_sphere                = {410.0f, 90.0f, 110.0f, 90.0f};
const float3               g_sphere_emission_color = {0.0f};
const float3               g_sphere_diffuse_color  = {0.1f, 0.2f, 0.8f};
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// GLFW mouse-button callback: remember which button is held and seed the
// trackball with the press position; releasing any button clears the state.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    double cursor_x = 0.0;
    double cursor_y = 0.0;
    glfwGetCursorPos( window, &cursor_x, &cursor_y );

    if( action != GLFW_PRESS )
    {
        mouse_button = -1;
        return;
    }

    mouse_button = button;
    trackball.startTracking( static_cast<int>( cursor_x ), static_cast<int>( cursor_y ) );
}
// GLFW cursor-move callback: while the left button is held, orbit around the
// look-at point; while the right button is held, pan/rotate with the eye
// fixed. Either drag invalidates the accumulated image.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    Params* params = static_cast<Params*>( glfwGetWindowUserPointer( window ) );

    const bool left_drag  = ( mouse_button == GLFW_MOUSE_BUTTON_LEFT );
    const bool right_drag = ( mouse_button == GLFW_MOUSE_BUTTON_RIGHT );
    if( !left_drag && !right_drag )
        return;

    trackball.setViewMode( left_drag ? sutil::Trackball::LookAtFixed : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), params->width, params->height );
    camera_changed = true;
}
// GLFW window-resize callback: record the new launch dimensions and flag the
// camera and output buffers as stale. Ignored while the window is minimized,
// so rendering continues at the pre-minimize resolution.
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    if( minimized )
        return;

    // Output dimensions must be at least 1 in both x and y.
    sutil::ensureMinimumSize( res_x, res_y );

    Params* launch_params = static_cast<Params*>( glfwGetWindowUserPointer( window ) );
    launch_params->width  = res_x;
    launch_params->height = res_y;

    camera_changed = true;
    resize_dirty   = true;
}
// GLFW iconify callback: track minimized state so windowSizeCallback can skip
// resizes while the window is iconified.
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = ( iconified > 0 );
}
// GLFW key callback: Q or ESC (on press) closes the application window.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    if( action == GLFW_PRESS )
    {
        if( key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE )
        {
            glfwSetWindowShouldClose( window, true );
        }
    }
    // NOTE(review): this branch runs for non-press events (release/repeat) of
    // the G key, not presses — confirm that is intended before implementing
    // the UI toggle here.
    else if( key == GLFW_KEY_G )
    {
        // toggle UI draw
    }
}
// GLFW scroll callback: forward vertical wheel motion to the trackball (dolly)
// and mark the camera dirty only when the trackball consumed the event.
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
    const bool consumed = trackball.wheelEvent( (int)yscroll );
    if( consumed )
        camera_changed = true;
}
//------------------------------------------------------------------------------
//
// Helper functions
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Print command-line usage to stderr and terminate the process.
// Note: exits with status 0 in both the --help and the bad-argument case.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n";
    std::cerr << "Options: --file | -f <filename> File for image output\n";
    std::cerr << " --launch-samples | -s Number of samples per pixel per launch (default 16)\n";
    std::cerr << " --no-gl-interop Disable GL interop for display\n";
    std::cerr << " --dim=<width>x<height> Set image dimensions; defaults to 768x768\n";
    std::cerr << " --help | -h Print this usage message\n";
    exit( 0 );
}
// Allocate the per-pixel float4 accumulation buffer, fill in the static launch
// parameters (sampling config, area light, top-level traversable) and create
// the CUDA stream plus the device-side Params copy.
void initLaunchParams( CutoutsState& state )
{
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.params.accum_buffer ),
                            state.params.width*state.params.height*sizeof(float4) ) );
    state.params.frame_buffer = nullptr;  // Will be set when output buffer is mapped

    state.params.samples_per_launch = samples_per_launch;
    state.params.subframe_index = 0u;

    // Parallelogram area light just below the emissive ceiling quad.
    state.params.light.emission = make_float3( 15.0f, 15.0f, 5.0f );
    state.params.light.corner = make_float3( 343.0f, 548.5f, 227.0f );
    state.params.light.v1 = make_float3( 0.0f, 0.0f, 105.0f );
    state.params.light.v2 = make_float3( -130.0f, 0.0f, 0.0f );
    state.params.light.normal = normalize ( cross( state.params.light.v1, state.params.light.v2) );

    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params ), sizeof( Params ) ) );

    // Trace against the IAS that holds both the triangle and sphere GAS.
    state.params.handle = state.ias_handle;
}
// Push the interactive camera state into the launch params (eye position and
// UVW basis). No-op unless a camera event was recorded since the last call.
void handleCameraUpdate( Params& params )
{
    if( !camera_changed )
        return;
    camera_changed = false;

    // Keep the camera aspect in sync with the current launch dimensions.
    camera.setAspectRatio( static_cast<float>( params.width ) / static_cast<float>( params.height ) );
    params.eye = camera.eye();
    camera.UVWFrame( params.U, params.V, params.W );
}
// Resize the display output buffer and reallocate the accumulation buffer to
// the new launch dimensions. No-op unless a resize event is pending.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    if( !resize_dirty )
        return;
    resize_dirty = false;

    output_buffer.resize( params.width, params.height );

    // Realloc accumulation buffer.
    // Fix: '&params.accum_buffer' had been corrupted to '¶ms.accum_buffer'
    // (HTML-entity mangling of '&para'), which does not compile.
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( params.accum_buffer ) ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &params.accum_buffer ),
                            params.width*params.height*sizeof(float4) ) );
}
// Per-frame state sync: restart progressive accumulation if the camera moved
// or the window was resized, then apply the pending camera/resize updates.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    const bool restart_accumulation = camera_changed || resize_dirty;
    if( restart_accumulation )
        params.subframe_index = 0;

    handleCameraUpdate( params );
    handleResize( output_buffer, params );
}
// Render one subframe: map the output buffer, upload the launch params on the
// state's stream, run optixLaunch over width x height, then unmap and block
// until the device is done.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, CutoutsState& state )
{
    // Map the (possibly GL-interop) output buffer and point the params at it.
    uchar4* result_buffer_data = output_buffer.map();
    state.params.frame_buffer = result_buffer_data;
    CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( state.d_params ),
                                 &state.params,
                                 sizeof( Params ),
                                 cudaMemcpyHostToDevice,
                                 state.stream
                                 ) );

    OPTIX_CHECK( optixLaunch(
        state.pipeline,
        state.stream,
        reinterpret_cast<CUdeviceptr>( state.d_params ),
        sizeof( Params ),
        &state.sbt,
        state.params.width,   // launch width
        state.params.height,  // launch height
        1                     // launch depth
        ) );
    output_buffer.unmap();
    CUDA_SYNC_CHECK();
}
// Blit the rendered output buffer to the GLFW window via GLDisplay, scaling
// to the window's framebuffer resolution (which may differ on HiDPI displays).
void displaySubframe(
    sutil::CUDAOutputBuffer<uchar4>& output_buffer,
    sutil::GLDisplay&                gl_display,
    GLFWwindow*                      window )
{
    int framebuf_res_x = 0;  // The display's resolution (could be HDPI res)
    int framebuf_res_y = 0;  //
    glfwGetFramebufferSize( window, &framebuf_res_x, &framebuf_res_y );
    gl_display.display(
        output_buffer.width(),
        output_buffer.height(),
        framebuf_res_x,
        framebuf_res_y,
        output_buffer.getPBO()
        );
}
// OptiX context log callback: mirror device-context messages to stderr in the
// form "[level][        tag]: message". Registered in createContext().
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */)
{
    std::cerr << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: "
              << message << "\n";
}
// Set up the initial camera (looking down the Cornell box from the front) and
// configure the trackball that the mouse callbacks drive.
void initCameraState()
{
    camera.setEye( make_float3( 278.0f, 273.0f, -900.0f ) );
    camera.setLookat( make_float3( 278.0f, 273.0f, 330.0f ) );
    camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
    camera.setFovY( 35.0f );
    camera_changed = true;

    trackball.setCamera( &camera );
    trackball.setMoveSpeed( 10.0f );
    // Reference frame: x-right, z-forward, y-up (latitudinal axis).
    trackball.setReferenceFrame( make_float3( 1.0f, 0.0f, 0.0f ),
                                 make_float3( 0.0f, 0.0f, 1.0f ),
                                 make_float3( 0.0f, 1.0f, 0.0f ) );
    trackball.setGimbalLock(true);
}
// Initialize CUDA and create the OptiX device context on the current CUDA
// context, routing all OptiX log messages (up to level 4) to context_log_cb.
void createContext( CutoutsState& state )
{
    // Initialize CUDA (a no-op call that forces runtime/context creation).
    CUDA_CHECK( cudaFree( 0 ) );

    OptixDeviceContext context;
    CUcontext cu_ctx = 0;  // zero means take the current context
    OPTIX_CHECK( optixInit() );
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction       = &context_log_cb;
    options.logCallbackLevel          = 4;
    OPTIX_CHECK( optixDeviceContextCreate( cu_ctx, &options, &context ) );

    state.context = context;
}
// Build the two geometry acceleration structures (GAS):
//  1) the Cornell-box triangle mesh, with per-triangle SBT indices and anyhit
//     disabled on all materials except the cutout one, and
//  2) the sphere as a single custom primitive (AABB build input).
// Both builds request a compacted size and compact in place when it is smaller.
void buildGeomAccel( CutoutsState& state )
{
    //
    // Build triangle GAS
    //
    {
        // Upload vertices, per-triangle material indices and texture coords.
        const size_t vertices_size_in_bytes = g_vertices.size() * sizeof( Vertex );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_vertices ), vertices_size_in_bytes ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( state.d_vertices ), g_vertices.data(), vertices_size_in_bytes,
                                cudaMemcpyHostToDevice ) );

        CUdeviceptr d_mat_indices = 0;
        const size_t mat_indices_size_in_bytes = g_mat_indices.size() * sizeof( uint32_t );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_mat_indices ), mat_indices_size_in_bytes ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_mat_indices ), g_mat_indices.data(),
                                mat_indices_size_in_bytes, cudaMemcpyHostToDevice ) );

        const size_t tex_coords_size_in_bytes = g_tex_coords.size() * sizeof( float2 );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_tex_coords ), tex_coords_size_in_bytes ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( state.d_tex_coords ), g_tex_coords.data(),
                                tex_coords_size_in_bytes, cudaMemcpyHostToDevice ) );

        uint32_t triangle_input_flags[TRIANGLE_MAT_COUNT] = {
            // One per SBT record for this build input
            OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
            OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
            OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
            OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
            // Do not disable anyhit on the cutout material for the short block
            OPTIX_GEOMETRY_FLAG_NONE
        };

        OptixBuildInput triangle_input = {};
        triangle_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
        triangle_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
        triangle_input.triangleArray.vertexStrideInBytes = sizeof( Vertex );
        triangle_input.triangleArray.numVertices = static_cast<uint32_t>( g_vertices.size() );
        triangle_input.triangleArray.vertexBuffers = &state.d_vertices;
        triangle_input.triangleArray.flags = triangle_input_flags;
        triangle_input.triangleArray.numSbtRecords = TRIANGLE_MAT_COUNT;
        triangle_input.triangleArray.sbtIndexOffsetBuffer = d_mat_indices;
        triangle_input.triangleArray.sbtIndexOffsetSizeInBytes = sizeof( uint32_t );
        triangle_input.triangleArray.sbtIndexOffsetStrideInBytes = sizeof( uint32_t );

        OptixAccelBuildOptions accel_options = {};
        accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
        accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

        OptixAccelBufferSizes gas_buffer_sizes;
        OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &accel_options, &triangle_input,
                                                   1,  // num_build_inputs
                                                   &gas_buffer_sizes ) );

        CUdeviceptr d_temp_buffer;
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer ), gas_buffer_sizes.tempSizeInBytes ) );

        // non-compacted output, with the emitted compacted size appended after
        // the 8-byte-aligned output region.
        CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
        size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
        CUDA_CHECK( cudaMalloc(
            reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ),
            compactedSizeOffset + 8
            ) );

        OptixAccelEmitDesc emitProperty = {};
        emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
        emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );

        OPTIX_CHECK( optixAccelBuild(
            state.context,
            0,  // CUDA stream
            &accel_options,
            &triangle_input,
            1,  // num build inputs
            d_temp_buffer,
            gas_buffer_sizes.tempSizeInBytes,
            d_buffer_temp_output_gas_and_compacted_size,
            gas_buffer_sizes.outputSizeInBytes,
            &state.triangle_gas_handle,
            &emitProperty,  // emitted property list
            1               // num emitted properties
            ) );

        CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer ) ) );
        CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_mat_indices ) ) );

        size_t compacted_gas_size;
        CUDA_CHECK( cudaMemcpy(
            &compacted_gas_size,
            (void*)emitProperty.result,
            sizeof( size_t ),
            cudaMemcpyDeviceToHost
            ) );

        // Compact only if it actually saves memory.
        if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
        {
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_triangle_gas_output_buffer ), compacted_gas_size ) );

            // use handle as input and output
            OPTIX_CHECK( optixAccelCompact( state.context, 0, state.triangle_gas_handle, state.d_triangle_gas_output_buffer, compacted_gas_size, &state.triangle_gas_handle ) );

            CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
        }
        else
        {
            state.d_triangle_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
        }
    }

    //
    // Build sphere GAS
    //
    {
        OptixAccelBuildOptions accel_options = {};
        accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
        accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

        // AABB build input: bounding box of the custom sphere primitive.
        float3 m_min = g_sphere.center - g_sphere.radius;
        float3 m_max = g_sphere.center + g_sphere.radius;
        OptixAabb aabb = {m_min.x, m_min.y, m_min.z, m_max.x, m_max.y, m_max.z};

        CUdeviceptr d_aabb_buffer;
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_aabb_buffer ), sizeof( OptixAabb ) ) );
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_aabb_buffer ), &aabb, sizeof( OptixAabb ),
                                cudaMemcpyHostToDevice ) );

        uint32_t sphere_input_flag = OPTIX_GEOMETRY_FLAG_NONE;
        OptixBuildInput sphere_input = {};
        sphere_input.type = OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES;
        sphere_input.customPrimitiveArray.aabbBuffers = &d_aabb_buffer;
        sphere_input.customPrimitiveArray.numPrimitives = 1;
        sphere_input.customPrimitiveArray.flags = &sphere_input_flag;
        sphere_input.customPrimitiveArray.numSbtRecords = 1;

        OptixAccelBufferSizes gas_buffer_sizes;
        OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context,
                                                   &accel_options,
                                                   &sphere_input,
                                                   1,  // num_build_inputs
                                                   &gas_buffer_sizes ) );
        CUdeviceptr d_temp_buffer;
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer ), gas_buffer_sizes.tempSizeInBytes ) );

        // non-compacted output, with the emitted compacted size appended.
        CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
        size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ), compactedSizeOffset + 8 ) );

        OptixAccelEmitDesc emitProperty = {};
        emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
        emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );

        OPTIX_CHECK( optixAccelBuild( state.context,
                                      0,  // CUDA stream
                                      &accel_options,
                                      &sphere_input,
                                      1,  // num build inputs
                                      d_temp_buffer,
                                      gas_buffer_sizes.tempSizeInBytes,
                                      d_buffer_temp_output_gas_and_compacted_size,
                                      gas_buffer_sizes.outputSizeInBytes,
                                      &state.sphere_gas_handle,
                                      &emitProperty,  // emitted property list
                                      1 ) );          // num emitted properties

        CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer ) ) );
        CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_aabb_buffer ) ) );

        size_t compacted_gas_size;
        CUDA_CHECK( cudaMemcpy( &compacted_gas_size, (void*)emitProperty.result, sizeof( size_t ), cudaMemcpyDeviceToHost ) );

        // Compact only if it actually saves memory.
        if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
        {
            CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_sphere_gas_output_buffer ), compacted_gas_size ) );

            // use handle as input and output
            OPTIX_CHECK( optixAccelCompact( state.context, 0, state.sphere_gas_handle, state.d_sphere_gas_output_buffer, compacted_gas_size, &state.sphere_gas_handle ) );

            CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
        }
        else
        {
            state.d_sphere_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
        }
    }
}
// Build the instance acceleration structure (IAS) holding two identity-
// transformed instances: the triangle GAS (SBT offset 0) and the sphere GAS
// (SBT offset after all triangle hitgroup records).
void buildInstanceAccel( CutoutsState& state )
{
    CUdeviceptr d_instances;
    size_t instance_size_in_bytes = sizeof( OptixInstance ) * 2;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_instances ), instance_size_in_bytes ) );

    OptixBuildInput instance_input = {};
    instance_input.type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
    instance_input.instanceArray.instances = d_instances;
    instance_input.instanceArray.numInstances = 2;

    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags = OPTIX_BUILD_FLAG_NONE;
    accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

    OptixAccelBufferSizes ias_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &accel_options, &instance_input,
                                               1,  // num build inputs
                                               &ias_buffer_sizes ) );

    CUdeviceptr d_temp_buffer;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer ), ias_buffer_sizes.tempSizeInBytes ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_ias_output_buffer ), ias_buffer_sizes.outputSizeInBytes ) );

    // Use the identity matrix for the instance transform
    Instance instance = { { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0 } };

    OptixInstance optix_instances[2];
    memset( optix_instances, 0, instance_size_in_bytes );

    optix_instances[0].traversableHandle = state.triangle_gas_handle;
    optix_instances[0].flags = OPTIX_INSTANCE_FLAG_NONE;
    optix_instances[0].instanceId = 0;
    optix_instances[0].sbtOffset = 0;
    optix_instances[0].visibilityMask = 1;
    memcpy( optix_instances[0].transform, instance.transform, sizeof( float ) * 12 );

    // Sphere SBT records follow all (material x ray-type) triangle records.
    optix_instances[1].traversableHandle = state.sphere_gas_handle;
    optix_instances[1].flags = OPTIX_INSTANCE_FLAG_NONE;
    optix_instances[1].instanceId = 1;
    optix_instances[1].sbtOffset = TRIANGLE_MAT_COUNT*RAY_TYPE_COUNT;
    optix_instances[1].visibilityMask = 1;
    memcpy( optix_instances[1].transform, instance.transform, sizeof( float ) * 12 );

    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_instances ), &optix_instances, instance_size_in_bytes,
                            cudaMemcpyHostToDevice ) );

    OPTIX_CHECK( optixAccelBuild( state.context,
                                  0,  // CUDA stream
                                  &accel_options,
                                  &instance_input,
                                  1,  // num build inputs
                                  d_temp_buffer,
                                  ias_buffer_sizes.tempSizeInBytes,
                                  state.d_ias_output_buffer,
                                  ias_buffer_sizes.outputSizeInBytes,
                                  &state.ias_handle,
                                  nullptr,  // emitted property list
                                  0         // num emitted properties
                                  ) );

    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_instances ) ) );
}
// Compile the two OptiX modules (optixCutouts.cu and the shared sphere.cu
// intersection code) and fill in the pipeline compile options shared by all
// program groups and the pipeline.
void createModule( CutoutsState& state )
{
    OptixModuleCompileOptions module_compile_options = {};
    module_compile_options.maxRegisterCount = 100;
    module_compile_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
    module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;

    state.pipeline_compile_options.usesMotionBlur = false;
    state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_ANY;
    // Two 32-bit payload slots (used to pack a pointer in the device code).
    state.pipeline_compile_options.numPayloadValues = 2;
    // Attribute count is dictated by the sphere intersection program.
    state.pipeline_compile_options.numAttributeValues = sphere::NUM_ATTRIBUTE_VALUES;
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;  // should be OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
    state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";

    size_t inputSize = 0;
    const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixCutouts.cu", inputSize );

    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
        state.context,
        &module_compile_options,
        &state.pipeline_compile_options,
        input,
        inputSize,
        log,
        &sizeof_log,
        &state.ptx_module
        ) );

    // The sphere intersection code ships with sutil (no sample name/dir).
    input = sutil::getInputData( nullptr, nullptr, "sphere.cu", inputSize );
    OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
        state.context,
        &module_compile_options,
        &state.pipeline_compile_options,
        input,
        inputSize,
        log,
        &sizeof_log,
        &state.sphere_module
        ) );
}
// Create the five program groups: raygen, radiance miss, occlusion miss
// (a NULL miss program), and the radiance/occlusion hit groups, both of which
// use the cutout anyhit program and the sphere intersection program.
void createProgramGroups( CutoutsState& state )
{
    OptixProgramGroupOptions program_group_options = {};

    OptixProgramGroupDesc raygen_prog_group_desc = {};
    raygen_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
    raygen_prog_group_desc.raygen.module = state.ptx_module;
    raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";

    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context,
                                              &raygen_prog_group_desc,
                                              1,  // num program groups
                                              &program_group_options,
                                              log, &sizeof_log,
                                              &state.raygen_prog_group ) );

    OptixProgramGroupDesc miss_prog_group_desc = {};
    miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module = state.ptx_module;
    miss_prog_group_desc.miss.entryFunctionName = "__miss__radiance";
    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context,
                                              &miss_prog_group_desc,
                                              1,  // num program groups
                                              &program_group_options,
                                              log, &sizeof_log,
                                              &state.radiance_miss_group ) );

    memset( &miss_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
    miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module = nullptr;  // NULL miss program for occlusion rays
    miss_prog_group_desc.miss.entryFunctionName = nullptr;
    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &miss_prog_group_desc,
                                              1,  // num program groups
                                              &program_group_options, log, &sizeof_log, &state.occlusion_miss_group ) );

    OptixProgramGroupDesc hit_prog_group_desc = {};
    hit_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hit_prog_group_desc.hitgroup.moduleCH = state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__radiance";
    hit_prog_group_desc.hitgroup.moduleAH = state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameAH = "__anyhit__ah";
    hit_prog_group_desc.hitgroup.moduleIS = state.sphere_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameIS = "__intersection__sphere";
    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context,
                                              &hit_prog_group_desc,
                                              1,  // num program groups
                                              &program_group_options,
                                              log,
                                              &sizeof_log,
                                              &state.radiance_hit_group ) );

    memset( &hit_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
    hit_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hit_prog_group_desc.hitgroup.moduleCH = state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__occlusion";
    hit_prog_group_desc.hitgroup.moduleAH = state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameAH = "__anyhit__ah";
    hit_prog_group_desc.hitgroup.moduleIS = state.sphere_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameIS = "__intersection__sphere";
    sizeof_log = sizeof( log );
    // Fix: use OPTIX_CHECK_LOG like every other creation call above so the
    // compile log is reported on failure (was plain OPTIX_CHECK, which
    // discards the log even though log/sizeof_log are passed in).
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context,
                                              &hit_prog_group_desc,
                                              1,  // num program groups
                                              &program_group_options,
                                              log,
                                              &sizeof_log,
                                              &state.occlusion_hit_group ) );
}
// Links all program groups into a single OptiX pipeline and configures its stack sizes.
void createPipeline( CutoutsState& state )
{
    // Radiance rays may spawn one level of occlusion (shadow) rays -> depth 2.
    const uint32_t max_trace_depth = 2;
    OptixProgramGroup program_groups[] =
    {
        state.raygen_prog_group,
        state.radiance_miss_group,
        state.occlusion_miss_group,
        state.radiance_hit_group,
        state.occlusion_hit_group
    };
    OptixPipelineLinkOptions pipeline_link_options = {};
    pipeline_link_options.maxTraceDepth = max_trace_depth;
    pipeline_link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate( state.context,
                                          &state.pipeline_compile_options,
                                          &pipeline_link_options,
                                          program_groups,
                                          sizeof( program_groups ) / sizeof( program_groups[0] ),
                                          log,
                                          &sizeof_log,
                                          &state.pipeline ) );
    // Accumulate the per-program-group stack requirements, then compute and
    // apply pipeline-wide stack sizes for the chosen trace/callable depths.
    OptixStackSizes stack_sizes = {};
    for( auto& prog_group : program_groups )
    {
        OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
    }
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
                                             0,  // maxCCDepth
                                             0,  // maxDCDEpth
                                             &direct_callable_stack_size_from_traversal,
                                             &direct_callable_stack_size_from_state, &continuation_stack_size ) );
    OPTIX_CHECK( optixPipelineSetStackSize( state.pipeline, direct_callable_stack_size_from_traversal,
                                            direct_callable_stack_size_from_state, continuation_stack_size,
                                            1  // maxTraversableDepth -- NOTE(review): scene builds an IAS over GASes; confirm this depth matches the traversable graph flags
                                            ) );
}
// Builds the Shader Binding Table: one raygen record, one miss record per ray
// type, and one hit-group record per (material, ray type) pair for the
// triangle materials plus the single sphere material.
void createSBT( CutoutsState& state )
{
    // --- Raygen record -------------------------------------------------------
    CUdeviceptr d_raygen_record;
    const size_t raygen_record_size = sizeof( RayGenRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_raygen_record ), raygen_record_size ) );
    RayGenRecord rg_sbt;
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &rg_sbt ) );
    rg_sbt.data = {1.0f, 0.f, 0.f};
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_raygen_record ), &rg_sbt, raygen_record_size,
                            cudaMemcpyHostToDevice ) );
    // --- Miss records (radiance, then occlusion) -----------------------------
    CUdeviceptr d_miss_records;
    const size_t miss_record_size = sizeof( MissRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_miss_records ), miss_record_size * RAY_TYPE_COUNT ) );
    MissRecord ms_sbt[2];
    OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_miss_group, &ms_sbt[0] ) );
    ms_sbt[0].data = {0.0f, 0.0f, 0.0f};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_miss_group, &ms_sbt[1] ) );
    ms_sbt[1].data = {0.0f, 0.0f, 0.0f};
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_miss_records ), ms_sbt, miss_record_size * RAY_TYPE_COUNT,
                            cudaMemcpyHostToDevice ) );
    // --- Hit-group records: RAY_TYPE_COUNT records per material --------------
    CUdeviceptr d_hitgroup_records;
    const size_t hitgroup_record_size = sizeof( HitGroupRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_hitgroup_records ),
                            hitgroup_record_size * ( RAY_TYPE_COUNT * ( TRIANGLE_MAT_COUNT + SPHERE_MAT_COUNT ) ) ) );
    HitGroupRecord hitgroup_records[RAY_TYPE_COUNT * ( TRIANGLE_MAT_COUNT + SPHERE_MAT_COUNT )];
    // Set up the HitGroupRecords for the triangle materials
    for( int i = 0; i < TRIANGLE_MAT_COUNT; ++i )
    {
        {
            const int sbt_idx = i*RAY_TYPE_COUNT+0; // SBT for radiance ray-type for ith material
            OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_hit_group, &hitgroup_records[sbt_idx] ) );
            hitgroup_records[ sbt_idx ].data.emission_color = g_emission_colors[i];
            hitgroup_records[ sbt_idx ].data.diffuse_color = g_diffuse_colors[i];
            hitgroup_records[ sbt_idx ].data.vertices = reinterpret_cast<float4*>(state.d_vertices);
            hitgroup_records[ sbt_idx ].data.tex_coords = reinterpret_cast<float2*>(state.d_tex_coords);
        }
        {
            const int sbt_idx = i*RAY_TYPE_COUNT+1; // SBT for occlusion ray-type for ith material
            // Occlusion records carry no material colors; zero the record first.
            memset( &hitgroup_records[sbt_idx], 0, hitgroup_record_size );
            OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_hit_group, &hitgroup_records[sbt_idx] ) );
            hitgroup_records[ sbt_idx ].data.vertices = reinterpret_cast<float4*>(state.d_vertices);
            hitgroup_records[ sbt_idx ].data.tex_coords = reinterpret_cast<float2*>(state.d_tex_coords);
        }
    }
    // Set up the HitGroupRecords for the sphere material
    {
        const int sbt_idx = TRIANGLE_MAT_COUNT * RAY_TYPE_COUNT+0; // SBT for radiance ray-type for sphere material
        OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_hit_group, &hitgroup_records[sbt_idx] ) );
        hitgroup_records[ sbt_idx ].data.emission_color = g_sphere_emission_color;
        hitgroup_records[ sbt_idx ].data.diffuse_color = g_sphere_diffuse_color;
        hitgroup_records[ sbt_idx ].data.sphere = g_sphere;
    }
    {
        const int sbt_idx = TRIANGLE_MAT_COUNT * RAY_TYPE_COUNT+1; // SBT for occlusion ray-type for sphere material
        memset( &hitgroup_records[sbt_idx], 0, hitgroup_record_size );
        OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_hit_group, &hitgroup_records[sbt_idx] ) );
        hitgroup_records[ sbt_idx ].data.sphere = g_sphere;
    }
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_hitgroup_records ), hitgroup_records,
                            hitgroup_record_size * ( RAY_TYPE_COUNT * ( TRIANGLE_MAT_COUNT + SPHERE_MAT_COUNT ) ),
                            cudaMemcpyHostToDevice ) );
    // --- Publish device pointers/strides into the SBT descriptor -------------
    state.sbt.raygenRecord = d_raygen_record;
    state.sbt.missRecordBase = d_miss_records;
    state.sbt.missRecordStrideInBytes = static_cast<uint32_t>( miss_record_size );
    state.sbt.missRecordCount = RAY_TYPE_COUNT;
    state.sbt.hitgroupRecordBase = d_hitgroup_records;
    state.sbt.hitgroupRecordStrideInBytes = static_cast<uint32_t>( hitgroup_record_size );
    state.sbt.hitgroupRecordCount = RAY_TYPE_COUNT * ( TRIANGLE_MAT_COUNT + SPHERE_MAT_COUNT );
}
// Destroys all OptiX objects (pipeline before program groups, modules before
// context) and frees every device allocation owned by the sample state.
void cleanupState( CutoutsState& state )
{
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_miss_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_hit_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_hit_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_miss_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module ) );
    OPTIX_CHECK( optixModuleDestroy( state.sphere_module ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );
    // Device buffers: SBT records, geometry inputs, acceleration structures,
    // accumulation buffer, launch params, and the instance AS.
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_vertices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_tex_coords ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_triangle_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_sphere_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.params.accum_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_params ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_ias_output_buffer ) ) );
}
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
// Entry point: parses command-line options, builds the OptiX scene state, and
// either runs an interactive GLFW render loop or renders a single frame to a
// file (--file). Returns 1 on any caught exception.
int main( int argc, char* argv[] )
{
    CutoutsState state;
    state.params.width = 768;
    state.params.height = 768;
    sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;
    //
    // Parse command line options
    //
    std::string outfile;
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--no-gl-interop" )
        {
            // Fall back to plain device memory when GL interop is unavailable.
            output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
            use_pbo = false;
        }
        else if( arg == "--file" || arg == "-f" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outfile = argv[++i];
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            int w, h;
            sutil::parseDimensions( dims_arg.c_str(), w, h );
            state.params.width = w;
            state.params.height = h;
        }
        else if( arg == "--launch-samples" || arg == "-s" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            samples_per_launch = atoi( argv[++i] );
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    try
    {
        initCameraState();
        //
        // Set up OptiX state
        //
        createContext ( state );
        buildGeomAccel ( state );
        buildInstanceAccel ( state );
        createModule ( state );
        createProgramGroups( state );
        createPipeline ( state );
        createSBT ( state );
        initLaunchParams( state );
        if( outfile.empty() )
        {
            // Interactive mode: open a window and install input callbacks.
            GLFWwindow* window = sutil::initUI( "optixCutouts", state.params.width, state.params.height );
            glfwSetMouseButtonCallback ( window, mouseButtonCallback );
            glfwSetCursorPosCallback ( window, cursorPosCallback );
            glfwSetWindowSizeCallback ( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback ( window, keyCallback );
            glfwSetScrollCallback ( window, scrollCallback );
            glfwSetWindowUserPointer ( window, &state.params );
            //
            // Render loop
            //
            {
                sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, state.params.width, state.params.height );
                output_buffer.setStream( state.stream );
                sutil::GLDisplay gl_display;
                // Timers for the on-screen stats overlay.
                std::chrono::duration<double> state_update_time( 0.0 );
                std::chrono::duration<double> render_time( 0.0 );
                std::chrono::duration<double> display_time( 0.0 );
                do
                {
                    auto t0 = std::chrono::steady_clock::now();
                    glfwPollEvents();
                    updateState( output_buffer, state.params );
                    auto t1 = std::chrono::steady_clock::now();
                    state_update_time += t1 - t0;
                    t0 = t1;
                    launchSubframe( output_buffer, state );
                    t1 = std::chrono::steady_clock::now();
                    render_time += t1 - t0;
                    t0 = t1;
                    displaySubframe( output_buffer, gl_display, window );
                    t1 = std::chrono::steady_clock::now();
                    display_time += t1 - t0;
                    sutil::displayStats( state_update_time, render_time, display_time );
                    glfwSwapBuffers(window);
                    // Advancing the subframe index drives progressive accumulation.
                    ++state.params.subframe_index;
                }
                while( !glfwWindowShouldClose( window ) );
                CUDA_SYNC_CHECK();
            }
            sutil::cleanupUI( window );
        }
        else
        {
            // Offline mode: render one subframe and write it to `outfile`.
            if( use_pbo)
            {
                sutil::initGLFW(); // For GL context
                sutil::initGL();
            }
            sutil::CUDAOutputBuffer<uchar4> output_buffer(output_buffer_type, state.params.width, state.params.height);
            handleCameraUpdate(state.params);
            handleResize(output_buffer, state.params);
            launchSubframe(output_buffer, state);
            sutil::ImageBuffer buffer;
            buffer.data = output_buffer.getHostPointer();
            buffer.width = output_buffer.width();
            buffer.height = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            sutil::saveImage(outfile.c_str(), buffer, false);
            glfwTerminate();
        }
        cleanupState( state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCutouts/optixCutouts.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "optixCutouts.h"
#include <cuda/random.h>
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Per-ray data for radiance rays, passed to closest-hit/miss programs via a
// pointer packed into two 32-bit payload registers.
struct RadiancePRD
{
    // TODO: move some state directly into payload registers?
    float3 emitted;      // emission picked up at the current hit
    float3 radiance;     // direct-light contribution sampled at the hit
    float3 attenuation;  // running throughput along the path
    float3 origin;       // origin of the next bounce ray
    float3 direction;    // direction of the next bounce ray
    unsigned int seed;   // RNG state carried across bounces
    int countEmitted;    // non-zero -> count emission at the next hit
    int done;            // non-zero -> terminate the path
    int pad;
};
// Orthonormal basis around a given normal, used to transform cosine-weighted
// hemisphere samples from local (tangent) space into world space.
struct Onb
{
    __forceinline__ __device__ Onb( const float3& normal )
    {
        m_normal = normal;
        // Pick the binormal from the larger of |x| and |z| to avoid a
        // degenerate cross product when the normal is near an axis.
        if( fabs( m_normal.x ) > fabs( m_normal.z ) )
        {
            m_binormal.x = -m_normal.y;
            m_binormal.y = m_normal.x;
            m_binormal.z = 0;
        }
        else
        {
            m_binormal.x = 0;
            m_binormal.y = -m_normal.z;
            m_binormal.z = m_normal.y;
        }
        m_binormal = normalize( m_binormal );
        m_tangent = cross( m_binormal, m_normal );
    }
    // Transforms p from this local frame into world space (in place).
    __forceinline__ __device__ void inverse_transform( float3& p ) const
    {
        p = p.x * m_tangent + p.y * m_binormal + p.z * m_normal;
    }
    float3 m_tangent;
    float3 m_binormal;
    float3 m_normal;
};
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Reassemble a 64-bit pointer from the two 32-bit payload words produced by
// packPointer (i0 = high word, i1 = low word).
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
    const unsigned long long high_bits = static_cast<unsigned long long>( i0 ) << 32;
    const unsigned long long all_bits  = high_bits | i1;
    return reinterpret_cast<void*>( all_bits );
}
// Split a 64-bit pointer into two 32-bit words so it can ride in optixTrace
// payload registers (i0 receives the high word, i1 the low word).
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
    const unsigned long long all_bits = reinterpret_cast<unsigned long long>( ptr );
    i0 = static_cast<unsigned int>( all_bits >> 32 );
    i1 = static_cast<unsigned int>( all_bits & 0x00000000ffffffffull );
}
// Recovers the RadiancePRD pointer that the raygen program packed into
// payload registers 0 and 1 before tracing.
static __forceinline__ __device__ RadiancePRD* getPRD()
{
    const unsigned int u0 = optixGetPayload_0();
    const unsigned int u1 = optixGetPayload_1();
    return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) );
}
// Writes the occlusion result (0/1) into payload register 0 of the shadow ray.
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
    optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
// Cosine-weighted hemisphere sample from two uniform variates (u1, u2):
// sample a disk uniformly, then project onto the unit hemisphere (+z up).
static __forceinline__ __device__ void cosine_sample_hemisphere( const float u1, const float u2, float3& p )
{
    // Uniform disk sample in polar coordinates.
    const float radius = sqrtf( u1 );
    const float angle  = 2.0f * M_PIf * u2;
    p.x = radius * cosf( angle );
    p.y = radius * sinf( angle );
    // Lift onto the hemisphere; clamp guards against negative round-off.
    p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x * p.x - p.y * p.y ) );
}
// Traces one radiance ray, passing `prd` to the hit/miss programs via two
// packed payload registers.
static __forceinline__ __device__ void traceRadiance(
        OptixTraversableHandle handle,
        float3 ray_origin,
        float3 ray_direction,
        float tmin,
        float tmax,
        RadiancePRD* prd
        )
{
    // TODO: deduce stride from num ray-types passed in params
    unsigned int u0, u1;
    packPointer( prd, u0, u1 );
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f, // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_NONE,
            RAY_TYPE_RADIANCE, // SBT offset
            RAY_TYPE_COUNT, // SBT stride
            RAY_TYPE_RADIANCE, // missSBTIndex
            u0, u1 );
}
// Traces one shadow ray and returns true if anything opaque lies between
// tmin and tmax. Terminates on first hit since any hit suffices.
static __forceinline__ __device__ bool traceOcclusion(
        OptixTraversableHandle handle,
        float3 ray_origin,
        float3 ray_direction,
        float tmin,
        float tmax
        )
{
    unsigned int occluded = 0u;  // set to 1 by __closesthit__occlusion
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f, // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
            RAY_TYPE_OCCLUSION, // SBT offset
            RAY_TYPE_COUNT, // SBT stride
            RAY_TYPE_OCCLUSION, // missSBTIndex
            occluded );
    return occluded;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Ray generation: one thread per pixel. Traces `samples_per_launch` jittered
// camera paths (up to 4 segments each), accumulates into accum_buffer across
// subframes, and writes the tonemapped color to frame_buffer.
extern "C" __global__ void __raygen__rg()
{
    const int w = params.width;
    const int h = params.height;
    const float3 eye = params.eye;
    const float3 U = params.U;
    const float3 V = params.V;
    const float3 W = params.W;
    const uint3 idx = optixGetLaunchIndex();
    const int subframe_index = params.subframe_index;
    // Seed varies per pixel and per subframe for decorrelated samples.
    unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index );
    float3 result = make_float3( 0.0f );
    int i = params.samples_per_launch;
    do
    {
        // The center of each pixel is at fraction (0.5,0.5)
        const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) );
        // Map pixel + jitter to NDC in [-1,1]^2, then into camera-frame direction.
        const float2 d = 2.0f * make_float2(
                ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
                ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
                ) - 1.0f;
        float3 ray_direction = normalize(d.x*U + d.y*V + W);
        float3 ray_origin = eye;
        RadiancePRD prd;
        prd.emitted = make_float3(0.f);
        prd.radiance = make_float3(0.f);
        prd.attenuation = make_float3(1.f);
        prd.countEmitted = true;
        prd.done = false;
        prd.seed = seed;
        int depth = 0;
        for( ;; )
        {
            traceRadiance(
                    params.handle,
                    ray_origin,
                    ray_direction,
                    0.01f, // tmin // TODO: smarter offset
                    1e16f, // tmax
                    &prd );
            result += prd.emitted;
            result += prd.radiance * prd.attenuation;
            if( prd.done || depth >= 3 ) // TODO RR, variable for depth
                break;
            // Continue the path from the bounce computed in closest-hit.
            ray_origin = prd.origin;
            ray_direction = prd.direction;
            ++depth;
        }
    }
    while( --i );
    const uint3 launch_index = optixGetLaunchIndex();
    const unsigned int image_index = launch_index.y * params.width + launch_index.x;
    float3 accum_color = result / static_cast<float>( params.samples_per_launch );
    if( subframe_index > 0 )
    {
        // Progressive refinement: running average with previous subframes.
        const float a = 1.0f / static_cast<float>( subframe_index+1 );
        const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
        accum_color = lerp( accum_color_prev, accum_color, a );
    }
    params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
    params.frame_buffer[ image_index ] = make_color ( accum_color );
}
// Miss program for radiance rays: return the constant background color from
// the miss SBT record and terminate the path.
extern "C" __global__ void __miss__radiance()
{
    MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    RadiancePRD* prd = getPRD();
    prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b );
    prd->done = true;
}
// Any-hit program shared by both geometry types: computes a checkerboard
// pattern in texture space and ignores the intersection on "cut out" squares,
// producing the sample's cutout effect for radiance and occlusion rays alike.
extern "C" __global__ void __anyhit__ah()
{
    HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
    const int prim_idx = optixGetPrimitiveIndex();
    // The texture coordinates are defined per-vertex for built-in triangles,
    // and are derived from the surface normal for our custom sphere geometry.
    float3 texcoord;
    if( optixIsTriangleHit() )
    {
        // Interpolate the per-vertex UVs with the barycentric hit coordinates.
        const int vert_idx_offset = prim_idx*3;
        const float2 barycentrics = optixGetTriangleBarycentrics();
        const float2 t0 = rt_data->tex_coords[ vert_idx_offset+0 ];
        const float2 t1 = rt_data->tex_coords[ vert_idx_offset+1 ];
        const float2 t2 = rt_data->tex_coords[ vert_idx_offset+2 ];
        texcoord = make_float3( t0 * (1.0f - barycentrics.x - barycentrics.y) +
                                t1 * barycentrics.x +
                                t2 * barycentrics.y );
    }
    else
    {
        // Sphere: the intersection program reports the unit normal in
        // attributes 0..2; derive spherical UVs from it.
        const float3 normal = make_float3( int_as_float( optixGetAttribute_0() ),
                                           int_as_float( optixGetAttribute_1() ),
                                           int_as_float( optixGetAttribute_2() ) );
        // TODO: Pass UV scale in SBT?
        const float uv_scale = 16.0f;
        const float u = uv_scale * ( 0.5f + atan2f( normal.z, normal.x ) * 0.5f * M_1_PIf );
        const float v = uv_scale * ( 0.5f - asinf( normal.y ) * M_1_PIf );
        texcoord = make_float3( u, v, 0.0f );
    }
    // Checkerboard parity of the integer texel cell; even cells are cut out.
    int which_check = (static_cast<int>(texcoord.x) + static_cast<int>(texcoord.y)) & 1;
    if( which_check == 0 )
    {
        optixIgnoreIntersection();
    }
}
// Closest-hit for shadow rays: any surviving hit means the light is occluded.
extern "C" __global__ void __closesthit__occlusion()
{
    setPayloadOcclusion( true );
}
// Closest-hit for radiance rays: computes the geometric normal (triangles
// from vertex positions, sphere from intersection attributes), samples a
// cosine-weighted bounce direction, and does next-event estimation against
// the parallelogram light with a shadow ray.
extern "C" __global__ void __closesthit__radiance()
{
    HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
    RadiancePRD* prd = getPRD();
    const int prim_idx = optixGetPrimitiveIndex();
    const float3 ray_dir = optixGetWorldRayDirection();
    float3 N;
    if( optixIsTriangleHit() )
    {
        // Geometric normal from the triangle's vertices, flipped to face the ray.
        const int vert_idx_offset = prim_idx*3;
        const float3 v0 = make_float3( rt_data->vertices[vert_idx_offset + 0] );
        const float3 v1 = make_float3( rt_data->vertices[vert_idx_offset + 1] );
        const float3 v2 = make_float3( rt_data->vertices[vert_idx_offset + 2] );
        const float3 N_0 = normalize( cross( v1 - v0, v2 - v0 ) );
        N = faceforward( N_0, -ray_dir, N_0 );
    }
    else
    {
        // Sphere: intersection program reports the normal in attributes 0..2.
        N = make_float3(int_as_float( optixGetAttribute_0() ),
                        int_as_float( optixGetAttribute_1() ),
                        int_as_float( optixGetAttribute_2() ));
    }
    // Emission is only counted on the first hit of a path (countEmitted),
    // since subsequent hits of the light are handled by direct sampling.
    prd->emitted = ( prd->countEmitted ) ? rt_data->emission_color : make_float3( 0.0f );
    const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax() * ray_dir;
    unsigned int seed = prd->seed;
    {
        // Sample the diffuse BRDF: cosine-weighted direction around N.
        const float z1 = rnd(seed);
        const float z2 = rnd(seed);
        float3 w_in;
        cosine_sample_hemisphere( z1, z2, w_in );
        Onb onb( N );
        onb.inverse_transform( w_in );
        prd->direction = w_in;
        prd->origin = P;
        prd->attenuation *= rt_data->diffuse_color;
        prd->countEmitted = false;
    }
    // Next-event estimation: pick a uniform point on the area light.
    const float z1 = rnd(seed);
    const float z2 = rnd(seed);
    prd->seed = seed;
    ParallelogramLight light = params.light;
    const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2;
    // Calculate properties of light sample (for area based pdf)
    const float Ldist = length(light_pos - P );
    const float3 L = normalize(light_pos - P );
    const float nDl = dot( N, L );
    const float LnDl = -dot( light.normal, L );
    float weight = 0.0f;
    if( nDl > 0.0f && LnDl > 0.0f )
    {
        const bool occluded = traceOcclusion(
                params.handle,
                P,
                L,
                0.01f,          // tmin
                Ldist - 0.01f   // tmax
                );
        if( !occluded )
        {
            // Solid-angle weight of the area-light sample.
            const float A = length(cross(light.v1, light.v2));
            weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist);
        }
    }
    prd->radiance += light.emission * weight;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixCutouts/optixCutouts.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <sutil/vec_math.h>
#include <cuda/sphere.h>
// Ray types used as SBT offset/stride indices: radiance (shading) rays and
// occlusion (shadow) rays.
enum RayType
{
    RAY_TYPE_RADIANCE = 0,
    RAY_TYPE_OCCLUSION = 1,
    RAY_TYPE_COUNT
};
// Area light described as corner + two edge vectors; points on the light are
// corner + s*v1 + t*v2 for s,t in [0,1].
struct ParallelogramLight
{
    float3 corner;
    float3 v1, v2;
    float3 normal;
    float3 emission;
};
// Launch parameters shared between host and device (bound to the __constant__
// `params` in the device code).
struct Params
{
    unsigned int subframe_index;       // progressive-accumulation frame counter
    float4* accum_buffer;              // running HDR average per pixel
    uchar4* frame_buffer;              // displayable 8-bit output
    unsigned int width;
    unsigned int height;
    unsigned int samples_per_launch;   // paths traced per pixel per launch
    float3 eye;                        // camera position
    float3 U;                          // camera frame basis vectors
    float3 V;
    float3 W;
    ParallelogramLight light; // TODO: make light list
    OptixTraversableHandle handle;     // top-level traversable to trace against
};
// Per-record data for the raygen SBT record (an RGB triple; set but not read
// by the visible device code).
struct RayGenData
{
    float r, g, b;
};
// Per-record data for miss SBT records: the background color returned by
// __miss__radiance.
struct MissData
{
    float r, g, b;
};
// Per-record data for hit-group SBT records; extends the sphere hit-group
// data with material colors and (for triangles) geometry buffers.
struct HitGroupData : sphere::SphereHitGroupData
{
    float3 emission_color;
    float3 diffuse_color;
    float4* vertices;    // triangle vertex positions, 3 per primitive
    float2* tex_coords;  // per-vertex texture coordinates, 3 per primitive
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDemandLoadSimple/PageRequester.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <DemandLoading/Paging.h>
using namespace demandLoading;
// Kernel: one thread per page in [pageBegin, pageEnd). Each thread touches its
// page via pagingMapOrRequest, which either maps a resident page or records a
// request for it; the residency result itself is not needed here.
__global__ static void pageRequester( DeviceContext context, unsigned int pageBegin, unsigned int pageEnd )
{
    unsigned int numPages = pageEnd - pageBegin;
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    if( index >= numPages )
        return;
    unsigned int pageId = pageBegin + index;
    bool isResident;  // output required by the API; intentionally unused
    pagingMapOrRequest( context, pageId, &isResident );
}
// Host-side launcher: runs pageRequester over [pageBegin, pageEnd) with
// 32-thread blocks on the given stream.
__host__ void launchPageRequester( cudaStream_t stream, const DeviceContext& context, unsigned int pageBegin, unsigned int pageEnd )
{
    unsigned int threadsPerBlock = 32;
    unsigned int numPages = pageEnd - pageBegin;
    unsigned int numBlocks = ( numPages + threadsPerBlock - 1 ) / threadsPerBlock;  // ceil division
    // The DeviceContext is passed by value to the kernel, so it is copied to device memory when the kernel is launched.
    pageRequester<<<numBlocks, threadsPerBlock, 0U, stream>>>( context, pageBegin, pageEnd );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDemandLoadSimple/optixDemandLoadSimple.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <DemandLoading/DemandLoader.h>
#include <cuda_runtime.h>
#include <atomic>
#include <cstdio>
#include <exception>
#include <stdexcept>
using namespace demandLoading;
// Launch kernel. Defined in optixDemandLoadSimple.cu
void launchPageRequester( cudaStream_t stream, const DeviceContext& context, unsigned int pageBegin, unsigned int pageEnd );
// Check status returned by a CUDA call.
// Raise std::runtime_error (carrying the CUDA error string) for any
// non-success CUDA runtime status; no-op on cudaSuccess.
inline void check( cudaError_t status )
{
    if( status == cudaSuccess )
        return;
    throw std::runtime_error( cudaGetErrorString( status ) );
}
// Global count of requests processed.
std::atomic<int> g_numRequestsProcessed( 0 );
// This callback is invoked by the demand loading library when a page request is processed.
// This callback is invoked by the demand loading library when a page request is processed.
// It only counts processed requests; returning nullptr because no page data
// is produced in this sample.
void* callback( unsigned int deviceIndex, cudaStream_t stream, unsigned int pageIndex )
{
    ++g_numRequestsProcessed;
    return nullptr;
}
// Entry point: creates a demand loader and a 128-page resource, then launches
// kernels over the pages in batches of 32. Each batch is re-launched until it
// generates no new page requests, after which the loop advances to the next
// batch. Prints the total request/launch counts on completion.
int main()
{
    // Create DemandLoader
    DemandLoader* loader = createDemandLoader( Options() );
    // Create a resource, using the given callback to handle page requests.
    const unsigned int numPages = 128;
    unsigned int startPage = loader->createResource( numPages, callback );
    // Create a stream on the first supported device, which is used for asynchronous operations.
    unsigned int deviceIndex = loader->getDevices().at( 0 );
    check( cudaSetDevice( deviceIndex ) );
    cudaStream_t stream;
    check( cudaStreamCreate( &stream ) );
    // Process all the pages of the resource in batches.
    const unsigned int batchSize = 32;
    unsigned int numLaunches = 0;
    for( unsigned int currentPage = startPage; currentPage < startPage + numPages; )
    {
        // Prepare for launch, obtaining DeviceContext.
        DeviceContext context;
        loader->launchPrepare( deviceIndex, stream, context );
        // Launch the kernel.
        launchPageRequester( stream, context, currentPage, currentPage + batchSize );
        ++numLaunches;
        // Initiate request processing, which returns a Ticket.
        Ticket ticket = loader->processRequests( deviceIndex, stream, context );
        // Wait for any page requests to be processed.
        ticket.wait();
        // Advance the loop counter only when there were no page requests.
        if( ticket.numTasksTotal() == 0 )
            currentPage += batchSize;
    }
    // %u matches the unsigned numLaunches; the atomic count is a plain int.
    printf( "Processed %i requests in %u launches.\n", g_numRequestsProcessed.load(), numLaunches );
    // Clean up
    check( cudaStreamDestroy( stream ) );
    destroyDemandLoader( loader );
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDemandTexture/optixDemandTexture.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "optixDemandTexture.h"
#include <DemandLoading/DemandLoader.h>
#include <DemandLoading/DemandTexture.h>
#include <DemandLoading/TextureDescriptor.h>
#include <ImageReader/CheckerBoardImage.h>
#ifdef OPTIX_SAMPLE_USE_OPEN_EXR
#include <ImageReader/EXRReader.h>
#endif
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <cuda_runtime.h>
#include <sampleConfig.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Camera.h>
#include <sutil/sutil.h>
#include <cassert>
#include <iomanip>
#include <iostream>
#include <memory>
#include <string>
using namespace demandLoading;
using namespace imageReader;
// Number of worker threads used for demand-load request processing (0 = let the loader choose).
int g_numThreads = 0;
// Accumulated launch statistics, reported after rendering completes.
int g_totalLaunches = 0;
double g_totalLaunchTime = 0.0;
unsigned int g_totalRequests = 0;
// Extra mip level bias applied when sampling the demand-loaded texture (--bias).
float g_mipLevelBias = 0.0f;
// Output image dimensions in pixels (--dim).
int32_t g_width = 768;
int32_t g_height = 768;
// Side length of the screen-space buckets that partition the image across launches/devices.
int32_t g_bucketSize = 256;
// Dimensions of the procedural checkerboard texture, used when no EXR file is given (--textureDim).
int g_textureWidth = 2048;
int g_textureHeight = 2048;
// Pinhole camera shared by all devices; configured in initCameraState().
sutil::Camera g_camera;
// All per-device OptiX state: device context, acceleration structure, pipeline,
// SBT, launch parameters, and the CUDA stream used for launches and for
// demand-load request processing.
struct PerDeviceSampleState
{
    int32_t device_idx = -1;  // CUDA device ordinal this state belongs to
    OptixDeviceContext context = 0;
    OptixTraversableHandle gas_handle = 0; // Traversable handle for triangle AS
    CUdeviceptr d_gas_output_buffer = 0; // Triangle AS memory
    OptixModule ptx_module = 0;
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixPipeline pipeline = 0;
    OptixProgramGroup raygen_prog_group = 0;
    OptixProgramGroup miss_prog_group = 0;
    OptixProgramGroup hitgroup_prog_group = 0;
    OptixShaderBindingTable sbt = {};
    Params params = {};        // host-side copy of the launch parameters
    Params* d_params = nullptr; // device-side copy, refreshed before each launch
    CUstream stream = 0;
    // Only valid on the host
    demandLoading::Ticket ticket;  // tracks outstanding page requests from the most recent launch
};
// Generic shader binding table record: the OptiX-required, aligned header
// followed by the user data payload of type T.
template <typename T>
struct SbtRecord
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};
// Concrete SBT record types for each program group in this sample.
typedef SbtRecord<RayGenData> RayGenSbtRecord;
typedef SbtRecord<MissData> MissSbtRecord;
typedef SbtRecord<HitGroupData> HitGroupSbtRecord;
// Print the command-line usage summary to stderr and terminate the process
// with a non-zero exit code. argv0 is the program name used in the banner.
void printUsageAndExit( const char* argv0 )
{
    // clang-format off
    std::cerr
    << "\nUsage : " << argv0 << " [options]\n"
    << "Options: --help | -h Print this usage message\n"
    << " --file | -f <filename> Specify file for image output\n"
    << " --dim=<width>x<height> Set image dimensions\n"
#ifdef OPTIX_SAMPLE_USE_OPEN_EXR
    << " --texture | -t <filename> Texture to render (path relative to data folder). Use checkerboard for procedural texture.\n"
#endif
    << " --textureDim=<width>x<height> Set dimensions of procedural texture (default 2048x2048).\n"
    << " --bias | -b <bias> Mip level bias (default 0.0)\n"
    << " --textureScale <s> Texture scale (how many times to wrap the texture around the sphere) (default 1.0f)\n"
    << " --bucketSize <dim> The size of the screen-space tiles used for rendering (default 256).\n"
    << " --numThreads <n> The number of threads to use for processing requests; 0 is automatic (default 0).\n"
    << "\n";
    // clang-format on
    exit( 1 );
}
// OptiX context log callback: echoes each message to stderr as
// "[level][         tag]: message" with fixed-width level and tag columns.
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::cerr << "[" << std::setw( 2 ) << level << "]";
    std::cerr << "[" << std::setw( 12 ) << tag << "]: ";
    std::cerr << message << "\n";
}
// Configure the global pinhole camera: eye on the -X axis looking at the
// origin, +Z up, 30-degree vertical FOV, aspect ratio from the image size.
void initCameraState()
{
    g_camera.setEye( make_float3( -6.0f, 0.0f, 0.0f ) );
    g_camera.setLookat( make_float3( 0.0f, 0.0f, 0.0f ) );
    g_camera.setUp( make_float3( 0.0f, 0.0f, 1.0f ) );
    g_camera.setFovY( 30.0f );
    const float aspectRatio = static_cast<float>( g_width ) / static_cast<float>( g_height );
    g_camera.setAspectRatio( aspectRatio );
}
// Query how many CUDA-capable devices are visible to this process.
unsigned int getNumDevices()
{
    int deviceCount = 0;
    CUDA_CHECK( cudaGetDeviceCount( &deviceCount ) );
    return static_cast<unsigned int>( deviceCount );
}
// Create an OptiX device context and a CUDA stream for the device recorded in
// state.device_idx. Assumes the caller has already selected that device with
// cudaSetDevice (see createContexts).
void createContext( PerDeviceSampleState& state )
{
    // Initialize CUDA on this device
    CUDA_CHECK( cudaFree( 0 ) );
    OptixDeviceContext context;
    CUcontext cuCtx = 0; // zero means take the current context
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &context_log_cb;
    options.logCallbackLevel = 4;  // up to "status/progress" verbosity
    OPTIX_CHECK( optixDeviceContextCreate( cuCtx, &options, &context ) );
    state.context = context;
    // Re-select the device before creating the stream so the stream belongs to it.
    CUDA_CHECK( cudaSetDevice( state.device_idx ) );
    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
}
void createContexts( std::vector<PerDeviceSampleState>& states )
{
OPTIX_CHECK( optixInit() );
unsigned int numDevices = getNumDevices();
states.resize( numDevices );
for( unsigned int i = 0; i < numDevices; ++i )
{
states[i].device_idx = i;
CUDA_CHECK( cudaSetDevice( i ) );
createContext( states[i] );
}
}
// Build the geometry acceleration structure (GAS) for a single custom-primitive
// AABB (the sphere bounds), then compact it if compaction saves memory.
// On return, state.gas_handle and state.d_gas_output_buffer are valid.
void buildAccel( PerDeviceSampleState& state )
{
    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
    accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
    // AABB build input: bounds of the radius-1.5 sphere centered at the origin.
    OptixAabb aabb = {-1.5f, -1.5f, -1.5f, 1.5f, 1.5f, 1.5f};
    CUdeviceptr d_aabb_buffer;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_aabb_buffer ), sizeof( OptixAabb ) ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_aabb_buffer ), &aabb, sizeof( OptixAabb ), cudaMemcpyHostToDevice ) );
    OptixBuildInput aabb_input = {};
    aabb_input.type = OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES;
    aabb_input.customPrimitiveArray.aabbBuffers = &d_aabb_buffer;
    aabb_input.customPrimitiveArray.numPrimitives = 1;
    uint32_t aabb_input_flags[1] = {OPTIX_GEOMETRY_FLAG_NONE};
    aabb_input.customPrimitiveArray.flags = aabb_input_flags;
    aabb_input.customPrimitiveArray.numSbtRecords = 1;
    // Query temp/output sizes before allocating the build buffers.
    OptixAccelBufferSizes gas_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &accel_options, &aabb_input, 1, &gas_buffer_sizes ) );
    CUdeviceptr d_temp_buffer_gas;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer_gas ), gas_buffer_sizes.tempSizeInBytes ) );
    // non-compacted output
    // A single allocation holds the uncompacted GAS followed (8-byte aligned)
    // by the emitted compacted-size value.
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ), compactedSizeOffset + 8 ) );
    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );
    OPTIX_CHECK( optixAccelBuild( state.context,
    0, // CUDA stream
    &accel_options, &aabb_input,
    1, // num build inputs
    d_temp_buffer_gas, gas_buffer_sizes.tempSizeInBytes, d_buffer_temp_output_gas_and_compacted_size,
    gas_buffer_sizes.outputSizeInBytes, &state.gas_handle,
    &emitProperty, // emitted property list
    1 // num emitted properties
    ) );
    CUDA_CHECK( cudaFree( (void*)d_temp_buffer_gas ) );
    CUDA_CHECK( cudaFree( (void*)d_aabb_buffer ) );
    // Read back the emitted compacted size (synchronizes with the build).
    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, (void*)emitProperty.result, sizeof( size_t ), cudaMemcpyDeviceToHost ) );
    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        // Compacting saves memory: copy into a tight allocation and free the original.
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_gas_output_buffer ), compacted_gas_size ) );
        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( state.context, 0, state.gas_handle, state.d_gas_output_buffer,
        compacted_gas_size, &state.gas_handle ) );
        CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
    }
    else
    {
        // Compaction would not help; keep the original allocation as the output.
        state.d_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
    }
}
// Compile the sample's device code (optixDemandTexture.cu, pre-built to PTX)
// into an OptixModule, and fill in state.pipeline_compile_options, which must
// match the options later used to create the pipeline.
void createModule( PerDeviceSampleState& state )
{
    OptixModuleCompileOptions module_compile_options = {};
    module_compile_options.maxRegisterCount = 100;
    module_compile_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
    module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;
    state.pipeline_compile_options.usesMotionBlur = false;
    state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS;
    // 3 payload registers (packed RayPayload pointer + spare), 6 attribute
    // registers (texcoord + normal reported by the intersection program).
    state.pipeline_compile_options.numPayloadValues = 3;
    state.pipeline_compile_options.numAttributeValues = 6;
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE; // TODO: should be OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
    state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";
    size_t inputSize = 0;
    const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixDemandTexture.cu", inputSize );
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixModuleCreateFromPTX( state.context, &module_compile_options, &state.pipeline_compile_options,
    input, inputSize, log, &sizeof_log, &state.ptx_module ) );
}
// Create the three program groups (raygen, miss, hitgroup) from the compiled
// module. The hitgroup pairs a closest-hit program with a custom sphere
// intersection program; no any-hit program is used.
void createProgramGroups( PerDeviceSampleState& state )
{
    OptixProgramGroupOptions program_group_options = {}; // Initialize to zeros
    OptixProgramGroupDesc raygen_prog_group_desc = {}; //
    raygen_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
    raygen_prog_group_desc.raygen.module = state.ptx_module;
    raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &raygen_prog_group_desc,
    1, // num program groups
    &program_group_options, log, &sizeof_log, &state.raygen_prog_group ) );
    OptixProgramGroupDesc miss_prog_group_desc = {};
    miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module = state.ptx_module;
    miss_prog_group_desc.miss.entryFunctionName = "__miss__ms";
    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &miss_prog_group_desc,
    1, // num program groups
    &program_group_options, log, &sizeof_log, &state.miss_prog_group ) );
    OptixProgramGroupDesc hitgroup_prog_group_desc = {};
    hitgroup_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hitgroup_prog_group_desc.hitgroup.moduleCH = state.ptx_module;
    hitgroup_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__ch";
    hitgroup_prog_group_desc.hitgroup.moduleAH = nullptr;  // no any-hit program
    hitgroup_prog_group_desc.hitgroup.entryFunctionNameAH = nullptr;
    hitgroup_prog_group_desc.hitgroup.moduleIS = state.ptx_module;
    hitgroup_prog_group_desc.hitgroup.entryFunctionNameIS = "__intersection__is";
    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &hitgroup_prog_group_desc,
    1, // num program groups
    &program_group_options, log, &sizeof_log, &state.hitgroup_prog_group ) );
}
// Link the program groups into a pipeline and size its stacks for a maximum
// trace depth of 1 (primary rays only, no continuation/direct callables).
void createPipeline( PerDeviceSampleState& state )
{
    const uint32_t max_trace_depth = 1;
    OptixProgramGroup program_groups[] = {state.raygen_prog_group, state.miss_prog_group, state.hitgroup_prog_group};
    OptixPipelineLinkOptions pipeline_link_options = {};
    pipeline_link_options.maxTraceDepth = max_trace_depth;
    pipeline_link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate( state.context, &state.pipeline_compile_options, &pipeline_link_options,
    program_groups, sizeof( program_groups ) / sizeof( program_groups[0] ), log,
    &sizeof_log, &state.pipeline ) );
    // Accumulate per-program stack usage, then compute and set conservative
    // stack sizes for the whole pipeline.
    OptixStackSizes stack_sizes = {};
    for( auto& prog_group : program_groups )
    {
        OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
    }
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
    0, // maxCCDepth
    0, // maxDCDEpth
    &direct_callable_stack_size_from_traversal,
    &direct_callable_stack_size_from_state, &continuation_stack_size ) );
    OPTIX_CHECK( optixPipelineSetStackSize( state.pipeline, direct_callable_stack_size_from_traversal,
    direct_callable_stack_size_from_state, continuation_stack_size,
    1 // maxTraversableDepth
    ) );
}
// Allocate and fill the shader binding table: one raygen record (no data), one
// miss record (background color), and one hitgroup record that carries the
// sphere radius plus the demand-loaded texture id, scale, and LOD.
void createSBT( PerDeviceSampleState& state, const DemandTexture& texture, float texture_scale, float texture_lod )
{
    CUdeviceptr raygen_record;
    const size_t raygen_record_size = sizeof( RayGenSbtRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &raygen_record ), raygen_record_size ) );
    RayGenSbtRecord rg_sbt = {};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &rg_sbt ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( raygen_record ), &rg_sbt, raygen_record_size, cudaMemcpyHostToDevice ) );
    CUdeviceptr miss_record;
    size_t miss_record_size = sizeof( MissSbtRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &miss_record ), miss_record_size ) );
    MissSbtRecord ms_sbt;
    ms_sbt.data = {0.05f, 0.05f, 0.3f};  // background color (dark blue)
    OPTIX_CHECK( optixSbtRecordPackHeader( state.miss_prog_group, &ms_sbt ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( miss_record ), &ms_sbt, miss_record_size, cudaMemcpyHostToDevice ) );
    // The demand-loaded texture id is passed to the closest hit program via the hitgroup record.
    CUdeviceptr hitgroup_record;
    size_t hitgroup_record_size = sizeof( HitGroupSbtRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &hitgroup_record ), hitgroup_record_size ) );
    HitGroupSbtRecord hg_sbt;
    hg_sbt.data = {1.5f /*radius*/, texture.getId(), texture_scale, texture_lod};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_group, &hg_sbt ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( hitgroup_record ), &hg_sbt, hitgroup_record_size, cudaMemcpyHostToDevice ) );
    // Wire the device-side records into the SBT description used at launch.
    state.sbt.raygenRecord = raygen_record;
    state.sbt.missRecordBase = miss_record;
    state.sbt.missRecordStrideInBytes = sizeof( MissSbtRecord );
    state.sbt.missRecordCount = 1;
    state.sbt.hitgroupRecordBase = hitgroup_record;
    state.sbt.hitgroupRecordStrideInBytes = sizeof( HitGroupSbtRecord );
    state.sbt.hitgroupRecordCount = 1;
}
// Destroy all OptiX objects and free all device allocations owned by a
// PerDeviceSampleState. Selects the state's device before releasing CUDA
// resources so they are freed on the device that owns them.
void cleanupState( PerDeviceSampleState& state )
{
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.miss_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.hitgroup_prog_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );
    CUDA_CHECK( cudaSetDevice( state.device_idx ) );
    CUDA_CHECK( cudaStreamDestroy( state.stream ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_params ) ) );
    // Non-demand texture resources are only present if makeStaticTexture was used.
    if( state.params.nonDemandTexture != 0 )
        CUDA_CHECK( cudaDestroyTextureObject( state.params.nonDemandTexture ) );
    if( state.params.nonDemandTextureArray != 0 )
        CUDA_CHECK( cudaFreeMipmappedArray( state.params.nonDemandTextureArray ) );
}
// Build the sampler description for the demand-loaded texture: wrap addressing
// in both dimensions, trilinear (linear + linear mipmap) filtering, and
// 16x anisotropy.
TextureDescriptor makeTextureDescription()
{
    TextureDescriptor desc{};
    desc.addressMode[0] = CU_TR_ADDRESS_MODE_WRAP;
    desc.addressMode[1] = CU_TR_ADDRESS_MODE_WRAP;
    desc.filterMode = CU_TR_FILTER_MODE_LINEAR;
    desc.mipmapFilterMode = CU_TR_FILTER_MODE_LINEAR;
    desc.maxAnisotropy = 16;
    return desc;
}
// Fill the host-side launch parameters from the globals and the per-device
// state, and lazily allocate the device-side Params buffer. The demand-texture
// context and bucket fields are set separately by performLaunches.
void initLaunchParams( PerDeviceSampleState& state, unsigned int numDevices )
{
    state.params.image_width = g_width;
    state.params.image_height = g_height;
    state.params.origin_x = g_width / 2;
    state.params.origin_y = g_height / 2;
    state.params.handle = state.gas_handle;
    state.params.device_idx = state.device_idx;
    state.params.num_devices = numDevices;
    state.params.mipLevelBias = g_mipLevelBias;
    // state.params.nonDemandTextureArray and state.params.nonDemandTexture set in makeStaticTexture
    state.params.eye = g_camera.eye();
    g_camera.UVWFrame( state.params.U, state.params.V, state.params.W );
    // Allocate the device copy of Params once; it is refreshed every launch.
    if( state.d_params == nullptr )
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params ), sizeof( Params ) ) );
}
// Returns number of requests processed (over all streams and devices).
// Render one full frame by distributing screen-space buckets round-robin over
// all devices. For each bucket: prepare the demand-load context, launch the
// pipeline asynchronously, then kick off asynchronous request processing.
// After each round of launches, wait for all tickets and tally the number of
// page requests that were filled.
// Returns number of requests processed (over all streams and devices).
unsigned int performLaunches( sutil::CUDAOutputBuffer<uchar4>& output_buffer, std::vector<PerDeviceSampleState>& states, DemandLoader& demandLoader )
{
    auto startTime = std::chrono::steady_clock::now();
    // Number of buckets needed to tile the image (rounding up at the edges).
    const uint32_t bucketCountX = ( g_width + g_bucketSize - 1 ) / g_bucketSize;
    const uint32_t bucketCountY = ( g_height + g_bucketSize - 1 ) / g_bucketSize;
    const uint32_t numBuckets = bucketCountX * bucketCountY;
    // All devices write into the same (zero-copy) output buffer.
    uchar4* outputPtr = output_buffer.map();
    for( auto& state : states )
        state.params.result_buffer = outputPtr;
    uint32_t numRequestsProcessed = 0;
    uint32_t bucketIdx = 0;
    while( bucketIdx < numBuckets )
    {
        // Assign the next bucket to each device in turn.
        for( auto& state : states )
        {
            uint32_t cur_index = bucketIdx++;
            // Devices past the last bucket sit out this round (their ticket
            // retains its previous/default state).
            if( cur_index >= numBuckets )
                continue;
            state.params.bucket_index = cur_index;
            state.params.bucket_width = g_bucketSize;
            state.params.bucket_height = g_bucketSize;
            // Must be called before each launch that samples demand-loaded textures.
            demandLoader.launchPrepare( state.device_idx,
            state.stream,
            state.params.demandTextureContext );
            initLaunchParams( state, static_cast<unsigned int>( states.size() ) );
            // Perform the rendering launches
            CUDA_CHECK( cudaSetDevice( state.device_idx ) );
            CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( state.d_params ), &state.params, sizeof( Params ),
            cudaMemcpyHostToDevice, state.stream ) );
            OPTIX_CHECK( optixLaunch( state.pipeline,
            state.stream,
            reinterpret_cast<CUdeviceptr>( state.d_params ),
            sizeof( Params ),
            &state.sbt,
            state.params.bucket_width, // launch width
            state.params.bucket_height, // launch height
            1 // launch depth
            ) );
            // Initiate asynchronous request processing for the previous launch
            state.ticket = demandLoader.processRequests(
            state.device_idx, state.stream, state.params.demandTextureContext );
        }
        // Wait for any outstanding requests
        for( auto& state : states )
        {
            state.ticket.wait();
            assert( state.ticket.numTasksTotal() >= 0 );
            numRequestsProcessed += state.ticket.numTasksTotal();
        }
    }
    output_buffer.unmap();
    // Record timing statistics for the end-of-run report.
    ++g_totalLaunches;
    g_totalLaunchTime += std::chrono::duration<double>( std::chrono::steady_clock::now() - startTime ).count();
    return numRequestsProcessed;
}
// Entry point: parse the command line, set up demand loading and per-device
// OptiX state, then repeatedly render the frame until no further texture tile
// requests are generated (or a launch cap is reached), and finally display or
// save the image.
int main( int argc, char* argv[] )
{
    std::string outfile;
    float textureScale = 4.0f;
    // Image credit: CC0Textures.com (https://cc0textures.com/view.php?tex=Bricks12)
    // Licensed under the Creative Commons CC0 License.
    std::string textureFile = "Textures/Bricks12_col.exr"; // use --texture "" for procedural texture
    // ---- Command-line parsing --------------------------------------------
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg( argv[i] );
        bool lastArg = ( i == argc - 1 );
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( ( arg == "--file" || arg == "-f" ) && !lastArg )
        {
            outfile = argv[++i];
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            sutil::parseDimensions( arg.substr( 6 ).c_str(), g_width, g_height );
        }
        else if( ( arg == "--texture" || arg == "-t" ) && !lastArg )
        {
            textureFile = argv[++i];
        }
        else if( arg.substr( 0, 13 ) == "--textureDim=" )
        {
            sutil::parseDimensions( arg.substr( 13 ).c_str(), g_textureWidth, g_textureHeight );
        }
        else if( ( arg == "--bias" || arg == "-b" ) && !lastArg )
        {
            g_mipLevelBias = static_cast<float>( atof( argv[++i] ) );
        }
        else if( arg == "--textureScale" && !lastArg )
        {
            textureScale = static_cast<float>( atof( argv[++i] ) );
        }
        else if( arg == "--bucketSize" && !lastArg )
        {
            g_bucketSize = atoi( argv[++i] );
            if( g_bucketSize <= 0 )
            {
                std::cerr << "Warning: Bucket size must be greater than 0. Setting bucket size to 256" << std::endl;
                g_bucketSize = 256;
            }
        }
        else if( arg == "--numThreads" && !lastArg )
        {
            g_numThreads = atoi( argv[++i] );
        }
        else
        {
            std::cerr << "Unknown option '" << arg << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    try
    {
        // ---- Setup -------------------------------------------------------
        initCameraState();
        std::vector<PerDeviceSampleState> states;
        createContexts( states );
        // Initialize DemandLoader and create a demand-loaded texture.
        // The texture id is passed to the closest hit shader via a hit group record in the SBT.
        // The texture sampler array (indexed by texture id) is passed as a launch parameter.
        demandLoading::Options options{};
        options.maxThreads = g_numThreads; // maximum threads to use when processing page requests
        std::shared_ptr<DemandLoader> demandLoader( createDemandLoader( options ), destroyDemandLoader );
        std::unique_ptr<ImageReader> imageReader;
        // Make an exr reader or a procedural texture reader based on the textureFile name
#ifdef OPTIX_SAMPLE_USE_OPEN_EXR
        if( !textureFile.empty() && textureFile != "checkerboard" )
        {
            std::string textureFilename( sutil::sampleDataFilePath( textureFile.c_str() ) );
            imageReader = std::unique_ptr<ImageReader>( new EXRReader( textureFilename.c_str() ) );
        }
#endif
        if( imageReader == nullptr )
        {
            // Fall back to a procedural, mipmapped checkerboard.
            const int squaresPerSide = 32;
            const bool useMipmaps = true;
            imageReader = std::unique_ptr<ImageReader>(
            new CheckerBoardImage( g_textureWidth, g_textureHeight, squaresPerSide, useMipmaps ) );
        }
        // Create a demand-loaded texture
        TextureDescriptor texDesc = makeTextureDescription();
        const DemandTexture& texture = demandLoader->createTexture( std::move( imageReader ), texDesc );
        // Set up OptiX per-device states
        for( PerDeviceSampleState& state : states )
        {
            CUDA_CHECK( cudaSetDevice( state.device_idx ) );
            buildAccel( state );
            createModule( state );
            createProgramGroups( state );
            createPipeline( state );
            createSBT( state, texture, textureScale, 0.f /*textureLod*/ );
        }
        // Create the output buffer to hold the rendered image
        sutil::CUDAOutputBuffer<uchar4> outputBuffer( sutil::CUDAOutputBufferType::ZERO_COPY, g_width, g_height );
        // ---- Render loop -------------------------------------------------
        unsigned int numFilled = 0;
        const int maxLaunches = 1024;  // safety cap so the loop always terminates
        // Perform launches (launch until there are no more requests to fill), up to
        // the maximum number of launches.
        do
        {
            numFilled = performLaunches( outputBuffer, states, *demandLoader );
            g_totalRequests += numFilled;
        } while( numFilled > 0 && g_totalLaunches < maxLaunches );
        std::cout << "Launches: " << g_totalLaunches << "\n";
        std::cout << "Avg. launch time: " << ( 1000.0 * g_totalLaunchTime / g_totalLaunches ) << " ms\n";
        std::cout << "Texture tile requests: " << g_totalRequests << "\n";
        // Display result image
        {
            sutil::ImageBuffer buffer;
            buffer.data = outputBuffer.getHostPointer();
            buffer.width = g_width;
            buffer.height = g_height;
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            if( outfile.empty() )
                sutil::displayBufferWindow( argv[0], buffer );
            else
                sutil::saveImage( outfile.c_str(), buffer, false );
        }
        // Clean up the states, deleting their resources
        for( PerDeviceSampleState& state : states )
            cleanupState( state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDemandTexture/optixDemandTexture.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "optixDemandTexture.h"
#include <DemandLoading/DeviceContext.h>
#include <DemandLoading/Texture2D.h>
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
#include <cuda_runtime.h>
// Whether to use tex2DLod or tex2DGrad
//#define USE_TEX2DLOD 1
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
// Per ray data for closets hit program and functions to access it
//
//------------------------------------------------------------------------------
// Per-ray data shared between the raygen, miss, and closest-hit programs.
// A pointer to this struct is packed into two 32-bit payload registers
// (see packPointer / getPRD).
struct RayPayload
{
    // Return value
    float3 rgb;
    // Ray differential: derivatives of the ray origin and direction with
    // respect to one-pixel steps in screen x and y; used for texture LOD.
    float3 origin_dx;
    float3 origin_dy;
    float3 direction_dx;
    float3 direction_dy;
    // padding
    int pad;
};
// Reassemble a 64-bit pointer from two 32-bit payload words (i0 holds the
// high bits, i1 the low bits). Inverse of packPointer.
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
    unsigned long long bits = static_cast<unsigned long long>( i0 );
    bits = ( bits << 32 ) | i1;
    return reinterpret_cast<void*>( bits );
}
// Split a 64-bit pointer into two 32-bit payload words (i0 = high bits,
// i1 = low bits) so it can travel through optixTrace payload registers.
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
    const unsigned long long bits = reinterpret_cast<unsigned long long>( ptr );
    i0 = static_cast<unsigned int>( bits >> 32 );
    i1 = static_cast<unsigned int>( bits & 0xffffffffull );
}
// Recover the RayPayload pointer that the raygen program packed into
// payload registers 0 and 1 before tracing.
static __forceinline__ __device__ RayPayload* getPRD()
{
    return reinterpret_cast<RayPayload*>(
        unpackPointer( optixGetPayload_0(), optixGetPayload_1() ) );
}
//------------------------------------------------------------------------------
//
// Utility functions
//
//------------------------------------------------------------------------------
// trace a ray
// trace a ray
// Packs the payload pointer into two registers and traces a radiance ray
// against the given traversable; results are written into *prd by the
// closest-hit or miss program.
static __forceinline__ __device__ void trace( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RayPayload* prd )
{
    unsigned int u0, u1;
    packPointer( prd, u0, u1 );
    optixTrace( handle, ray_origin, ray_direction, tmin, tmax,
    0.0f, // rayTime
    OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE,
    RAY_TYPE_RADIANCE, // SBT offset
    RAY_TYPE_COUNT, // SBT stride
    RAY_TYPE_RADIANCE, // missSBTIndex
    u0, u1 );
}
// Convert Cartesian coordinates to polar coordinates
// Convert Cartesian coordinates to polar coordinates
// Returns (azimuth, elevation, radius), where azimuth is adjusted into the
// correct quadrant and elevation is in [-pi/2, pi/2].
__forceinline__ __device__ float3 cartesian_to_polar( const float3& v )
{
    float azimuth;
    float elevation;
    float radius = length( v );
    // Distance from the z axis; zero means v lies on the pole.
    float r = sqrtf( v.x * v.x + v.y * v.y );
    if( r > 0.0f )
    {
        azimuth = atanf( v.y / v.x );
        elevation = atanf( v.z / r );
        // atanf only covers (-pi/2, pi/2); shift into the proper quadrant.
        if( v.x < 0.0f )
            azimuth += M_PIf;
        else if( v.y < 0.0f )
            azimuth += M_PIf * 2.0f;
    }
    else
    {
        // On the pole: azimuth is arbitrary, elevation is +/- pi/2.
        azimuth = 0.0f;
        if( v.z > 0.0f )
            elevation = +M_PI_2f;
        else
            elevation = -M_PI_2f;
    }
    return make_float3( azimuth, elevation, radius );
}
// Compute texture derivatives in texture space from texture derivatives in world space
// and ray differentials.
// Compute texture derivatives in texture space from texture derivatives in world space
// and ray differentials. The results (dpdx, dpdy) feed tex2DGrad for
// anisotropic mip level selection.
inline __device__ void computeTextureDerivatives( float2& dpdx, // texture derivative in x (out)
float2& dpdy, // texture derivative in y (out)
const float3& dPds, // world space texture derivative
const float3& dPdt, // world space texture derivative
float3 rdx, // ray differential in x
float3 rdy, // ray differential in y
const float3& normal,
const float3& rayDir )
{
    // Compute scale factor to project differentials onto surface plane
    float s = dot( rayDir, normal );
    // Clamp s to keep ray differentials from blowing up at grazing angles. Prevents overblurring.
    const float sclamp = 0.1f;
    if( s >= 0.0f && s < sclamp )
        s = sclamp;
    if( s < 0.0f && s > -sclamp )
        s = -sclamp;
    // Project the ray differentials to the surface plane.
    float tx = dot( rdx, normal ) / s;
    float ty = dot( rdy, normal ) / s;
    rdx -= tx * rayDir;
    rdy -= ty * rayDir;
    // Compute the texture derivatives in texture space. These are calculated as the
    // dot products of the projected ray differentials with the texture derivatives.
    dpdx = make_float2( dot( dPds, rdx ), dot( dPdt, rdx ) );
    dpdy = make_float2( dot( dPds, rdy ), dot( dPdt, rdy ) );
}
//------------------------------------------------------------------------------
//
// Optix programs
//
//------------------------------------------------------------------------------
// Ray generation program: maps the launch index into the current screen-space
// bucket, builds a primary camera ray with ray differentials, traces it, and
// writes the shaded color into the output buffer.
extern "C" __global__ void __raygen__rg()
{
    // Determine which pixel to render from the launch index
    const int imageWidth = params.image_width;
    const int imageHeight = params.image_height;
    const uint3 launchIdx = optixGetLaunchIndex();
    // Remap the launch index to the pixel index based on the bucket dimensions
    uint3 pixelIdx = make_uint3( 0, 0, 0 );
    {
        const unsigned int bucket_index = params.bucket_index;
        const unsigned int bucket_width = params.bucket_width;
        const unsigned int bucket_height = params.bucket_height;
        // Buckets tile the image in row-major order.
        const unsigned int bucket_count_x = ( imageWidth + bucket_width - 1 ) / bucket_width;
        const unsigned int bucket_x = bucket_index % bucket_count_x;
        const unsigned int bucket_y = bucket_index / bucket_count_x;
        pixelIdx = make_uint3( launchIdx.x + bucket_width * bucket_x,
        launchIdx.y + bucket_height * bucket_y,
        0 );
    }
    // Edge buckets may overhang the image; skip out-of-bounds pixels.
    if( pixelIdx.x >= imageWidth || pixelIdx.y >= imageHeight )
        return;
    // Get the camera parameters
    const float3 U = params.U;
    const float3 V = params.V;
    const float3 W = params.W;
    // Normalized device coordinates in [-1, 1].
    const float2 d =
    2.0f * make_float2( static_cast<float>( pixelIdx.x ) / imageWidth, static_cast<float>( pixelIdx.y ) / imageHeight ) - 1.0f;
    // Construct the ray
    const float3 origin = params.eye;
    const float3 direction = normalize( d.x * U + d.y * V + W );
    // Construct the ray payload with ray differentials
    RayPayload prd;
    prd.rgb = make_float3( 0.0f );
    prd.origin_dx = make_float3( 0.0f );
    prd.origin_dy = make_float3( 0.0f );
    const float Wlen = length( W );
    // TODO: This is not 100% correct, since U and V are not perpendicular to the ray direction
    prd.direction_dx = U * ( 2.0f / ( imageWidth * Wlen ) );
    prd.direction_dy = V * ( 2.0f / ( imageHeight * Wlen ) );
    trace( params.handle, origin, direction,
    0.00f, // tmin
    1e16f, // tmax
    &prd );
    params.result_buffer[pixelIdx.y * params.image_width + pixelIdx.x] = make_color( prd.rgb );
}
// Miss program: shade the ray with the constant background color stored in
// the miss SBT record.
extern "C" __global__ void __miss__ms()
{
    const MissData* missData = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    getPRD()->rgb = make_float3( missData->r, missData->g, missData->b );
}
// Custom intersection program for a sphere centered at the origin with radius
// from the hitgroup record. Reports the near intersection along with the
// lat/long texture coordinates and the shading normal as attributes.
extern "C" __global__ void __intersection__is()
{
    HitGroupData* hg_data = reinterpret_cast<HitGroupData*>( optixGetSbtDataPointer() );
    const float3 orig = optixGetObjectRayOrigin();
    const float3 dir = optixGetObjectRayDirection();
    const float3 center = {0.f, 0.f, 0.f};
    const float radius = hg_data->radius;
    // Standard quadratic ray/sphere test with a normalized direction D.
    const float3 O = orig - center;
    const float l = 1 / length( dir );
    const float3 D = dir * l;
    const float b = dot( O, D );
    const float c = dot( O, O ) - radius * radius;
    const float disc = b * b - c;
    if( disc > 0.0f )
    {
        const float sdisc = sqrtf( disc );
        // Near root only; the far root is not reported here.
        const float root1 = ( -b - sdisc );
        // NOTE(review): root11 appears to be a zero refinement offset kept for
        // clarity with other SDK sphere samples — confirm before removing.
        const float root11 = 0.0f;
        const float3 shading_normal = ( O + ( root1 + root11 ) * D ) / radius;
        // Lat/long parameterization: s from azimuth, t from elevation.
        float3 polar = cartesian_to_polar( shading_normal );
        float3 texcoord = make_float3( polar.x * 0.5f * M_1_PIf, ( polar.y + M_PI_2f ) * M_1_PIf, polar.z / radius );
        // Pass texcoord and normal to the closest-hit program as raw-bit attributes.
        unsigned int p0, p1, p2;
        p0 = float_as_int( texcoord.x );
        p1 = float_as_int( texcoord.y );
        p2 = float_as_int( texcoord.z );
        unsigned int n0, n1, n2;
        n0 = float_as_int( shading_normal.x );
        n1 = float_as_int( shading_normal.y );
        n2 = float_as_int( shading_normal.z );
        optixReportIntersection( root1, // t hit
        0, // user hit kind
        p0, p1, p2, // texture coordinates
        n0, n1, n2 ); // geometric normal
    }
}
// Closest-hit program: samples the demand-loaded texture at the hit point.
// Texture coordinates and normal come from the intersection program's
// attributes; ray differentials from the payload drive anisotropic filtering
// via tex2DGrad, which also records page requests for non-resident tiles.
extern "C" __global__ void __closesthit__ch()
{
    // The demand-loaded texture id is provided in the hit group data.
    HitGroupData* hg_data = reinterpret_cast<HitGroupData*>( optixGetSbtDataPointer() );
    unsigned int textureId = hg_data->demand_texture_id;
    const float textureScale = hg_data->texture_scale;
    const float radius = hg_data->radius;
    // The texture coordinates and normal are calculated by the intersection shader are provided as attributes.
    const float3 texcoord = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ),
    int_as_float( optixGetAttribute_2() ) );
    const float3 N = make_float3( int_as_float( optixGetAttribute_3() ), int_as_float( optixGetAttribute_4() ),
    int_as_float( optixGetAttribute_5() ) );
    // Compute world space texture derivatives based on normal and radius, assuming a lat/long projection
    float3 dPds = radius * 2.0f * M_PIf * make_float3( N.y, -N.x, 0.0f );
    dPds /= dot( dPds, dPds );
    float3 dPdt = radius * M_PIf * normalize( cross( N, dPds ) );
    dPdt /= dot( dPdt, dPdt );
    // Compute final texture coordinates
    // textureScale wraps the texture around the sphere multiple times,
    // re-centered so the pattern stays aligned.
    float s = texcoord.x * textureScale - 0.5f * ( textureScale - 1.0f );
    float t = ( 1.0f - texcoord.y ) * textureScale - 0.5f * ( textureScale - 1.0f );
    // Get the ray direction and hit distance
    RayPayload* prd = getPRD();
    const float3 rayDir = optixGetWorldRayDirection();
    const float thit = optixGetRayTmax();
    // Compute the ray differential values at the intersection point
    float3 rdx = prd->origin_dx + thit * prd->direction_dx;
    float3 rdy = prd->origin_dy + thit * prd->direction_dy;
    // Get texture space texture derivatives based on ray differentials
    float2 ddx, ddy;
    computeTextureDerivatives( ddx, ddy, dPds, dPdt, rdx, rdy, N, rayDir );
    // Scale the texture derivatives based on the texture scale (how many times the
    // texture wraps around the sphere) and the mip bias
    float biasScale = exp2f( params.mipLevelBias );
    ddx *= textureScale * biasScale;
    ddy *= textureScale * biasScale;
    // Sample the texture
    // requestIfResident=true lets the demand loader queue the needed tile if
    // it is not yet resident; isResident reports whether real data was read.
    const bool requestIfResident = true;
    bool isResident = true;
    float4 color = tex2DGrad<float4>(
    params.demandTextureContext, textureId, s, t, ddx, ddy, &isResident, requestIfResident );
    prd->rgb = make_float3( color );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDemandTexture/optixDemandTexture.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <optix.h>
#include <DemandLoading/DeviceContext.h>
#include <cuda_runtime.h>
// Ray type indices used to index SBT records; this sample traces only
// radiance rays.
enum RayType
{
    RAY_TYPE_RADIANCE = 0,
    RAY_TYPE_COUNT
};
// Launch parameters shared between host and device for the demand-texture
// sample.
struct Params
{
    // Render buffer (one uchar4 RGBA pixel per image location)
    uchar4*      result_buffer;
    unsigned int image_width;
    unsigned int image_height;

    // Device-related data for multi-gpu rendering
    unsigned int device_idx;
    unsigned int num_devices;
    int          origin_x;
    int          origin_y;

    // Handle to scene description for ray traversal
    OptixTraversableHandle handle;

    // Camera parameters (eye position and U/V/W basis vectors)
    float3 eye;
    float3 U;
    float3 V;
    float3 W;

    // Bucket parameters (for tiled rendering)
    unsigned int bucket_index;
    unsigned int bucket_width;
    unsigned int bucket_height;

    // Texture data
    float                        mipLevelBias;      // added to the computed mip level when sampling
    demandLoading::DeviceContext demandTextureContext;
    cudaMipmappedArray_t         nonDemandTextureArray;
    cudaTextureObject_t          nonDemandTexture;

    // Render mode
    float diffScale;
    int   numTextureTaps;
};
// Per-program SBT record payloads.
struct RayGenData
{
    // Empty
};

struct MissData
{
    // Background color
    float r, g, b;
};

struct HitGroupData
{
    float        radius;             // sphere radius in object space
    unsigned int demand_texture_id;  // id passed to the demand-loading texture sampler
    float        texture_scale;      // number of texture repeats around the sphere
    float        texture_lod;
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDenoiser/OptiXDenoiser.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stubs.h>
#include <optix_denoiser_tiling.h>
#include <sutil/Exception.h>
#include <cuda_runtime.h>
#include <cmath>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdlib>
#include <iomanip>
#include <vector>
// OptiX device-context log callback: print messages with level < 4 to stderr
// as "[level][tag]: message". Higher (more verbose) levels are dropped.
static void context_log_cb( uint32_t level, const char* tag, const char* message, void* /*cbdata*/ )
{
    if( level >= 4 )
        return;
    std::cerr << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << "\n";
}
// Create four channel float OptixImage2D with given dimension. Allocate memory on device and
// Copy data from host memory given in hmem to device if hmem is nonzero.
// Create a four-channel float OptixImage2D of the given dimensions, backed by
// freshly allocated device memory. If hmem is non-null, the host data is
// copied into the new allocation.
static OptixImage2D createOptixImage2D( unsigned int width, unsigned int height, const float* hmem = nullptr )
{
    const uint64_t frame_byte_size = width * height * sizeof( float4 );

    OptixImage2D image;
    image.width              = width;
    image.height             = height;
    image.rowStrideInBytes   = width * sizeof( float4 );
    image.pixelStrideInBytes = sizeof( float4 );
    image.format             = OPTIX_PIXEL_FORMAT_FLOAT4;

    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &image.data ), frame_byte_size ) );
    if( hmem != nullptr )
    {
        CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( image.data ), hmem, frame_byte_size, cudaMemcpyHostToDevice ) );
    }

    return image;
}
// Thin wrapper around the OptiX denoiser API. Typical usage:
//   init() once, then per frame: update() (frames after the first),
//   exec(), getResults(); finally finish().
class OptiXDenoiser
{
  public:
    // Host-side inputs/outputs for one frame. All image pointers are
    // tightly packed float4 (RGBA) buffers of width x height pixels.
    struct Data
    {
        uint32_t width   = 0;
        uint32_t height  = 0;
        float*   color   = nullptr;   // noisy beauty image (required)
        float*   albedo  = nullptr;   // optional guide layer
        float*   normal  = nullptr;   // optional guide layer (requires albedo)
        float*   flow    = nullptr;   // optional 2D motion vectors (temporal mode)
        std::vector<float*> aovs;     // input AOVs
        std::vector<float*> outputs;  // denoised beauty, followed by denoised AOVs
    };

    // Initialize the API and push all data to the GPU -- normally done only once per session.
    // tileWidth, tileHeight: If nonzero, enable tiling with given dimension.
    // kpMode:                If enabled, use kernel prediction model even if no AOVs are given.
    // temporalMode:          If enabled, use a model for denoising sequences of images.
    // applyFlowMode:         Apply flow vectors from current frame to previous image (no denoising).
    void init( const Data& data,
               unsigned int tileWidth = 0,
               unsigned int tileHeight = 0,
               bool kpMode = false,
               bool temporalMode = false,
               bool applyFlowMode = false );

    // Execute the denoiser. In interactive sessions, this would be done once per frame/subframe.
    void exec();

    // Update denoiser input data on GPU from host memory.
    void update( const Data& data );

    // Copy results from GPU to host memory (into Data::outputs).
    void getResults();

    // Cleanup state, deallocate memory -- normally done only once per render session.
    void finish();

  private:
    // --- Test flow vectors: Flow is applied to noisy input image and written back to result.
    // --- No denoising.
    void applyFlow();

  private:
    OptixDeviceContext  m_context       = nullptr;
    OptixDenoiser       m_denoiser      = nullptr;
    OptixDenoiserParams m_params        = {};

    bool m_temporalMode;
    bool m_applyFlowMode;

    // Device allocations owned by this object (freed in finish()).
    CUdeviceptr m_intensity    = 0;   // HDR intensity (non-AOV models)
    CUdeviceptr m_avgColor     = 0;   // HDR average color (AOV / kernel-prediction models)
    CUdeviceptr m_scratch      = 0;
    uint32_t    m_scratch_size = 0;
    CUdeviceptr m_state        = 0;
    uint32_t    m_state_size   = 0;

    unsigned int m_tileWidth  = 0;
    unsigned int m_tileHeight = 0;
    unsigned int m_overlap    = 0;   // tile overlap in pixels (tiled mode only)

    OptixDenoiserGuideLayer m_guideLayer = {};

    // Layer 0 is the beauty image; layers 1..N are the AOVs.
    std::vector<OptixDenoiserLayer> m_layers;
    std::vector<float*>             m_host_outputs;
};
// Create the OptiX context and denoiser, allocate all device buffers, upload
// the first frame's inputs, and run optixDenoiserSetup. Called once per
// session; later frames go through update().
void OptiXDenoiser::init( const Data& data,
                          unsigned int tileWidth,
                          unsigned int tileHeight,
                          bool kpMode,
                          bool temporalMode,
                          bool applyFlowMode )
{
    SUTIL_ASSERT( data.color );
    SUTIL_ASSERT( data.outputs.size() >= 1 );
    SUTIL_ASSERT( data.width );
    SUTIL_ASSERT( data.height );
    SUTIL_ASSERT_MSG( !data.normal || data.albedo, "Currently albedo is required if normal input is given" );
    SUTIL_ASSERT_MSG( ( tileWidth == 0 && tileHeight == 0 ) || ( tileWidth > 0 && tileHeight > 0 ), "tile size must be > 0 for width and height" );

    m_host_outputs  = data.outputs;
    m_temporalMode  = temporalMode;
    m_applyFlowMode = applyFlowMode;

    // A tile size of zero means "no tiling": the tile is the whole image.
    m_tileWidth  = tileWidth > 0 ? tileWidth : data.width;
    m_tileHeight = tileHeight > 0 ? tileHeight : data.height;

    //
    // Initialize CUDA and create OptiX context
    //
    {
        // Initialize CUDA (a no-op cudaFree forces context creation).
        CUDA_CHECK( cudaFree( nullptr ) );

        CUcontext cu_ctx = nullptr;  // zero means take the current context
        OPTIX_CHECK( optixInit() );
        OptixDeviceContextOptions options = {};
        options.logCallbackFunction       = &context_log_cb;
        options.logCallbackLevel          = 4;
        OPTIX_CHECK( optixDeviceContextCreate( cu_ctx, &options, &m_context ) );
    }

    //
    // Create denoiser
    //
    {
        /*****
        // Load user provided model if model.bin is present in the current directory,
        // configuration of filename not done here.
        std::ifstream file( "model.bin" );
        if ( file.good() ) {
            std::stringstream source_buffer;
            source_buffer << file.rdbuf();
            OPTIX_CHECK( optixDenoiserCreateWithUserModel( m_context, (void*)source_buffer.str().c_str(), source_buffer.str().size(), &m_denoiser ) );
        }
        else
        *****/
        {
            OptixDenoiserOptions options = {};
            options.guideAlbedo          = data.albedo ? 1 : 0;
            options.guideNormal          = data.normal ? 1 : 0;

            // Pick the model: AOV (kernel-prediction) models when AOVs are
            // present or explicitly requested; temporal variants when
            // denoising a sequence.
            OptixDenoiserModelKind modelKind;
            if( kpMode || data.aovs.size() > 0 )
                modelKind = temporalMode ? OPTIX_DENOISER_MODEL_KIND_TEMPORAL_AOV : OPTIX_DENOISER_MODEL_KIND_AOV;
            else
                modelKind = temporalMode ? OPTIX_DENOISER_MODEL_KIND_TEMPORAL : OPTIX_DENOISER_MODEL_KIND_HDR;
            OPTIX_CHECK( optixDenoiserCreate( m_context, modelKind, &options, &m_denoiser ) );
        }
    }

    //
    // Allocate device memory for denoiser
    //
    {
        OptixDenoiserSizes denoiser_sizes;
        OPTIX_CHECK( optixDenoiserComputeMemoryResources(
            m_denoiser,
            m_tileWidth,
            m_tileHeight,
            &denoiser_sizes
        ) );

        if( tileWidth == 0 )
        {
            m_scratch_size = static_cast<uint32_t>( denoiser_sizes.withoutOverlapScratchSizeInBytes );
            m_overlap      = 0;
        }
        else
        {
            m_scratch_size = static_cast<uint32_t>( denoiser_sizes.withOverlapScratchSizeInBytes );
            m_overlap      = denoiser_sizes.overlapWindowSizeInPixels;
        }

        // HDR models need an intensity value; AOV/kernel-prediction models
        // need an average color instead.
        if( data.aovs.size() == 0 && kpMode == false )
        {
            CUDA_CHECK( cudaMalloc(
                reinterpret_cast<void**>( &m_intensity ),
                sizeof( float )
            ) );
        }
        else
        {
            CUDA_CHECK( cudaMalloc(
                reinterpret_cast<void**>( &m_avgColor ),
                3 * sizeof( float )
            ) );
        }
        CUDA_CHECK( cudaMalloc(
            reinterpret_cast<void**>( &m_scratch ),
            m_scratch_size
        ) );
        CUDA_CHECK( cudaMalloc(
            reinterpret_cast<void**>( &m_state ),
            denoiser_sizes.stateSizeInBytes
        ) );
        m_state_size = static_cast<uint32_t>( denoiser_sizes.stateSizeInBytes );

        // Layer 0: the beauty image.
        OptixDenoiserLayer layer = {};
        layer.input  = createOptixImage2D( data.width, data.height, data.color );
        layer.output = createOptixImage2D( data.width, data.height );

        if( m_temporalMode )
        {
            // This is the first frame, create zero motion vector image.
            void * flowmem;
            CUDA_CHECK( cudaMalloc( &flowmem, data.width * data.height * sizeof( float4 ) ) );
            CUDA_CHECK( cudaMemset( flowmem, 0, data.width * data.height * sizeof(float4) ) );
            m_guideLayer.flow = {(CUdeviceptr)flowmem, data.width, data.height, (unsigned int)(data.width * sizeof( float4 )), (unsigned int)sizeof( float4 ), OPTIX_PIXEL_FORMAT_FLOAT4 };

            layer.previousOutput = layer.input; // first frame: no previous output yet
        }
        m_layers.push_back( layer );

        if( data.albedo )
            m_guideLayer.albedo = createOptixImage2D( data.width, data.height, data.albedo );
        if( data.normal )
            m_guideLayer.normal = createOptixImage2D( data.width, data.height, data.normal );

        // Layers 1..N: one layer per AOV.
        for( size_t i=0; i < data.aovs.size(); i++ )
        {
            layer.input  = createOptixImage2D( data.width, data.height, data.aovs[i] );
            layer.output = createOptixImage2D( data.width, data.height );
            if( m_temporalMode )
                layer.previousOutput = layer.input; // first frame
            m_layers.push_back( layer );
        }
    }

    //
    // Setup denoiser
    //
    {
        // Tiled mode needs the overlap added on each side of the tile.
        OPTIX_CHECK( optixDenoiserSetup(
            m_denoiser,
            nullptr,  // CUDA stream
            m_tileWidth + 2 * m_overlap,
            m_tileHeight + 2 * m_overlap,
            m_state,
            m_state_size,
            m_scratch,
            m_scratch_size
        ) );

        m_params.denoiseAlpha    = 0;
        m_params.hdrIntensity    = m_intensity;
        m_params.hdrAverageColor = m_avgColor;
        m_params.blendFactor     = 0.0f;
    }
}
// Upload a new frame's inputs to the device buffers created by init().
// The layer layout matches init(): m_layers[0] is the beauty image and
// m_layers[1..N] are the AOVs, so AOV i maps to layer i + 1.
void OptiXDenoiser::update( const Data& data )
{
    SUTIL_ASSERT( data.color );
    SUTIL_ASSERT( data.outputs.size() >= 1 );
    SUTIL_ASSERT( data.width );
    SUTIL_ASSERT( data.height );
    SUTIL_ASSERT_MSG( !data.normal || data.albedo, "Currently albedo is required if normal input is given" );

    m_host_outputs = data.outputs;

    // Beauty layer.
    CUDA_CHECK( cudaMemcpy( (void*)m_layers[0].input.data, data.color, data.width * data.height * sizeof( float4 ), cudaMemcpyHostToDevice ) );

    if( m_temporalMode )
    {
        // Upload this frame's flow vectors and feed last frame's denoised
        // beauty back in as the previous output.
        CUDA_CHECK( cudaMemcpy( (void*)m_guideLayer.flow.data, data.flow, data.width * data.height * sizeof( float4 ), cudaMemcpyHostToDevice ) );
        m_layers[0].previousOutput = m_layers[0].output;
    }

    if( data.albedo )
        CUDA_CHECK( cudaMemcpy( (void*)m_guideLayer.albedo.data, data.albedo, data.width * data.height * sizeof( float4 ), cudaMemcpyHostToDevice ) );

    if( data.normal )
        CUDA_CHECK( cudaMemcpy( (void*)m_guideLayer.normal.data, data.normal, data.width * data.height * sizeof( float4 ), cudaMemcpyHostToDevice ) );

    // AOV layers start at index 1. (Indexing with i instead of i + 1 would
    // overwrite the beauty input with AOV 0 and leave the last AOV stale.)
    for( size_t i=0; i < data.aovs.size(); i++ )
    {
        CUDA_CHECK( cudaMemcpy( (void*)m_layers[i + 1].input.data, data.aovs[i], data.width * data.height * sizeof( float4 ), cudaMemcpyHostToDevice ) );
        if( m_temporalMode )
            m_layers[i + 1].previousOutput = m_layers[i + 1].output;
    }
}
// Run the denoiser (or the flow-application test mode) on the uploaded frame.
// Blocks until the GPU work is complete.
void OptiXDenoiser::exec()
{
    // HDR models: compute scene intensity from the beauty layer first.
    if( m_intensity )
    {
        OPTIX_CHECK( optixDenoiserComputeIntensity(
            m_denoiser,
            nullptr, // CUDA stream
            &m_layers[0].input,
            m_intensity,
            m_scratch,
            m_scratch_size
        ) );
    }

    // AOV / kernel-prediction models: compute the average color instead.
    if( m_avgColor )
    {
        OPTIX_CHECK( optixDenoiserComputeAverageColor(
            m_denoiser,
            nullptr, // CUDA stream
            &m_layers[0].input,
            m_avgColor,
            m_scratch,
            m_scratch_size
        ) );
    }

    if( m_applyFlowMode )
    {
        // Debug path: warp the previous image by the flow vectors, no denoising.
        applyFlow();
    }
    else
    {
        /** This sample is always using tiling mode; the non-tiled call would be:
        OPTIX_CHECK( optixDenoiserInvoke(
            m_denoiser,
            nullptr, // CUDA stream
            &m_params,
            m_state,
            m_state_size,
            &m_guideLayer,
            m_layers.data(),
            static_cast<unsigned int>( m_layers.size() ),
            0, // input offset X
            0, // input offset y
            m_scratch,
            m_scratch_size
        ) );
        **/
        OPTIX_CHECK( optixUtilDenoiserInvokeTiled(
            m_denoiser,
            nullptr, // CUDA stream
            &m_params,
            m_state,
            m_state_size,
            &m_guideLayer,
            m_layers.data(),
            static_cast<unsigned int>( m_layers.size() ),
            m_scratch,
            m_scratch_size,
            m_overlap,
            m_tileWidth,
            m_tileHeight
        ) );
    }

    CUDA_SYNC_CHECK();
}
// Evaluate a uniform Catmull-Rom spline segment between p[1] and p[2] for
// t in [0, 1], using p[0] and p[3] as the outer control points.
inline float catmull_rom(
    float p[4],
    float t)
{
    // Cubic coefficients of the Catmull-Rom basis (Horner form; the
    // arithmetic matches the expanded expression term for term).
    const float c1 = p[2] - p[0];
    const float c2 = 2.f * p[0] - 5.f * p[1] + 4.f * p[2] - p[3];
    const float c3 = 3.f * ( p[1] - p[2] ) + p[3] - p[0];
    return p[1] + 0.5f * t * ( c1 + t * ( c2 + t * c3 ) );
}
// Apply flow to image at given pixel position and write the RGB result back.
// The source position is the current pixel moved back along the flow vector;
// sampling uses bicubic Catmull-Rom filtering with clamp-to-edge addressing.
static void addFlow(
    float4*       result,
    const float4* image,
    const float4* flow,
    unsigned int  width,
    unsigned int  height,
    unsigned int  x,
    unsigned int  y )
{
    // Source position in the previous image.
    float dst_x = float( x ) - flow[x + y * width].x;
    float dst_y = float( y ) - flow[x + y * width].y;

    // Top-left corner of the 4x4 Catmull-Rom support window.
    float x0 = dst_x - 1.f;
    float y0 = dst_y - 1.f;

    // Gather the 4x4 neighborhood per channel, clamping indices to the image.
    float r[4][4], g[4][4], b[4][4];
    for (int j=0; j < 4; j++)
    {
        for (int k=0; k < 4; k++)
        {
            int tx = static_cast<int>( x0 ) + k;
            if( tx < 0 )
                tx = 0;
            else if( tx >= (int)width )
                tx = width - 1;

            int ty = static_cast<int>( y0 ) + j;
            if( ty < 0 )
                ty = 0;
            else if( ty >= (int)height )
                ty = height - 1;

            r[j][k] = image[tx + ty * width].x;
            g[j][k] = image[tx + ty * width].y;
            b[j][k] = image[tx + ty * width].z;
        }
    }

    // Horizontal pass: interpolate each of the four rows at the fractional x,
    // reusing row 0 of each array to store the four row results.
    float tx = dst_x <= 0.f ? 0.f : dst_x - floorf( dst_x );
    r[0][0] = catmull_rom( r[0], tx );
    r[0][1] = catmull_rom( r[1], tx );
    r[0][2] = catmull_rom( r[2], tx );
    r[0][3] = catmull_rom( r[3], tx );

    g[0][0] = catmull_rom( g[0], tx );
    g[0][1] = catmull_rom( g[1], tx );
    g[0][2] = catmull_rom( g[2], tx );
    g[0][3] = catmull_rom( g[3], tx );

    b[0][0] = catmull_rom( b[0], tx );
    b[0][1] = catmull_rom( b[1], tx );
    b[0][2] = catmull_rom( b[2], tx );
    b[0][3] = catmull_rom( b[3], tx );

    // Vertical pass: interpolate the row results at the fractional y.
    // The alpha channel of 'result' is left untouched.
    float ty = dst_y <= 0.f ? 0.f : dst_y - floorf( dst_y );
    result[y * width + x].x = catmull_rom( r[0], ty );
    result[y * width + x].y = catmull_rom( g[0], ty );
    result[y * width + x].z = catmull_rom( b[0], ty );
}
// Apply flow from the current frame to the previous noisy image (debug mode,
// no denoising). The warp runs on the host: download flow and previous image,
// warp per pixel with addFlow(), and upload the result to each layer's output.
void OptiXDenoiser::applyFlow()
{
    if( m_layers.size() == 0 )
        return;

    const float4* device_flow = (float4*)m_guideLayer.flow.data;
    if( !device_flow )
        return;

    // The original allocated 'frame_byte_size' *elements* of float4, a 16x
    // over-allocation; allocate by pixel count and copy by byte count.
    const uint64_t num_pixels      = (uint64_t)m_layers[0].output.width * m_layers[0].output.height;
    const uint64_t frame_byte_size = num_pixels * sizeof(float4);

    float4* flow = new float4[ num_pixels ];
    CUDA_CHECK( cudaMemcpy( flow, device_flow, frame_byte_size, cudaMemcpyDeviceToHost ) );

    float4* image  = new float4[ num_pixels ];
    float4* result = new float4[ num_pixels ];

    for( size_t i=0; i < m_layers.size(); i++ )
    {
        CUDA_CHECK( cudaMemcpy( image, (float4*)m_layers[i].previousOutput.data, frame_byte_size, cudaMemcpyDeviceToHost ) );

        for( unsigned int y=0; y < m_layers[i].previousOutput.height; y++ )
            for( unsigned int x=0; x < m_layers[i].previousOutput.width; x++ )
                addFlow( result, image, flow, m_layers[i].previousOutput.width, m_layers[i].previousOutput.height, x, y );

        CUDA_CHECK( cudaMemcpy( (void*)m_layers[i].output.data, result, frame_byte_size, cudaMemcpyHostToDevice ) );
    }

    delete[] result;
    delete[] image;
    delete[] flow;
}
void OptiXDenoiser::getResults()
{
const uint64_t frame_byte_size = m_layers[0].output.width*m_layers[0].output.height*sizeof(float4);
for( size_t i=0; i < m_layers.size(); i++ )
{
CUDA_CHECK( cudaMemcpy(
m_host_outputs[i],
reinterpret_cast<void*>( m_layers[i].output.data ),
frame_byte_size,
cudaMemcpyDeviceToHost
) );
// We start with a noisy image in this mode for each frame, otherwise the warped images would accumulate.
if( m_applyFlowMode )
CUDA_CHECK( cudaMemcpy( (void*)m_layers[i].previousOutput.data, reinterpret_cast<void*>( m_layers[i].input.data ),
frame_byte_size, cudaMemcpyDeviceToHost ) );
}
}
void OptiXDenoiser::finish()
{
// Cleanup resources
optixDenoiserDestroy( m_denoiser );
optixDeviceContextDestroy( m_context );
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_intensity)) );
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_avgColor)) );
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_scratch)) );
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_state)) );
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_guideLayer.albedo.data)) );
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_guideLayer.normal.data)) );
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_guideLayer.flow.data)) );
for( size_t i=0; i < m_layers.size(); i++ )
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_layers[i].input.data) ) );
for( size_t i=0; i < m_layers.size(); i++ )
CUDA_CHECK( cudaFree(reinterpret_cast<void*>(m_layers[i].output.data) ) );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDenoiser/optixDenoiser.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "OptiXDenoiser.h"
#include <sutil/Exception.h>
#include <sutil/sutil.h>
#include <cmath>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <string>
#include <vector>
//------------------------------------------------------------------------------
//
// optixDenoiser -- Demonstration of the OptiX denoising API.
//
//------------------------------------------------------------------------------
// Print the command-line usage text and terminate the process.
void printUsageAndExit( const std::string& argv0 )
{
    std::cout
        << "Usage : " << argv0 << " [options] {-A | --AOV aov.exr} color.exr\n"
        << "Options: -n | --normal <normal.exr>\n"
           " -a | --albedo <albedo.exr>\n"
           " -f | --flow <flow.exr>\n"
           " -o | --out <out.exr> Defaults to 'denoised.exr'\n"
           " -F | --Frames <int-int> first-last frame number in sequence\n"
           " -e | --exposure <float> apply exposure on output images\n"
           " -t | --tilesize <int> <int> use tiling to save GPU memory\n"
           " -z apply flow to input images (no denoising), write output\n"
           " -k use kernel prediction model even if there are no AOVs\n"
           "in sequences, first occurrence of '+' characters substring in filenames is replaced by framenumber\n"
        << std::endl;
    exit( 0 );
}
// Copy 'filename' into 'result', replacing the first run of '+' characters
// with 'frame' rendered with leading zeros to the width of the run
// (e.g. "img_++++.exr", frame 12 -> "img_0012.exr").
//
// Returns true if frame == -1 (no substitution requested), if the filename
// contains no '+' placeholder, or if the substitution succeeded. Returns
// false (after printing a diagnostic) when the frame number needs more
// digits than the placeholder provides.
static bool getFrameFilename( std::string& result, const std::string& filename, int frame )
{
    result = filename;
    if( frame == -1 )
        return true;

    const size_t ppos = result.find( '+' );
    if( ppos == std::string::npos )
        return true;  // static filename without "+" characters

    // Measure the length of the '+' run.
    size_t nplus = 0;
    size_t cpos  = ppos;
    while( result[cpos] != 0 && result[cpos] == '+' )
    {
        nplus++;
        cpos++;
    }

    const std::string fn = std::to_string( frame );
    if( fn.length() > nplus )
    {
        // Fixed message: the original ran the placeholder length and
        // "too small" together (e.g. "length: 4too small").
        std::cout << "illegal temporal filename, framenumber requires " << fn.length()
                  << " digits, \"+\" placeholder length: " << nplus << " too small" << std::endl;
        return false;
    }

    // Zero-fill the placeholder, then write the frame digits right-aligned.
    for( size_t i = 0; i < nplus; i++ )
        result[ppos + i] = '0';
    for( size_t i = 0; i < fn.length(); i++ )
        result[ppos + nplus - 1 - i] = fn[fn.length() - 1 - i];
    return true;
}
// Command-line driver: parse options, then load, denoise, and save one image
// or a frame sequence. Fix over the original: the 'aovs' vector is now freed
// and cleared at the end of every frame — previously it accumulated across
// frames, leaking every loaded AOV image and growing data.aovs past the
// number of denoiser layers created in init().
int32_t main( int32_t argc, char** argv )
{
    if( argc < 2 )
        printUsageAndExit( argv[0] );

    // The color (beauty) image is always the last argument.
    std::string              color_filename = argv[argc - 1];
    std::string              normal_filename;
    std::string              albedo_filename;
    std::string              flow_filename;
    std::string              output_filename = "denoised.exr";
    std::vector<std::string> aov_filenames;
    bool                     kpMode     = false;
    bool                     applyFlow  = false;
    float                    exposure   = 0.f;
    int                      firstFrame = -1, lastFrame = -1;
    unsigned int             tileWidth = 0, tileHeight = 0;

    // Parse the remaining command-line options.
    for( int32_t i = 1; i < argc - 1; ++i )
    {
        std::string arg( argv[i] );

        if( arg == "-n" || arg == "--normal" )
        {
            if( i == argc - 2 )
                printUsageAndExit( argv[0] );
            normal_filename = argv[++i];
        }
        else if( arg == "-a" || arg == "--albedo" )
        {
            if( i == argc - 2 )
                printUsageAndExit( argv[0] );
            albedo_filename = argv[++i];
        }
        else if( arg == "-e" || arg == "--exposure" )
        {
            if( i == argc - 2 )
                printUsageAndExit( argv[0] );
            exposure = std::stof( argv[++i] );
        }
        else if( arg == "-f" || arg == "--flow" )
        {
            if( i == argc - 2 )
                printUsageAndExit( argv[0] );
            flow_filename = argv[++i];
        }
        else if( arg == "-o" || arg == "--out" )
        {
            if( i == argc - 2 )
                printUsageAndExit( argv[0] );
            output_filename = argv[++i];
        }
        else if( arg == "-t" || arg == "--tilesize" )
        {
            if( i == argc - 3 )
                printUsageAndExit( argv[0] );
            tileWidth  = atoi( argv[++i] );
            tileHeight = atoi( argv[++i] );
        }
        else if( arg == "-A" || arg == "--AOV" )
        {
            if( i == argc - 2 )
                printUsageAndExit( argv[0] );
            aov_filenames.push_back( std::string( argv[++i] ) );
        }
        else if( arg == "-k" )
        {
            kpMode = true;
        }
        else if( arg == "-z" )
        {
            applyFlow = true;
        }
        else if( arg == "-F" || arg == "--Frames" )
        {
            if( i == argc - 2 )
                printUsageAndExit( argv[0] );
            std::string s( argv[++i] );
            size_t      cpos = s.find( '-' );
            if( cpos == 0 || cpos == s.length() - 1 || cpos == std::string::npos )
                printUsageAndExit( argv[0] );
            firstFrame = atoi( s.substr( 0, cpos ).c_str() );
            lastFrame  = atoi( s.substr( cpos + 1 ).c_str() );
            if( firstFrame < 0 || lastFrame < 0 || firstFrame > lastFrame )
            {
                std::cout << "illegal frame range, first frame must be <= last frame and >= 0" << std::endl;
                exit( 0 );
            }
        }
        else
        {
            printUsageAndExit( argv[0] );
        }
    }

    // Temporal denoising is enabled when an explicit frame range was given.
    bool temporalMode = bool( firstFrame != -1 );

    sutil::ImageBuffer color  = {};
    sutil::ImageBuffer normal = {};
    sutil::ImageBuffer albedo = {};
    sutil::ImageBuffer flow   = {};

    std::vector<sutil::ImageBuffer> aovs;

    try
    {
        OptiXDenoiser denoiser;

        // With no -F option firstFrame == lastFrame == -1, so this loop runs
        // exactly once and getFrameFilename() leaves filenames untouched.
        for( int frame = firstFrame; frame <= lastFrame; frame++ )
        {
            const double t0 = sutil::currentTime();
            std::cout << "Loading inputs ";
            if( frame != -1 )
                std::cout << "for frame " << frame;
            std::cout << " ..." << std::endl;

            std::string frame_filename;
            if( !getFrameFilename( frame_filename, color_filename, frame ) )
            {
                std::cout << "cannot open color file" << std::endl;
                exit( 0 );
            }
            color = sutil::loadImage( frame_filename.c_str() );
            std::cout << "\tLoaded color image " << frame_filename << " (" << color.width << "x" << color.height << ")"
                      << std::endl;

            if( !normal_filename.empty() )
            {
                if( !getFrameFilename( frame_filename, normal_filename, frame ) )
                {
                    std::cout << "cannot open normal file" << std::endl;
                    exit( 0 );
                }
                // allocate four channels. only two/three channels used depending on model.
                normal = sutil::loadImage( frame_filename.c_str() );
                std::cout << "\tLoaded normal image " << frame_filename << std::endl;
            }

            if( !albedo_filename.empty() )
            {
                if( !getFrameFilename( frame_filename, albedo_filename, frame ) )
                {
                    std::cout << "cannot open albedo file" << std::endl;
                    exit( 0 );
                }
                // allocate four channels. only three channels used.
                albedo = sutil::loadImage( frame_filename.c_str() );
                std::cout << "\tLoaded albedo image " << frame_filename << std::endl;
            }

            // Flow is only meaningful from the second frame of a sequence on.
            if( frame > firstFrame && !flow_filename.empty() )
            {
                if( !getFrameFilename( frame_filename, flow_filename, frame ) )
                {
                    std::cout << "cannot open flow file" << std::endl;
                    exit( 0 );
                }
                // allocate four channels. only two channels used.
                // sutil::loadImage handles only 3 and 4 channels.
                flow = sutil::loadImage( frame_filename.c_str() );
                std::cout << "\tLoaded flow image " << frame_filename << std::endl;
            }

            for( size_t i = 0; i < aov_filenames.size(); i++ )
            {
                if( !getFrameFilename( frame_filename, aov_filenames[i], frame ) )
                {
                    std::cout << "cannot open aov file" << std::endl;
                    exit( 0 );
                }
                aovs.push_back( sutil::loadImage( frame_filename.c_str() ) );
                std::cout << "\tLoaded aov image " << frame_filename << std::endl;
            }

            const double t1 = sutil::currentTime();
            std::cout << "\tLoad inputs from disk :" << std::fixed << std::setw( 8 ) << std::setprecision( 2 )
                      << ( t1 - t0 ) * 1000.0 << " ms" << std::endl;

            SUTIL_ASSERT( color.pixel_format == sutil::FLOAT4 );
            SUTIL_ASSERT( !albedo.data || albedo.pixel_format == sutil::FLOAT4 );
            SUTIL_ASSERT( !normal.data || normal.pixel_format == sutil::FLOAT4 );
            SUTIL_ASSERT( !flow.data || flow.pixel_format == sutil::FLOAT4 );
            for( size_t i = 0; i < aov_filenames.size(); i++ )
                SUTIL_ASSERT( aovs[i].pixel_format == sutil::FLOAT4 );

            OptiXDenoiser::Data data;
            data.width  = color.width;
            data.height = color.height;
            data.color  = reinterpret_cast<float*>( color.data );
            data.albedo = reinterpret_cast<float*>( albedo.data );
            data.normal = reinterpret_cast<float*>( normal.data );
            data.flow   = reinterpret_cast<float*>( flow.data );

            // set AOVs
            for( size_t i = 0; i < aovs.size(); i++ )
                data.aovs.push_back( reinterpret_cast<float*>( aovs[i].data ) );

            // allocate outputs
            for( size_t i = 0; i < 1 + aovs.size(); i++ )
                data.outputs.push_back( new float[color.width * color.height * 4] );

            std::cout << "Denoising ..." << std::endl;

            if( frame == firstFrame )
            {
                const double t0 = sutil::currentTime();
                denoiser.init( data, tileWidth, tileHeight, kpMode, temporalMode, applyFlow );
                const double t1 = sutil::currentTime();
                std::cout << "\tAPI Initialization :" << std::fixed << std::setw( 8 ) << std::setprecision( 2 )
                          << ( t1 - t0 ) * 1000.0 << " ms" << std::endl;
            }
            else
            {
                denoiser.update( data );
            }

            {
                const double t0 = sutil::currentTime();
                denoiser.exec();
                const double t1 = sutil::currentTime();
                std::cout << "\tDenoise frame :" << std::fixed << std::setw( 8 ) << std::setprecision( 2 )
                          << ( t1 - t0 ) * 1000.0 << " ms" << std::endl;
            }

            {
                const double t0 = sutil::currentTime();
                denoiser.getResults();
                const double t1 = sutil::currentTime();
                std::cout << "\tCleanup state/copy to host:" << std::fixed << std::setw( 8 ) << std::setprecision( 2 )
                          << ( t1 - t0 ) * 1000.0 << " ms" << std::endl;
            }

            {
                const double t0 = sutil::currentTime();
                for( size_t i = 0; i < 1 + aovs.size(); i++ )
                {
                    sutil::ImageBuffer output_image;
                    output_image.width        = color.width;
                    output_image.height       = color.height;
                    output_image.data         = data.outputs[i];
                    output_image.pixel_format = sutil::FLOAT4;

                    frame_filename = output_filename;
                    getFrameFilename( frame_filename, output_filename, frame );

                    // AOV outputs get "_<aov-basename>_denoised" inserted
                    // before the extension of the output filename.
                    if( i > 0 )
                    {
                        std::string basename = aov_filenames[i - 1].substr( aov_filenames[i - 1].find_last_of( "/\\" ) + 1 );
                        std::string::size_type const p( basename.find_last_of( '.' ) );
                        std::string b = basename.substr( 0, p );
                        frame_filename.insert( frame_filename.rfind( '.' ), "_" + b + "_denoised" );
                    }

                    if( exposure != 0.f )
                    {
                        for( unsigned int p = 0; p < color.width * color.height; p++ )
                        {
                            float* f = &( (float*)output_image.data )[p * 4 + 0];
                            f[0] *= std::pow( 2.f, exposure );
                            f[1] *= std::pow( 2.f, exposure );
                            f[2] *= std::pow( 2.f, exposure );
                        }
                    }

                    std::cout << "Saving results to '" << frame_filename << "'..." << std::endl;
                    sutil::saveImage( frame_filename.c_str(), output_image, false );
                }
                const double t1 = sutil::currentTime();
                std::cout << "\tSave output to disk :" << std::fixed << std::setw( 8 ) << std::setprecision( 2 )
                          << ( t1 - t0 ) * 1000.0 << " ms" << std::endl;
            }

            delete[] reinterpret_cast<float*>( color.data );
            delete[] reinterpret_cast<float*>( albedo.data );
            delete[] reinterpret_cast<float*>( normal.data );
            delete[] reinterpret_cast<float*>( flow.data );
            for( size_t i = 0; i < 1 + aovs.size(); i++ )
                delete[]( data.outputs[i] );

            // Free the AOV images loaded for this frame and reset the list.
            // Without this the vector accumulates across frames, leaking the
            // image data and making data.aovs exceed the denoiser layer count.
            for( size_t i = 0; i < aovs.size(); i++ )
                delete[] reinterpret_cast<float*>( aovs[i].data );
            aovs.clear();
        }
        denoiser.finish();
    }
    catch( std::exception& e )
    {
        std::cerr << "ERROR: exception caught '" << e.what() << "'" << std::endl;
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicGeometry/optixDynamicGeometry.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Camera.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/Trackball.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <optix_stack_size.h>
#include <GLFW/glfw3.h>
#include "optixDynamicGeometry.h"
#include "vertices.h"
#include <array>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
// Set by the window-size callback; consumed by handleResize() to reallocate the output buffer.
bool resize_dirty = false;
// True while the window is iconified; rendering keeps the last resolution (see windowSizeCallback).
bool minimized = false;

// Camera state
// Set by the input callbacks; consumed (and cleared) by handleCameraUpdate().
bool camera_changed = true;
sutil::Camera camera;
sutil::Trackball trackball;

// Mouse state
// GLFW button id currently held down, or -1 when no button is pressed.
int32_t mouse_button = -1;
//------------------------------------------------------------------------------
//
// Local types
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Shader binding table record: an OPTIX_SBT_RECORD_ALIGNMENT-aligned program
// header (filled by optixSbtRecordPackHeader) followed by the per-record user
// data that the corresponding device program reads.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};

typedef Record<RayGenData> RayGenRecord;
typedef Record<MissData> MissRecord;
typedef Record<HitGroupData> HitGroupRecord;
// All OptiX/CUDA state for the sample, passed by reference between the
// setup, per-frame update, launch and cleanup functions below.
struct DynamicGeometryState
{
    OptixDeviceContext context = 0;

    // Scratch buffer shared by all acceleration-structure builds/updates;
    // buildMeshAccel() sizes it to fit the largest build or update.
    size_t temp_buffer_size = 0;
    CUdeviceptr d_temp_buffer = 0;
    // Device vertex buffer reused for generating the deformed/exploding vertices.
    CUdeviceptr d_temp_vertices = 0;
    // Device array of OptixInstance, input to the IAS build (one per sphere).
    CUdeviceptr d_instances = 0;

    unsigned int triangle_flags = OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT;

    OptixBuildInput ias_instance_input = {};
    OptixBuildInput triangle_input = {};

    // Top-level IAS plus one GAS per sphere variant. The two static instances
    // share static_gas_handle (see buildMeshAccel()).
    OptixTraversableHandle ias_handle;
    OptixTraversableHandle static_gas_handle;
    OptixTraversableHandle deforming_gas_handle;
    OptixTraversableHandle exploding_gas_handle;
    CUdeviceptr d_ias_output_buffer = 0;
    CUdeviceptr d_static_gas_output_buffer;
    CUdeviceptr d_deforming_gas_output_buffer;
    CUdeviceptr d_exploding_gas_output_buffer;
    size_t ias_output_buffer_size = 0;
    size_t static_gas_output_buffer_size = 0;
    size_t deforming_gas_output_buffer_size = 0;
    size_t exploding_gas_output_buffer_size = 0;

    OptixModule ptx_module = 0;
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixPipeline pipeline = 0;
    OptixProgramGroup raygen_prog_group;
    OptixProgramGroup miss_group = 0;
    OptixProgramGroup hit_group = 0;

    CUstream stream = 0;
    // Host-side launch parameters plus their device-side copy.
    Params params;
    Params* d_params;

    // Current animation time in seconds, and the time of the last full rebuild
    // of the exploding sphere's GAS (see updateMeshAccel()).
    float time = 0.f;
    float last_exploding_sphere_rebuild_time = 0.f;

    OptixShaderBindingTable sbt = {};
};
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
// Sphere tessellation: the vertex generator emits res * res * 6 un-indexed
// vertices (two triangles per grid cell), see buildMeshAccel().
const int32_t g_tessellation_resolution = 128;

// How often (per second of animation time) the exploding sphere's GAS is
// fully rebuilt instead of refitted, to maintain traversal quality.
const float g_exploding_gas_rebuild_frequency = 10.f;

// Four instances: two static spheres (the second with an animated y-translation),
// one deforming and one exploding sphere (handles assigned in buildMeshAccel()).
const int32_t INST_COUNT = 4;

// Per-instance diffuse color; indexed by the instance's SBT offset in createSBT().
const std::array<float3, INST_COUNT> g_diffuse_colors =
{ {
    { 0.70f, 0.70f, 0.70f },
    { 0.80f, 0.80f, 0.80f },
    { 0.90f, 0.90f, 0.90f },
    { 1.00f, 1.00f, 1.00f }
} };

struct Instance
{
    // Row-major 3x4 object-to-world transform (copied into OptixInstance::transform).
    float m[12];
};

// Identity rotations, spheres lined up along the x axis.
const std::array<Instance, INST_COUNT> g_instances =
{ {
    {{1, 0, 0, -4.5f, 0, 1, 0, 0, 0, 0, 1, 0}},
    {{1, 0, 0, -1.5f, 0, 1, 0, 0, 0, 0, 1, 0}},
    {{1, 0, 0, 1.5f, 0, 1, 0, 0, 0, 0, 1, 0}},
    {{1, 0, 0, 4.5f, 0, 1, 0, 0, 0, 0, 1, 0}}
} };
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// GLFW mouse-button callback: begin trackball interaction on press,
// clear the tracked button on release.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    // Cursor position at the moment of the event.
    double cursor_x = 0.0;
    double cursor_y = 0.0;
    glfwGetCursorPos( window, &cursor_x, &cursor_y );

    if( action != GLFW_PRESS )
    {
        // Button released: stop tracking.
        mouse_button = -1;
        return;
    }

    // Remember which button is held and start the trackball gesture there.
    mouse_button = button;
    trackball.startTracking( static_cast<int>( cursor_x ), static_cast<int>( cursor_y ) );
}
// GLFW cursor-motion callback: while a mouse button is held, drive the
// trackball (left = orbit around look-at point, right = move the eye)
// and flag the camera as dirty.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    // Only left/right drags manipulate the camera.
    if( mouse_button != GLFW_MOUSE_BUTTON_LEFT && mouse_button != GLFW_MOUSE_BUTTON_RIGHT )
        return;

    // The launch params stashed on the window carry the current viewport size.
    Params* params = static_cast<Params*>( glfwGetWindowUserPointer( window ) );

    trackball.setViewMode( mouse_button == GLFW_MOUSE_BUTTON_LEFT ? sutil::Trackball::LookAtFixed
                                                                  : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), params->width, params->height );
    camera_changed = true;
}
// GLFW window-size callback: propagate the new resolution into the launch
// parameters and mark camera + output buffer dirty.
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    // Keep rendering at the current resolution when the window is minimized.
    if( minimized )
        return;

    // Clamp so both dimensions stay at least 1.
    sutil::ensureMinimumSize( res_x, res_y );

    Params* params = static_cast<Params*>( glfwGetWindowUserPointer( window ) );
    params->width = res_x;
    params->height = res_y;

    camera_changed = true;  // aspect ratio changed
    resize_dirty = true;    // output buffer must be reallocated
}
// GLFW iconify callback: a non-zero argument means the window was minimized.
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = ( iconified != 0 );
}
// GLFW key callback: Q / Escape closes the window; G is reserved for a
// UI-draw toggle (currently a no-op).
//
// Fix: the original chained the GLFW_KEY_G check as the `else` of
// `action == GLFW_PRESS`, so it would have fired on key release/repeat
// instead of on press. Both key checks now live inside the press branch.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    // Only react to the initial press, not repeats or releases.
    if( action != GLFW_PRESS )
        return;

    if( key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE )
    {
        glfwSetWindowShouldClose( window, true );
    }
    else if( key == GLFW_KEY_G )
    {
        // toggle UI draw
    }
}
// GLFW scroll callback: vertical scroll zooms via the trackball; mark the
// camera dirty only if the trackball actually consumed the event.
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
    const bool consumed = trackball.wheelEvent( static_cast<int>( yscroll ) );
    if( consumed )
        camera_changed = true;
}
//------------------------------------------------------------------------------
//
// Helper functions
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Print the command-line reference to stderr and terminate the process.
// NOTE(review): called for both --help and malformed arguments, and always
// exits with status 0 — callers cannot distinguish the two cases.
void printUsageAndExit( const char* argv0 )
{
    std::cerr
        << "Usage : " << argv0 << " [options]\n"
        << "Options: --file | -f <filename> File for image output\n"
        << " --time | -t Animation time for image output (default 1)\n"
        << " --frames | -n Number of animation frames for image output (default 16)\n"
        << " --no-gl-interop Disable GL interop for display\n"
        << " --dim=<width>x<height> Set image dimensions; defaults to 1024x768\n"
        << " --help | -h Print this usage message\n";
    exit( 0 );
}
// Prepare per-launch state: reset the launch parameters, create the work
// stream, and allocate the device-side copy of Params.
void initLaunchParams( DynamicGeometryState& state )
{
    // The frame-buffer pointer is filled in each frame once the output buffer is mapped.
    state.params.frame_buffer = nullptr;
    state.params.subframe_index = 0u;

    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params ), sizeof( Params ) ) );
}
// If an input callback flagged the camera as dirty, refresh the eye position
// and the UVW ray-generation basis in the launch parameters.
void handleCameraUpdate( Params& params )
{
    if( !camera_changed )
        return;
    camera_changed = false;

    const float aspect = static_cast<float>( params.width ) / static_cast<float>( params.height );
    camera.setAspectRatio( aspect );
    params.eye = camera.eye();
    camera.UVWFrame( params.U, params.V, params.W );
}
// Reallocate the output buffer if the window-size callback marked it dirty.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    if( !resize_dirty )
        return;
    resize_dirty = false;

    output_buffer.resize( params.width, params.height );
}
// Apply any pending camera and window-resize changes before the next launch.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    handleCameraUpdate( params );
    handleResize( output_buffer, params );
}
// Render one frame: map the output buffer, upload the launch parameters and
// run the OptiX pipeline. Blocks until the launch has finished.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, DynamicGeometryState& state )
{
    // Launch
    // Map the output buffer so the device writes pixels into it directly.
    uchar4* result_buffer_data = output_buffer.map();
    state.params.frame_buffer = result_buffer_data;

    // The async params upload is enqueued on the same stream as the launch,
    // so the launch is guaranteed to see the updated values.
    CUDA_CHECK( cudaMemcpyAsync(
        reinterpret_cast< void* >( state.d_params ),
        &state.params, sizeof( Params ),
        cudaMemcpyHostToDevice, state.stream
    ) );

    OPTIX_CHECK( optixLaunch(
        state.pipeline,
        state.stream,
        reinterpret_cast< CUdeviceptr >( state.d_params ),
        sizeof( Params ),
        &state.sbt,
        state.params.width, // launch width
        state.params.height, // launch height
        1 // launch depth
    ) );

    // Unmap before GL consumes the buffer, then wait for the frame to complete.
    output_buffer.unmap();
    CUDA_SYNC_CHECK();
}
// Blit the rendered image (shared with GL via the output buffer's PBO)
// onto the window.
void displaySubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, sutil::GLDisplay& gl_display, GLFWwindow* window )
{
    // The framebuffer resolution can differ from the logical window size
    // (e.g. on HiDPI displays), so query it explicitly.
    int framebuf_res_x = 0;
    int framebuf_res_y = 0;
    glfwGetFramebufferSize( window, &framebuf_res_x, &framebuf_res_y );

    gl_display.display( output_buffer.width(),
                        output_buffer.height(),
                        framebuf_res_x,
                        framebuf_res_y,
                        output_buffer.getPBO() );
}
// OptiX device-context log callback: prints "[level][         tag]: message"
// to stderr with fixed-width level/tag columns.
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::cerr << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << "\n";
}
void initCameraState()
{
camera.setEye( make_float3( 0.f, 1.f, -20.f ) );
camera.setLookat( make_float3( 0, 0, 0 ) );
camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
camera.setFovY( 35.0f );
camera_changed = true;
trackball.setCamera( &camera );
trackball.setMoveSpeed( 10.0f );
trackball.setReferenceFrame(
make_float3( 1.0f, 0.0f, 0.0f ),
make_float3( 0.0f, 0.0f, 1.0f ),
make_float3( 0.0f, 1.0f, 0.0f )
);
trackball.setGimbalLock( true );
}
// Initialize CUDA and OptiX and create the OptiX device context with
// logging routed through context_log_cb.
void createContext( DynamicGeometryState& state )
{
    // Force CUDA runtime initialization before any OptiX call.
    CUDA_CHECK( cudaFree( 0 ) );
    OPTIX_CHECK( optixInit() );

    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &context_log_cb;
    options.logCallbackLevel = 4;

    // A CUcontext of zero selects the current CUDA context.
    CUcontext cu_ctx = 0;
    OptixDeviceContext context;
    OPTIX_CHECK( optixDeviceContextCreate( cu_ctx, &options, &context ) );
    state.context = context;
}
// Fill state.d_temp_vertices with the tessellated sphere's vertices for the
// given animation mode at the current state.time.
// NOTE(review): the callee's name contains a typo ("Vetrices"); it is declared
// externally (vertices.h), so it must be kept as-is here.
void launchGenerateAnimatedVertices( DynamicGeometryState& state, AnimationMode animation_mode )
{
    generateAnimatedVetrices( (float3*)state.d_temp_vertices, animation_mode, state.time, g_tessellation_resolution, g_tessellation_resolution );
}
// Animate the scene for the current state.time:
//  1. refit the deforming sphere's GAS from freshly generated vertices,
//  2. refit — or periodically rebuild — the exploding sphere's GAS,
//  3. animate one instance transform and refit the IAS.
// All builds reuse state.d_temp_buffer, which buildMeshAccel() sized to fit
// the largest of these updates.
void updateMeshAccel( DynamicGeometryState& state )
{
    // Generate deformed sphere vertices
    launchGenerateAnimatedVertices( state, AnimationMode_Deform );

    // Update deforming GAS
    // Build flags must match the original build for an update to be valid.
    OptixAccelBuildOptions gas_accel_options = {};
    gas_accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_UPDATE | OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS;
    gas_accel_options.operation = OPTIX_BUILD_OPERATION_UPDATE;

    OPTIX_CHECK( optixAccelBuild(
        state.context,
        state.stream, // CUDA stream
        &gas_accel_options,
        &state.triangle_input,
        1, // num build inputs
        state.d_temp_buffer,
        state.temp_buffer_size,
        state.d_deforming_gas_output_buffer,
        state.deforming_gas_output_buffer_size,
        &state.deforming_gas_handle,
        nullptr, // emitted property list
        0 // num emitted properties
    ) );

    // Generate exploding sphere vertices
    launchGenerateAnimatedVertices( state, AnimationMode_Explode );

    // Update exploding GAS
    // Occasionally rebuild to maintain AS quality
    if( state.time - state.last_exploding_sphere_rebuild_time > 1 / g_exploding_gas_rebuild_frequency )
    {
        gas_accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
        state.last_exploding_sphere_rebuild_time = state.time;
        // We don't compress the AS so the size of the GAS won't change and we can rebuild the GAS in-place.
    }

    OPTIX_CHECK( optixAccelBuild(
        state.context,
        state.stream, // CUDA stream
        &gas_accel_options,
        &state.triangle_input,
        1, // num build inputs
        state.d_temp_buffer,
        state.temp_buffer_size,
        state.d_exploding_gas_output_buffer,
        state.exploding_gas_output_buffer_size,
        &state.exploding_gas_handle,
        nullptr, // emitted property list
        0 // num emitted properties
    ) );

    // Update the IAS
    // We refit the IAS as the relative positions of the spheres don't change much so AS quality after update is fine.
    OptixAccelBuildOptions ias_accel_options = {};
    ias_accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_UPDATE;
    ias_accel_options.motionOptions.numKeys = 1;
    ias_accel_options.operation = OPTIX_BUILD_OPERATION_UPDATE;

    if( g_instances.size() > 1 )
    {
        // Animate the second sphere by writing sin(4t) into element 7 of its
        // row-major 3x4 transform, i.e. its y-translation.
        float t = sinf( state.time * 4.f );
        CUDA_CHECK( cudaMemcpy( ( ( OptixInstance* )state.d_instances )[1].transform + 7, &t, sizeof( float ), cudaMemcpyHostToDevice ) );
    }

    OPTIX_CHECK( optixAccelBuild( state.context, state.stream, &ias_accel_options, &state.ias_instance_input, 1, state.d_temp_buffer, state.temp_buffer_size,
        state.d_ias_output_buffer, state.ias_output_buffer_size, &state.ias_handle, nullptr, 0 ) );

    CUDA_SYNC_CHECK();
}
// Build all acceleration structures:
//  - one triangle GAS over the tessellated sphere (compacted -> "static" GAS),
//  - an uncompacted relocated copy for the exploding sphere (rebuilt in-place later),
//  - a compacted relocated copy for the deforming sphere (refit-only later),
//  - a four-instance IAS over the three GAS handles.
// Also sizes state.d_temp_buffer so updateMeshAccel() can reuse it for every refit.
void buildMeshAccel( DynamicGeometryState& state )
{
    // Allocate temporary space for vertex generation.
    // The same memory space is reused for generating the deformed and exploding vertices before updates.
    // Un-indexed: 6 vertices (two triangles) per tessellation grid cell.
    uint32_t numVertices = g_tessellation_resolution * g_tessellation_resolution * 6;
    const size_t vertices_size_in_bytes = numVertices * sizeof( float3 );
    CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_temp_vertices ), vertices_size_in_bytes ) );

    // Build static triangulated sphere.
    launchGenerateAnimatedVertices( state, AnimationMode_None );

    // Build an AS over the triangles.
    // We use un-indexed triangles so we can explode the sphere per triangle.
    state.triangle_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
    state.triangle_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
    state.triangle_input.triangleArray.vertexStrideInBytes = sizeof( float3 );
    state.triangle_input.triangleArray.numVertices = static_cast< uint32_t >( numVertices );
    state.triangle_input.triangleArray.vertexBuffers = &state.d_temp_vertices;
    state.triangle_input.triangleArray.flags = &state.triangle_flags;
    state.triangle_input.triangleArray.numSbtRecords = 1;
    state.triangle_input.triangleArray.sbtIndexOffsetBuffer = 0;
    state.triangle_input.triangleArray.sbtIndexOffsetSizeInBytes = 0;
    state.triangle_input.triangleArray.sbtIndexOffsetStrideInBytes = 0;

    // ALLOW_UPDATE enables later refits; these flags must match updateMeshAccel().
    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_UPDATE | OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS;
    accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

    OptixAccelBufferSizes gas_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage(
        state.context,
        &accel_options,
        &state.triangle_input,
        1, // num_build_inputs
        &gas_buffer_sizes
    ) );

    state.temp_buffer_size = gas_buffer_sizes.tempSizeInBytes;
    CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_temp_buffer ), gas_buffer_sizes.tempSizeInBytes ) );

    // non-compacted output
    // The compacted-size query result is appended (8-byte aligned) behind the GAS output.
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast< void** >( &d_buffer_temp_output_gas_and_compacted_size ),
        compactedSizeOffset + 8
    ) );

    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = ( CUdeviceptr )( ( char* )d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );

    OPTIX_CHECK( optixAccelBuild(
        state.context,
        0, // CUDA stream
        &accel_options,
        &state.triangle_input,
        1, // num build inputs
        state.d_temp_buffer,
        gas_buffer_sizes.tempSizeInBytes,
        d_buffer_temp_output_gas_and_compacted_size,
        gas_buffer_sizes.outputSizeInBytes,
        &state.static_gas_handle,
        &emitProperty, // emitted property list
        1 // num emitted properties
    ) );

    // Replicate the uncompressed GAS for the exploding sphere.
    // The exploding sphere is occasionally rebuild. We don't want to compress the GAS after every rebuild so we use the uncompressed GAS for the exploding sphere.
    // The memory requirements for the uncompressed exploding GAS won't change so we can rebuild in-place.
    // A device-to-device copy alone is not a valid AS; optixAccelRelocate patches
    // the copy and yields a handle valid at the new address.
    state.exploding_gas_output_buffer_size = gas_buffer_sizes.outputSizeInBytes;
    OptixAccelRelocationInfo relocationInfo;
    OPTIX_CHECK( optixAccelGetRelocationInfo( state.context, state.static_gas_handle, &relocationInfo ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_exploding_gas_output_buffer ), state.exploding_gas_output_buffer_size ) );
    CUDA_CHECK( cudaMemcpy( ( void* )state.d_exploding_gas_output_buffer, ( const void* )d_buffer_temp_output_gas_and_compacted_size, state.exploding_gas_output_buffer_size, cudaMemcpyDeviceToDevice ) );
    OPTIX_CHECK( optixAccelRelocate( state.context, 0, &relocationInfo, 0, 0, state.d_exploding_gas_output_buffer, state.exploding_gas_output_buffer_size, &state.exploding_gas_handle ) );

    // Compress GAS
    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, ( void* )emitProperty.result, sizeof( size_t ), cudaMemcpyDeviceToHost ) );

    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_static_gas_output_buffer ), compacted_gas_size ) );

        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( state.context, 0, state.static_gas_handle, state.d_static_gas_output_buffer, compacted_gas_size, &state.static_gas_handle ) );

        CUDA_CHECK( cudaFree( ( void* )d_buffer_temp_output_gas_and_compacted_size ) );

        state.static_gas_output_buffer_size = compacted_gas_size;
    }
    else
    {
        // Compaction would not shrink the GAS; keep the original output buffer.
        state.d_static_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;

        state.static_gas_output_buffer_size = gas_buffer_sizes.outputSizeInBytes;
    }

    // Replicate the compressed GAS for the deforming sphere.
    // The deforming sphere is never rebuild so we refit the compressed GAS without requiring recompression.
    state.deforming_gas_output_buffer_size = state.static_gas_output_buffer_size;
    CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_deforming_gas_output_buffer ), state.deforming_gas_output_buffer_size ) );
    CUDA_CHECK( cudaMemcpy( ( void* )state.d_deforming_gas_output_buffer, ( const void* )state.d_static_gas_output_buffer, state.deforming_gas_output_buffer_size, cudaMemcpyDeviceToDevice ) );
    OPTIX_CHECK( optixAccelRelocate( state.context, 0, &relocationInfo, 0, 0, state.d_deforming_gas_output_buffer, state.deforming_gas_output_buffer_size, &state.deforming_gas_handle ) );

    // Build the IAS
    // sbtOffset == instance index, matching the hit-group record layout in createSBT().
    std::vector<OptixInstance> instances( g_instances.size() );

    for( size_t i = 0; i < g_instances.size(); ++i )
    {
        memcpy( instances[i].transform, g_instances[i].m, sizeof( float ) * 12 );
        instances[i].sbtOffset = static_cast< unsigned int >( i );
        instances[i].visibilityMask = 255;
    }

    instances[0].traversableHandle = state.static_gas_handle;
    instances[1].traversableHandle = state.static_gas_handle;
    instances[2].traversableHandle = state.deforming_gas_handle;
    instances[3].traversableHandle = state.exploding_gas_handle;

    size_t instances_size_in_bytes = sizeof( OptixInstance ) * instances.size();
    CUDA_CHECK( cudaMalloc( ( void** )&state.d_instances, instances_size_in_bytes ) );
    CUDA_CHECK( cudaMemcpy( ( void* )state.d_instances, instances.data(), instances_size_in_bytes, cudaMemcpyHostToDevice ) );

    state.ias_instance_input.type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
    state.ias_instance_input.instanceArray.instances = state.d_instances;
    state.ias_instance_input.instanceArray.numInstances = static_cast<int>( instances.size() );

    // ALLOW_UPDATE so updateMeshAccel() can refit the IAS every frame.
    OptixAccelBuildOptions ias_accel_options = {};
    ias_accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_UPDATE;
    ias_accel_options.motionOptions.numKeys = 1;
    ias_accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

    OptixAccelBufferSizes ias_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &ias_accel_options, &state.ias_instance_input, 1, &ias_buffer_sizes ) );

    // non-compacted output
    CUdeviceptr d_buffer_temp_output_ias_and_compacted_size;
    compactedSizeOffset = roundUp<size_t>( ias_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_buffer_temp_output_ias_and_compacted_size ), compactedSizeOffset + 8 ) );

    // Reuse the GAS scratch buffer when it is big enough; otherwise allocate a
    // one-off temp buffer for the IAS build.
    CUdeviceptr d_ias_temp_buffer;
    bool needIASTempBuffer = ias_buffer_sizes.tempSizeInBytes > state.temp_buffer_size;
    if( needIASTempBuffer )
    {
        CUDA_CHECK( cudaMalloc( (void**)&d_ias_temp_buffer, ias_buffer_sizes.tempSizeInBytes ) );
    }
    else
    {
        d_ias_temp_buffer = state.d_temp_buffer;
    }

    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_ias_and_compacted_size + compactedSizeOffset );

    OPTIX_CHECK( optixAccelBuild( state.context, 0, &ias_accel_options, &state.ias_instance_input, 1, d_ias_temp_buffer,
        ias_buffer_sizes.tempSizeInBytes, d_buffer_temp_output_ias_and_compacted_size,
        ias_buffer_sizes.outputSizeInBytes, &state.ias_handle, &emitProperty, 1 ) );

    if( needIASTempBuffer )
    {
        CUDA_CHECK( cudaFree( (void*)d_ias_temp_buffer ) );
    }

    // Compress the IAS
    size_t compacted_ias_size;
    CUDA_CHECK( cudaMemcpy( &compacted_ias_size, (void*)emitProperty.result, sizeof( size_t ), cudaMemcpyDeviceToHost ) );

    if( compacted_ias_size < ias_buffer_sizes.outputSizeInBytes )
    {
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_ias_output_buffer ), compacted_ias_size ) );

        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( state.context, 0, state.ias_handle, state.d_ias_output_buffer,
            compacted_ias_size, &state.ias_handle ) );

        CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_ias_and_compacted_size ) );

        state.ias_output_buffer_size = compacted_ias_size;
    }
    else
    {
        state.d_ias_output_buffer = d_buffer_temp_output_ias_and_compacted_size;

        state.ias_output_buffer_size = ias_buffer_sizes.outputSizeInBytes;
    }

    // allocate enough temporary update space for updating the deforming GAS, exploding GAS and IAS.
    size_t maxUpdateTempSize = std::max( ias_buffer_sizes.tempUpdateSizeInBytes, gas_buffer_sizes.tempUpdateSizeInBytes );
    if( state.temp_buffer_size < maxUpdateTempSize )
    {
        CUDA_CHECK( cudaFree( (void*)state.d_temp_buffer ) );
        state.temp_buffer_size = maxUpdateTempSize;
        CUDA_CHECK( cudaMalloc( (void**)&state.d_temp_buffer, state.temp_buffer_size ) );
    }

    // The IAS is the traversable the raygen program casts rays against.
    state.params.handle = state.ias_handle;
}
// Compile the device code of optixDynamicGeometry.cu into an OptixModule and
// fill in state.pipeline_compile_options (shared by module and pipeline creation).
void createModule( DynamicGeometryState& state )
{
    OptixModuleCompileOptions module_compile_options = {};
    module_compile_options.maxRegisterCount = OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
    module_compile_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
    module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;

    // Single-level instancing matches the IAS->GAS scene built in buildMeshAccel().
    state.pipeline_compile_options.usesMotionBlur = false;
    state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING;
    state.pipeline_compile_options.numPayloadValues = 3;
    state.pipeline_compile_options.numAttributeValues = 2;
#ifdef DEBUG // Enables debug exceptions during optix launches. This may incur significant performance cost and should only be done during development.
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_DEBUG | OPTIX_EXCEPTION_FLAG_TRACE_DEPTH | OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
#else
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
#endif
    // Name of the __constant__ launch-parameter variable in the device code.
    state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";
    state.pipeline_compile_options.usesPrimitiveTypeFlags = OPTIX_PRIMITIVE_TYPE_FLAGS_TRIANGLE;

    // Load the precompiled device code shipped with the sample.
    size_t inputSize = 0;
    const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixDynamicGeometry.cu", inputSize );

    char log[2048];
    size_t sizeof_log = sizeof( log );

    OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
        state.context,
        &module_compile_options,
        &state.pipeline_compile_options,
        input,
        inputSize,
        log,
        &sizeof_log,
        &state.ptx_module
    ) );
}
// Create the three program groups — raygen, miss, closest-hit — from the
// entry points compiled into state.ptx_module.
void createProgramGroups( DynamicGeometryState& state )
{
    OptixProgramGroupOptions program_group_options = {};

    char log[2048];
    size_t sizeof_log = sizeof( log );

    // Ray generation: __raygen__rg
    {
        OptixProgramGroupDesc raygen_prog_group_desc = {};
        raygen_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
        raygen_prog_group_desc.raygen.module = state.ptx_module;
        raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";

        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &raygen_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.raygen_prog_group
        ) );
    }

    // Miss: __miss__ms
    {
        OptixProgramGroupDesc miss_prog_group_desc = {};
        miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
        miss_prog_group_desc.miss.module = state.ptx_module;
        miss_prog_group_desc.miss.entryFunctionName = "__miss__ms";
        sizeof_log = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &miss_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log, &sizeof_log,
            &state.miss_group
        ) );
    }

    // Closest hit: __closesthit__ch (no any-hit or intersection programs needed
    // for built-in triangles with any-hit disabled).
    {
        OptixProgramGroupDesc hit_prog_group_desc = {};
        hit_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
        hit_prog_group_desc.hitgroup.moduleCH = state.ptx_module;
        hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__ch";
        sizeof_log = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context,
            &hit_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.hit_group
        ) );
    }
}
// Link the program groups into a pipeline and configure its stack sizes
// for a maximum trace depth of 1 and an IAS->GAS traversal depth of 2.
void createPipeline( DynamicGeometryState& state )
{
    OptixProgramGroup program_groups[] =
    {
        state.raygen_prog_group,
        state.miss_group,
        state.hit_group
    };

    // Primary rays only — no recursion.
    OptixPipelineLinkOptions pipeline_link_options = {};
    pipeline_link_options.maxTraceDepth = 1;
    pipeline_link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;

    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate(
        state.context,
        &state.pipeline_compile_options,
        &pipeline_link_options,
        program_groups,
        sizeof( program_groups ) / sizeof( program_groups[0] ),
        log,
        &sizeof_log,
        &state.pipeline
    ) );

    // We need to specify the max traversal depth. Calculate the stack sizes, so we can specify all
    // parameters to optixPipelineSetStackSize.
    OptixStackSizes stack_sizes = {};
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.raygen_prog_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.miss_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.hit_group, &stack_sizes ) );

    // No continuation or direct callables are used, hence the zero cc/dc depths.
    uint32_t max_trace_depth = 1;
    uint32_t max_cc_depth = 0;
    uint32_t max_dc_depth = 0;
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes(
        &stack_sizes,
        max_trace_depth,
        max_cc_depth,
        max_dc_depth,
        &direct_callable_stack_size_from_traversal,
        &direct_callable_stack_size_from_state,
        &continuation_stack_size
    ) );

    // This is 2 since the largest depth is IAS->GAS
    const uint32_t max_traversable_graph_depth = 2;

    OPTIX_CHECK( optixPipelineSetStackSize(
        state.pipeline,
        direct_callable_stack_size_from_traversal,
        direct_callable_stack_size_from_state,
        continuation_stack_size,
        max_traversable_graph_depth
    ) );
}
// Build the shader binding table: one raygen record, one miss record (black
// background), and one hit-group record per instance carrying that instance's
// diffuse color. Hit-group record i is selected by instance sbtOffset i
// (set in buildMeshAccel()).
void createSBT( DynamicGeometryState& state )
{
    // Raygen record (header only, no data fields used).
    CUdeviceptr d_raygen_record;
    const size_t raygen_record_size = sizeof( RayGenRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &d_raygen_record ), raygen_record_size ) );

    RayGenRecord rg_sbt = {};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &rg_sbt ) );

    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast< void* >( d_raygen_record ),
        &rg_sbt,
        raygen_record_size,
        cudaMemcpyHostToDevice
    ) );

    // Single miss record with a black background color.
    CUdeviceptr d_miss_records;
    const size_t miss_record_size = sizeof( MissRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &d_miss_records ), miss_record_size ) );

    MissRecord ms_sbt[1];
    OPTIX_CHECK( optixSbtRecordPackHeader( state.miss_group, &ms_sbt[0] ) );
    ms_sbt[0].data.bg_color = make_float4( 0.0f );

    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast< void* >( d_miss_records ),
        ms_sbt,
        miss_record_size,
        cudaMemcpyHostToDevice
    ) );

    // One hit-group record per instance; all share the same closest-hit program
    // but carry a per-instance color.
    CUdeviceptr d_hitgroup_records;
    const size_t hitgroup_record_size = sizeof( HitGroupRecord );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast< void** >( &d_hitgroup_records ),
        hitgroup_record_size * g_instances.size()
    ) );

    std::vector<HitGroupRecord> hitgroup_records( g_instances.size() );
    for( int i = 0; i < static_cast<int>( g_instances.size() ); ++i )
    {
        const int sbt_idx = i;
        OPTIX_CHECK( optixSbtRecordPackHeader( state.hit_group, &hitgroup_records[sbt_idx] ) );
        hitgroup_records[sbt_idx].data.color = g_diffuse_colors[i];
    }

    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast< void* >( d_hitgroup_records ),
        hitgroup_records.data(),
        hitgroup_record_size*hitgroup_records.size(),
        cudaMemcpyHostToDevice
    ) );

    state.sbt.raygenRecord = d_raygen_record;
    state.sbt.missRecordBase = d_miss_records;
    state.sbt.missRecordStrideInBytes = static_cast< uint32_t >( miss_record_size );
    state.sbt.missRecordCount = 1;
    state.sbt.hitgroupRecordBase = d_hitgroup_records;
    state.sbt.hitgroupRecordStrideInBytes = static_cast< uint32_t >( hitgroup_record_size );
    state.sbt.hitgroupRecordCount = static_cast< uint32_t >( hitgroup_records.size() );
}
// Tear down all OptiX objects and free all device allocations created by the
// setup functions. OptiX objects are destroyed before the device context;
// CUDA frees are valid independently of the OptiX context.
//
// Fix: the CUDA stream created in initLaunchParams() was never destroyed;
// it is now released with cudaStreamDestroy().
void cleanupState( DynamicGeometryState& state )
{
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.miss_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.hit_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );

    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_temp_vertices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_static_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_deforming_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_exploding_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_instances ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_ias_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_temp_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_params ) ) );

    // Release the work stream created in initLaunchParams().
    CUDA_CHECK( cudaStreamDestroy( state.stream ) );
}
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
// Entry point. Parses command-line options, builds the OptiX pipeline and
// acceleration structures, then either runs the interactive GLFW render loop
// (no --file given) or renders `num_frames` animation frames offline and
// saves the final frame to `outfile`.
//
// Fixes over the original:
//  * num_frames == 1 no longer divides by zero (NaN frame time) in the
//    offline path.
//  * non-positive --frames values are rejected up front; previously the
//    offline loop never ran and an uninitialized image was saved.
int main( int argc, char* argv[] )
{
    DynamicGeometryState state;
    state.params.width = 1024;
    state.params.height = 768;
    state.time = 0.f;
    sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;

    int num_frames = 16;
    float animation_time = 1.f;

    //
    // Parse command line options
    //
    std::string outfile;

    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--no-gl-interop" )
        {
            output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
        }
        else if( arg == "--file" || arg == "-f" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outfile = argv[++i];
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            int w, h;
            sutil::parseDimensions( dims_arg.c_str(), w, h );
            state.params.width = w;
            state.params.height = h;
        }
        else if( arg == "--time" || arg == "-t" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            animation_time = (float)atof( argv[++i] );
        }
        else if( arg == "--frames" || arg == "-n" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            num_frames = atoi( argv[++i] );
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }

    // Reject non-positive frame counts early: the offline render loop below
    // would otherwise never launch a subframe and save an unrendered buffer.
    if( num_frames < 1 )
    {
        std::cerr << "Frame count must be at least 1\n";
        printUsageAndExit( argv[0] );
    }

    try
    {
        initCameraState();

        //
        // Set up OptiX state
        //
        createContext( state );
        createModule( state );
        createProgramGroups( state );
        createPipeline( state );
        createSBT( state );
        initLaunchParams( state );

        buildMeshAccel( state );

        if( outfile.empty() )
        {
            // Interactive path: open a window and render until it is closed.
            GLFWwindow* window = sutil::initUI( "optixDynamicGeometry", state.params.width, state.params.height );
            glfwSetMouseButtonCallback( window, mouseButtonCallback );
            glfwSetCursorPosCallback( window, cursorPosCallback );
            glfwSetWindowSizeCallback( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback( window, keyCallback );
            glfwSetScrollCallback( window, scrollCallback );
            glfwSetWindowUserPointer( window, &state.params );

            //
            // Render loop
            //
            {
                sutil::CUDAOutputBuffer<uchar4> output_buffer(
                    output_buffer_type,
                    state.params.width,
                    state.params.height
                );

                output_buffer.setStream( state.stream );
                sutil::GLDisplay gl_display;

                std::chrono::duration<double> state_update_time( 0.0 );
                std::chrono::duration<double> render_time( 0.0 );
                std::chrono::duration<double> display_time( 0.0 );

                auto tstart = std::chrono::system_clock::now();

                state.last_exploding_sphere_rebuild_time = 0.f;

                do
                {
                    auto t0 = std::chrono::steady_clock::now();
                    glfwPollEvents();

                    // Animation time is wall-clock time since the loop started.
                    auto tnow = std::chrono::system_clock::now();
                    std::chrono::duration<double> time = tnow - tstart;
                    state.time = (float)time.count();

                    updateMeshAccel( state );
                    updateState( output_buffer, state.params );
                    auto t1 = std::chrono::steady_clock::now();
                    state_update_time += t1 - t0;
                    t0 = t1;

                    launchSubframe( output_buffer, state );
                    t1 = std::chrono::steady_clock::now();
                    render_time += t1 - t0;
                    t0 = t1;

                    displaySubframe( output_buffer, gl_display, window );
                    t1 = std::chrono::steady_clock::now();
                    display_time += t1 - t0;

                    sutil::displayStats( state_update_time, render_time, display_time );

                    glfwSwapBuffers( window );

                    ++state.params.subframe_index;
                } while( !glfwWindowShouldClose( window ) );
                CUDA_SYNC_CHECK();
            }

            sutil::cleanupUI( window );
        }
        else
        {
            // Offline path: render the animation and save the last frame.
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                sutil::initGLFW();  // For GL context
                sutil::initGL();
            }

            state.last_exploding_sphere_rebuild_time = 0.f;

            sutil::CUDAOutputBuffer<uchar4> output_buffer(
                output_buffer_type,
                state.params.width,
                state.params.height
            );

            handleCameraUpdate( state.params );
            handleResize( output_buffer, state.params );

            // run animation frames
            for( unsigned int i = 0; i < static_cast<unsigned int>( num_frames ); ++i )
            {
                // Spread frame times evenly over [0, animation_time]. Guard
                // the single-frame case: dividing by (num_frames - 1) is a
                // division by zero when num_frames == 1 (NaN frame time).
                state.time = num_frames > 1 ? i * ( animation_time / ( num_frames - 1 ) ) : 0.f;
                updateMeshAccel( state );
                launchSubframe( output_buffer, state );
            }

            sutil::ImageBuffer buffer;
            buffer.data = output_buffer.getHostPointer();
            buffer.width = output_buffer.width();
            buffer.height = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;

            sutil::saveImage( outfile.c_str(), buffer, false );

            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                glfwTerminate();
            }
        }

        cleanupState( state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }

    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicGeometry/optixDynamicGeometry.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixDynamicGeometry.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
extern "C" {
__constant__ Params params;
}
// Trace one ray against `handle` and exchange the shading result through
// `prd` (per-ray data). The three floats of *prd travel to and from the
// hit/miss programs via payload registers 0-2, as bit-reinterpreted ints.
static __forceinline__ __device__ void trace(
    OptixTraversableHandle handle,
    float3 ray_origin,
    float3 ray_direction,
    float tmin,
    float tmax,
    float3* prd
)
{
    unsigned int p0, p1, p2;
    // Pack the float bit-patterns into the 32-bit payload slots.
    p0 = float_as_int( prd->x );
    p1 = float_as_int( prd->y );
    p2 = float_as_int( prd->z );
    optixTrace(
        handle,
        ray_origin,
        ray_direction,
        tmin,
        tmax,
        0.0f, // rayTime
        OptixVisibilityMask( 1 ),
        OPTIX_RAY_FLAG_NONE,
        0, // SBT offset
        0, // SBT stride
        0, // missSBTIndex
        p0, p1, p2 );
    // Unpack the (possibly program-modified) payload back for the caller.
    prd->x = int_as_float( p0 );
    prd->y = int_as_float( p1 );
    prd->z = int_as_float( p2 );
}
// Write a color into payload registers 0-2 (bitwise float->int reinterpret).
static __forceinline__ __device__ void setPayload( float3 p )
{
    optixSetPayload_0( float_as_int( p.x ) );
    optixSetPayload_1( float_as_int( p.y ) );
    optixSetPayload_2( float_as_int( p.z ) );
}
// Read the current color from payload registers 0-2 (inverse of setPayload).
static __forceinline__ __device__ float3 getPayload()
{
    return make_float3(
        int_as_float( optixGetPayload_0() ),
        int_as_float( optixGetPayload_1() ),
        int_as_float( optixGetPayload_2() )
    );
}
// Ray generation: build a pinhole camera ray for this launch index using the
// eye/U/V/W basis from the launch params, trace it, and write the resulting
// color into the frame buffer.
extern "C" __global__ void __raygen__rg()
{
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();

    const float3 eye = params.eye;
    const float3 U = params.U;
    const float3 V = params.V;
    const float3 W = params.W;
    // Map the pixel coordinate into [-1, 1]^2 screen space.
    const float2 d = 2.0f * make_float2(
        static_cast< float >( idx.x ) / static_cast< float >( dim.x ),
        static_cast< float >( idx.y ) / static_cast< float >( dim.y )
    ) - 1.0f;

    const float3 direction = normalize( d.x * U + d.y * V + W );
    // Initial payload; overwritten by the closest-hit or miss program.
    float3 payload_rgb = make_float3( 0.5f, 0.5f, 0.5f );
    trace( params.handle,
        eye,
        direction,
        0.00f,  // tmin
        1e16f,  // tmax
        &payload_rgb );

    params.frame_buffer[idx.y * params.width + idx.x] = make_color( payload_rgb );
}
// Miss program: paint the pixel with the constant background color stored in
// this SBT record's MissData.
extern "C" __global__ void __miss__ms()
{
    MissData* rt_data = reinterpret_cast< MissData* >( optixGetSbtDataPointer() );
    // The incoming payload is irrelevant here (a dead getPayload() read has
    // been removed); the background color fully replaces it.
    setPayload( make_float3( rt_data->bg_color.x, rt_data->bg_color.y, rt_data->bg_color.z ) );
}
// Closest-hit: fetch the hit triangle's vertices at the ray time, compute
// the geometric normal, and return it remapped to [0,1] per component and
// tinted by this SBT record's color.
extern "C" __global__ void __closesthit__ch()
{
    HitGroupData* rt_data = reinterpret_cast< HitGroupData* >( optixGetSbtDataPointer() );

    // fetch current triangle vertices
    float3 data[3];
    optixGetTriangleVertexData( optixGetGASTraversableHandle(), optixGetPrimitiveIndex(), optixGetSbtGASIndex(),
        optixGetRayTime(), data );

    // compute triangle normal
    data[1] -= data[0];
    data[2] -= data[0];
    // Cross product of the two edge vectors stored in data[1], data[2].
    float3 normal = make_float3(
        data[1].y*data[2].z - data[1].z*data[2].y,
        data[1].z*data[2].x - data[1].x*data[2].z,
        data[1].x*data[2].y - data[1].y*data[2].x );
    // 0.5/|normal|: scales the normal so normal*s lies in [-0.5, 0.5].
    const float s = 0.5f / sqrtf( normal.x*normal.x + normal.y*normal.y + normal.z*normal.z );

    // convert normal to color and store in payload
    setPayload( (normal*s + make_float3( 0.5 )) * rt_data->color );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicGeometry/optixDynamicGeometry.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Launch parameters shared between host and the OptiX device programs.
struct Params
{
    uchar4* frame_buffer;           // output image, one uchar4 per pixel
    unsigned int width;             // image width in pixels
    unsigned int height;            // image height in pixels
    float3 eye, U, V, W;            // camera position and screen-space basis
    OptixTraversableHandle handle;  // top-level traversable to trace against
    int subframe_index;             // running frame counter
};
// Per-record data for the ray generation program's SBT entry.
// NOTE(review): the raygen program in this sample reads the camera from
// `params`, not from this record — confirm before relying on these fields.
struct RayGenData
{
    float3 cam_eye;                      // camera position
    float3 camera_u, camera_v, camera_w; // camera basis vectors
};
// Per-record data for the miss program: the constant background color.
struct MissData
{
    float4 bg_color;
};
// Per-record data for the hitgroup programs: the tint applied to the normal
// color in the closest-hit program.
struct HitGroupData
{
    float3 color;
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicGeometry/vertices.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "vertices.h"
// Periodic triangle wave of `x`: oscillates in [-amplitude, amplitude] with
// the given period, phase-shifted by `shift`. Defaults give a unit-amplitude
// wave with period 2*pi.
__forceinline__ __device__ float triangle_wave( float x, float shift = 0.f, float period = 2.f * M_PI, float amplitude = 1.f )
{
    return fabsf( fmodf( ( 4.f / period ) * ( x - shift ), 4.f * amplitude ) - 2.f * amplitude ) - amplitude;
}
// Store one triangle (three unindexed vertices) into `out_vertices` at
// triangle slot `tidx`, displaced by the animation. In "explode" mode the
// whole triangle is pushed along a pseudo-random direction derived from its
// index, with the magnitude oscillating over time via triangle_wave; all
// other modes write the vertices unmodified.
__forceinline__ __device__ void write_animated_triangle( float3* out_vertices, int tidx, float3 v0, float3 v1, float3 v2, AnimationMode mode, float time )
{
    float3 v = make_float3( 0 );

    if( mode == AnimationMode_Explode )
    {
        // Generate displacement vector from triangle index
        const float theta = ( ( float )M_PI * ( tidx * ( 13 / M_PI ) ) );
        const float phi = ( ( float )( 2.0 * M_PI ) * ( tidx * ( 97 / M_PI ) ) );

        // Apply displacement to the sphere triangles
        v = make_float3( triangle_wave( phi ) * triangle_wave( theta, M_PI / 2.f ),
            triangle_wave( phi, M_PI / 2.f ) * triangle_wave( theta, M_PI / 2.f ), triangle_wave( theta ) )
            * triangle_wave( time, M_PI / 2.f ) * 2.f;
    }

    out_vertices[tidx * 3 + 0] = v0 + v;
    out_vertices[tidx * 3 + 1] = v1 + v;
    out_vertices[tidx * 3 + 2] = v2 + v;
}
// Displace one sphere vertex for the "deform" animation: the y coordinate is
// modulated by a cosine wave travelling along x over time. Every other
// animation mode returns the vertex unchanged.
__forceinline__ __device__ float3 deform_vertex( const float3& c, AnimationMode mode, float time )
{
    if( mode != AnimationMode_Deform )
        return c;
    const float y_scale = 0.5f + 0.4f * cosf( 4 * ( c.x + time ) );
    return make_float3( c.x, c.y * y_scale, c.z );
}
// Kernel: each thread emits one patch (two unindexed triangles) of a
// width x height tessellated unit sphere, parameterized by spherical angles
// theta (latitude) and phi (longitude), with per-vertex deformation and
// per-triangle explosion animation applied.
extern "C" __global__ void generate_vertices(float3* out_vertices, AnimationMode mode, float time, int width, int height)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds check: the grid may cover more threads than patches.
    if( idx < width * height )
    {
        // generate a single patch (two unindexed triangles) of a tessellated sphere
        int x = idx % width;
        int y = idx / width;

        // Angular extents of this patch.
        const float theta0 = ( ( float )M_PI * ( y + 0 ) ) / height;
        const float theta1 = ( ( float )M_PI * ( y + 1 ) ) / height;
        const float phi0 = ( ( float )( 2.0 * M_PI ) * ( x + 0 ) ) / width;
        const float phi1 = ( ( float )( 2.0 * M_PI ) * ( x + 1 ) ) / width;

        const float ct0 = cosf( theta0 );
        const float st0 = sinf( theta0 );
        const float ct1 = cosf( theta1 );
        const float st1 = sinf( theta1 );

        const float cp0 = cosf( phi0 );
        const float sp0 = sinf( phi0 );
        const float cp1 = cosf( phi1 );
        const float sp1 = sinf( phi1 );

        // Four corners of the patch on the sphere, deformed if requested.
        const float3 v00 = deform_vertex( make_float3( cp0 * st0, sp0 * st0, ct0 ), mode, time );
        const float3 v10 = deform_vertex( make_float3( cp0 * st1, sp0 * st1, ct1 ), mode, time );
        const float3 v01 = deform_vertex( make_float3( cp1 * st0, sp1 * st0, ct0 ), mode, time );
        const float3 v11 = deform_vertex( make_float3( cp1 * st1, sp1 * st1, ct1 ), mode, time );

        // Two triangles per patch at slots idx*2 and idx*2+1.
        write_animated_triangle( out_vertices, idx * 2 + 0, v00, v10, v11, mode, time );
        write_animated_triangle( out_vertices, idx * 2 + 1, v00, v11, v01, mode, time );
    }
}
// Host launcher: fill `out_vertices` with width*height animated sphere
// patches (two triangles each) via the generate_vertices kernel.
// NOTE(review): "Vetrices" is a typo in the original API, kept for source
// compatibility with the header and existing callers.
extern "C" __host__ void
generateAnimatedVetrices( float3* out_vertices, AnimationMode animation_mode, float time, int width, int height )
{
    dim3 threadsPerBlock( 128, 1 );
    // Round the grid size up so every one of the width*height patches gets a
    // thread (the kernel bounds-checks idx). The previous truncating
    // division dropped the trailing partial block and, for fewer than 128
    // patches, launched zero blocks so no vertices were written at all.
    int numBlocks = ( width * height + threadsPerBlock.x - 1 ) / threadsPerBlock.x;
    generate_vertices <<< numBlocks, threadsPerBlock >>> ( out_vertices, animation_mode, time, width, height );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicGeometry/vertices.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <cuda.h>
#include <sutil/vec_math.h>
// Vertex animation variants supported by the sphere generator.
enum AnimationMode
{
    AnimationMode_None,   // static sphere, no displacement
    AnimationMode_Deform, // sine/cosine deformation of vertex positions
    AnimationMode_Explode // per-triangle outward displacement
};

// Fill `out_vertices` with width*height tessellated sphere patches (two
// unindexed triangles each), animated per `animation_mode` at `time`.
// NOTE(review): "Vetrices" is a typo preserved for source compatibility.
extern "C" __host__ void generateAnimatedVetrices( float3* out_vertices, AnimationMode animation_mode, float time, int width, int height );
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicMaterials/optixDynamicMaterials.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/Camera.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <GLFW/glfw3.h>
#include "optixDynamicMaterials.h"
#include <array>
#include <cassert>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <string>
// Generic SBT record layout: the OptiX-mandated aligned header followed by
// the user data for the matching program group.
template <typename T>
struct SbtRecord
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};

// Concrete record types for each program kind in this sample.
typedef SbtRecord<RayGenData> RayGenSbtRecord;
typedef SbtRecord<MissData> MissSbtRecord;
typedef SbtRecord<HitGroupData> HitGroupSbtRecord;
// All mutable state for the sample: launch params, OptiX context/pipeline
// objects, acceleration structures, program groups and the SBT.
struct SampleState
{
    SampleState( uint32_t width, uint32_t height )
    {
        params.image_width = width;
        params.image_height = height;
    }

    Params params;                                  // host copy of the launch parameters
    CUdeviceptr d_param;                            // device copy of params

    OptixDeviceContext context = nullptr;

    OptixTraversableHandle gas_handle;              // sphere GAS (shared by all instances)
    OptixTraversableHandle ias_handle;              // instance AS over the three spheres
    CUdeviceptr d_gas_output_buffer = 0;            // GAS backing memory
    CUdeviceptr d_ias_output_buffer = 0;            // IAS backing memory

    OptixModule module = nullptr;
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixProgramGroup raygen_prog_group = nullptr;
    OptixProgramGroup miss_prog_group = nullptr;
    std::vector<OptixProgramGroup> hitgroup_prog_groups;
    OptixPipeline pipeline = nullptr;

    OptixShaderBindingTable sbt = {};
    CUstream stream = 0;
    size_t hitgroupSbtRecordStride = 0;             // byte stride between hitgroup records
};
// 3x4 row-major instance transform; copied verbatim into
// OptixInstance::transform when building the IAS.
struct Matrix
{
    float m[12];
};
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
// Transforms for instances - one on the left (sphere 0), one in the centre and one on the right (sphere 2).
// All three sit at z = -10, spaced 6 units apart along x.
std::vector<Matrix> transforms = {
    { 1, 0, 0, -6, 0, 1, 0, 0, 0, 0, 1, -10 },
    { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, -10 },
    { 1, 0, 0, 6, 0, 1, 0, 0, 0, 0, 1, -10 },
};

// Offsets into SBT for each instance. Hence this needs to be in sync with transforms!
// The middle sphere has two SBT records, the two other instances have one each.
unsigned int sbtOffsets[] = { 0, 1, 3 };

// Color palette (red, green, blue) used when cycling materials.
const static std::array<float3, 3> g_colors =
    { { { 1.f, 0.f, 0.f }, { 0.f, 1.f, 0.f }, { 0.f, 0.f, 1.f } } };
// A cyclic counter over [0, MAXINDEX), used as a rotating offset onto the
// hitgroup SBT records so the spheres can switch materials at runtime.
template <unsigned int MAXINDEX>
struct MaterialIndex
{
    MaterialIndex() : mIndex( 0 ) {}

    // Current index value.
    unsigned int getVal() const { return mIndex; }

    // Advance to the next index, wrapping back to zero after MAXINDEX - 1.
    void nextVal() { mIndex = ( mIndex + 1 ) % MAXINDEX; }

  private:
    unsigned int mIndex;
};
// Left sphere: cycling index plus a "changed" flag.
// NOTE(review): the flags appear to be consumed elsewhere in this file
// (outside this chunk) to trigger the corresponding SBT update — confirm.
MaterialIndex<3> g_materialIndex_0;
bool g_hasDataChanged = false;

// Middle sphere
MaterialIndex<2> g_materialIndex_1;
bool g_hasOffsetChanged = false;

// Right sphere
MaterialIndex<3> g_materialIndex_2;
bool g_hasSbtChanged = false;
//------------------------------------------------------------------------------
//
// Helper Functions
//
//------------------------------------------------------------------------------
// Print the command-line usage summary to stderr and terminate the process
// with exit status 1. Never returns.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n";
    std::cerr << "Options: --file | -f <filename> Specify file for image output\n";
    std::cerr << " --help | -h Print this usage message\n";
    std::cerr << " --no-gl-interop Disable GL interop for display\n";
    std::cerr << " --dim=<width>x<height> Set image dimensions; defaults to 512x384\n";
    exit( 1 );
}
// Set up a fixed camera at (0,0,3) looking at the origin with a 60 degree
// vertical FOV, and store eye position and the U/V/W screen basis in the
// launch params.
void initCamera( SampleState& state )
{
    sutil::Camera camera;
    camera.setEye( make_float3( 0.0f, 0.0f, 3.0f ) );
    camera.setLookat( make_float3( 0.0f, 0.0f, 0.0f ) );
    camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
    camera.setFovY( 60.0f );
    camera.setAspectRatio( static_cast<float>( state.params.image_width ) / static_cast<float>( state.params.image_height ) );
    camera.UVWFrame( state.params.camera_u, state.params.camera_v, state.params.camera_w );
    state.params.cam_eye = camera.eye();
}
// OptiX device-context log callback: formats "[level][         tag]: message"
// to stderr (level padded to 2 columns, tag to 12).
void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::cerr << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << "\n";
}
// Initialize CUDA and create the OptiX device context on the current CUDA
// context, routing OptiX log messages (up to level 4) to context_log_cb.
void createContext( SampleState& state )
{
    // Initialize CUDA (cudaFree(0) forces runtime/context initialization)
    CUDA_CHECK( cudaFree( 0 ) );

    CUcontext cuCtx = 0;  // zero means take the current context
    OPTIX_CHECK( optixInit() );
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &context_log_cb;
    options.logCallbackLevel = 4;
    OPTIX_CHECK( optixDeviceContextCreate( cuCtx, &options, &state.context ) );
}
// Build the geometry acceleration structure for the custom sphere primitive:
// upload a single AABB of radius 1.5, build the GAS with compaction enabled,
// query the compacted size, and compact into a tighter allocation when that
// actually saves memory.
void buildGAS( SampleState& state )
{
    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
    accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

    // AABB build input
    OptixAabb aabb = { -1.5f, -1.5f, -1.5f, 1.5f, 1.5f, 1.5f };
    CUdeviceptr d_aabb_buffer;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_aabb_buffer ), sizeof( OptixAabb ) ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_aabb_buffer ), &aabb, sizeof( OptixAabb ), cudaMemcpyHostToDevice ) );

    OptixBuildInput aabb_input = {};
    aabb_input.type = OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES;
    aabb_input.customPrimitiveArray.aabbBuffers = &d_aabb_buffer;
    aabb_input.customPrimitiveArray.numPrimitives = 1;

    uint32_t aabb_input_flags[1] = {OPTIX_GEOMETRY_FLAG_NONE};
    aabb_input.customPrimitiveArray.flags = aabb_input_flags;
    aabb_input.customPrimitiveArray.numSbtRecords = 1;

    OptixAccelBufferSizes gas_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &accel_options, &aabb_input, 1, &gas_buffer_sizes ) );
    CUdeviceptr d_temp_buffer_gas;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer_gas ), gas_buffer_sizes.tempSizeInBytes ) );

    // non-compacted output
    // One allocation holds the build output plus, after an 8-byte-aligned
    // offset, the emitted compacted-size value.
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ),
        compactedSizeOffset + 8
    ) );

    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = reinterpret_cast<CUdeviceptr>(
        reinterpret_cast<char*>( d_buffer_temp_output_gas_and_compacted_size ) + compactedSizeOffset );

    OPTIX_CHECK( optixAccelBuild(
        state.context,
        0,  // CUDA stream
        &accel_options,
        &aabb_input,
        1,  // num build inputs
        d_temp_buffer_gas,
        gas_buffer_sizes.tempSizeInBytes,
        d_buffer_temp_output_gas_and_compacted_size,
        gas_buffer_sizes.outputSizeInBytes,
        &state.gas_handle,
        &emitProperty,  // emitted property list
        1  // num emitted properties
    ) );

    // Sphere radius matching the AABB above, passed to the intersector.
    state.params.radius = 1.5f;

    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer_gas ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_aabb_buffer ) ) );

    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, reinterpret_cast<void*>( emitProperty.result ),
        sizeof( size_t ), cudaMemcpyDeviceToHost ) );

    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_gas_output_buffer ), compacted_gas_size ) );

        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact(
            state.context,
            0,  // CUDA stream
            state.gas_handle,
            state.d_gas_output_buffer,
            compacted_gas_size,
            &state.gas_handle
        ) );

        CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_buffer_temp_output_gas_and_compacted_size ) ) );
    }
    else
    {
        // Compaction would not shrink the AS; keep the build output as-is.
        state.d_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
    }
}
// Build (or rebuild) the instance acceleration structure: one OptixInstance
// per entry in `transforms`, all referencing the same sphere GAS but with
// per-instance SBT offsets (sbtOffsets) so each sphere can bind different
// hitgroup records.
void buildIAS( SampleState& state )
{
    std::vector<OptixInstance> instances;
    for( size_t i = 0; i < transforms.size(); ++i )
    {
        OptixInstance inst;
        memcpy( inst.transform, &transforms[i], sizeof( float ) * 12 );
        inst.instanceId = 0;
        inst.visibilityMask = 1;
        inst.sbtOffset = sbtOffsets[i];
        inst.flags = OPTIX_INSTANCE_FLAG_NONE;
        inst.traversableHandle = state.gas_handle;
        instances.push_back( inst );
    }

    // Upload the instance array.
    CUdeviceptr d_inst;
    size_t instancesSizeInBytes = instances.size() * sizeof( OptixInstance );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_inst ), instancesSizeInBytes ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_inst ), &instances[0], instancesSizeInBytes, cudaMemcpyHostToDevice ) );

    OptixBuildInput instanceInput = {};
    instanceInput.type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
    instanceInput.instanceArray.instances = d_inst;
    instanceInput.instanceArray.numInstances = static_cast<unsigned int>( instances.size() );

    OptixAccelBuildOptions iasAccelOptions = {};
    iasAccelOptions.buildFlags = OPTIX_BUILD_FLAG_NONE;
    iasAccelOptions.operation = OPTIX_BUILD_OPERATION_BUILD;

    OptixAccelBufferSizes ias_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &iasAccelOptions, &instanceInput, 1, &ias_buffer_sizes ) );
    CUdeviceptr d_temp_buffer_ias;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer_ias ), ias_buffer_sizes.tempSizeInBytes ) );

    // We need to free the output buffer if we are rebuilding the IAS.
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_ias_output_buffer ) ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_ias_output_buffer ), ias_buffer_sizes.outputSizeInBytes ) );

    OPTIX_CHECK( optixAccelBuild(
        state.context,
        0,  // CUDA stream
        &iasAccelOptions,
        &instanceInput,
        1,  // num build inputs
        d_temp_buffer_ias,
        ias_buffer_sizes.tempSizeInBytes,
        state.d_ias_output_buffer,
        ias_buffer_sizes.outputSizeInBytes,
        &state.ias_handle,
        nullptr,
        0
    ) );

    // Synchronize before releasing the temp/instance buffers the build used.
    CUDA_SYNC_CHECK();

    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer_ias ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_inst ) ) );
}
// Compile the sample's PTX (optixDynamicMaterials.cu) into an OptixModule
// and fill in the pipeline compile options shared by all program groups.
void createModule( SampleState& state )
{
    OptixModuleCompileOptions module_compile_options = {};
    module_compile_options.maxRegisterCount = OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
    module_compile_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
    module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;

    state.pipeline_compile_options.usesMotionBlur = false;
    state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_ANY;
    state.pipeline_compile_options.numPayloadValues = 3;    // memory size of payload (in trace())
    state.pipeline_compile_options.numAttributeValues = 3;  // memory size of attributes (from is())
#ifdef DEBUG  // Enables debug exceptions during optix launches. This may incur significant performance cost and should only be done during development.
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_DEBUG | OPTIX_EXCEPTION_FLAG_TRACE_DEPTH | OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
#else
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
#endif
    state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";

    size_t inputSize = 0;
    const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixDynamicMaterials.cu", inputSize );

    char log[2048];  // For error reporting from OptiX creation functions
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixModuleCreateFromPTX( state.context, &module_compile_options, &state.pipeline_compile_options,
        input, inputSize, log, &sizeof_log, &state.module ) );
}
// Create the raygen, miss and six hitgroup program groups. The hitgroup
// order matches the SBT layout used by sbtOffsets: [0] left sphere,
// [1-2] the two programs the middle sphere toggles between, and
// [3-5] the blue/green/red programs the right sphere cycles through.
void createProgramGroups( SampleState& state )
{
    OptixProgramGroupOptions program_group_options = {};  // Initialize to zeros

    OptixProgramGroupDesc raygen_prog_group_desc = {};
    raygen_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
    raygen_prog_group_desc.raygen.module = state.module;
    raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";
    char log[2048];  // For error reporting from OptiX creation functions
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &raygen_prog_group_desc,
        1,  // num program groups
        &program_group_options, log, &sizeof_log, &state.raygen_prog_group ) );

    OptixProgramGroupDesc miss_prog_group_desc = {};
    miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module = state.module;
    miss_prog_group_desc.miss.entryFunctionName = "__miss__ms";
    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &miss_prog_group_desc,
        1,  // num program groups
        &program_group_options, log, &sizeof_log, &state.miss_prog_group ) );

    // hard-coded list of different CH programs for different OptixInstances
    std::vector<const char*> chNames = {// The left sphere has a single CH program
        "__closesthit__ch",
        // The middle sphere toggles between two CH programs
        "__closesthit__ch", "__closesthit__normal",
        // The right sphere uses the g_materialIndex_2.getVal()'th of these CH programs
        "__closesthit__blue", "__closesthit__green", "__closesthit__red"};

    std::vector<OptixProgramGroupDesc> hitgroup_prog_group_descs;
    for( auto chName : chNames )
    {
        OptixProgramGroupDesc hitgroup_prog_group_desc = {};
        hitgroup_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
        hitgroup_prog_group_desc.hitgroup.moduleCH = state.module;
        hitgroup_prog_group_desc.hitgroup.entryFunctionNameCH = chName;
        // No any-hit program; every hitgroup shares the same sphere intersector.
        hitgroup_prog_group_desc.hitgroup.moduleAH = nullptr;
        hitgroup_prog_group_desc.hitgroup.entryFunctionNameAH = nullptr;
        hitgroup_prog_group_desc.hitgroup.moduleIS = state.module;
        hitgroup_prog_group_desc.hitgroup.entryFunctionNameIS = "__intersection__is";
        hitgroup_prog_group_descs.push_back( hitgroup_prog_group_desc );
    }
    sizeof_log = sizeof( log );
    state.hitgroup_prog_groups.resize( hitgroup_prog_group_descs.size() );
    // Create all hitgroups in one batched call.
    OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &hitgroup_prog_group_descs[0],
        static_cast<unsigned int>( hitgroup_prog_group_descs.size() ),
        &program_group_options, log, &sizeof_log, &state.hitgroup_prog_groups[0] ) );
}
// Link all program groups into one pipeline, then compute and apply
// conservative stack sizes for the configured trace depth.
void createPipeline( SampleState& state )
{
    const uint32_t max_trace_depth = 1;
    std::vector<OptixProgramGroup> program_groups;
    program_groups.push_back( state.raygen_prog_group );
    program_groups.push_back( state.miss_prog_group );
    for( auto g : state.hitgroup_prog_groups )
        program_groups.push_back( g );
    // Zero-initialize so any members not set explicitly are well-defined;
    // the original left this struct uninitialized, unlike every other
    // options struct in this file (e.g. program_group_options = {}).
    OptixPipelineLinkOptions pipeline_link_options = {};
    pipeline_link_options.maxTraceDepth = max_trace_depth;
    pipeline_link_options.debugLevel    = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
    char log[2048]; // For error reporting from OptiX creation functions
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate( state.context, &state.pipeline_compile_options, &pipeline_link_options,
                                          &program_groups[0], static_cast<unsigned int>( program_groups.size() ), log,
                                          &sizeof_log, &state.pipeline ) );
    // Accumulate the stack requirements of every program group, then size the
    // pipeline stacks accordingly.
    OptixStackSizes stack_sizes = {};
    for( auto& prog_group : program_groups )
    {
        OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
    }
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
                                             0, // maxCCDepth
                                             0, // maxDCDepth
                                             &direct_callable_stack_size_from_traversal,
                                             &direct_callable_stack_size_from_state, &continuation_stack_size ) );
    OPTIX_CHECK( optixPipelineSetStackSize( state.pipeline, direct_callable_stack_size_from_traversal,
                                            direct_callable_stack_size_from_state, continuation_stack_size,
                                            1 // maxTraversableDepth
                                            ) );
}
// Build the shader binding table: one raygen record, one miss record holding
// the background color, and four hit-group records for the three spheres
// (the middle sphere owns two records so its SBT offset can be toggled).
void createSbt( SampleState& state )
{
    // Raygen record (empty data payload).
    CUdeviceptr raygen_record;
    const size_t raygen_record_size = sizeof( RayGenSbtRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &raygen_record ), raygen_record_size ) );
    RayGenSbtRecord rg_sbt;
    rg_sbt.data = {};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &rg_sbt ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( raygen_record ), &rg_sbt, raygen_record_size, cudaMemcpyHostToDevice ) );
    state.sbt.raygenRecord = raygen_record;
    // Miss record carrying the constant background color.
    CUdeviceptr miss_record;
    size_t miss_record_size = sizeof( MissSbtRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &miss_record ), miss_record_size ) );
    MissSbtRecord ms_sbt;
    ms_sbt.data = { 0.3f, 0.1f, 0.2f }; // Background color
    OPTIX_CHECK( optixSbtRecordPackHeader( state.miss_prog_group, &ms_sbt ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( miss_record ), &ms_sbt, miss_record_size, cudaMemcpyHostToDevice ) );
    state.sbt.missRecordBase = miss_record;
    state.sbt.missRecordStrideInBytes = sizeof( MissSbtRecord );
    state.sbt.missRecordCount = 1;
    // Hit-group records.
    const size_t hitGroupSbtRecordCount = 4;
    std::vector<HitGroupSbtRecord> hg_sbt( hitGroupSbtRecordCount );
    size_t hg_sbt_size = hg_sbt.size();
    // The left sphere cycles through three colors by updating the data field of the SBT record.
    hg_sbt[0].data = { g_colors[0], 0u };
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_groups[0], &hg_sbt[0] ) );
    // The middle sphere toggles between two SBT records by adjusting the SBT
    // offset field of the sphere instance. The IAS needs to be rebuilt for the
    // update to take effect.
    hg_sbt[1].data = { g_colors[1], 1u };
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_groups[1], &hg_sbt[1] ) );
    hg_sbt[2].data = { g_colors[1], 1u };
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_groups[2], &hg_sbt[2] ) );
    // The right sphere cycles through colors by modifying the SBT. On update, a
    // different pre-built CH program is packed into the corresponding SBT
    // record.
    hg_sbt[3].data = { { 0.f, 0.f, 0.f }, 2u };
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_groups[g_materialIndex_2.getVal() + 3], &hg_sbt[3] ) );
    // Upload all four records in one contiguous device allocation.
    CUdeviceptr hitgroup_record;
    state.hitgroupSbtRecordStride = sizeof( HitGroupSbtRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &hitgroup_record ), state.hitgroupSbtRecordStride * hg_sbt_size ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( hitgroup_record ), &hg_sbt[0],
                            state.hitgroupSbtRecordStride * hg_sbt_size, cudaMemcpyHostToDevice ) );
    state.sbt.hitgroupRecordBase = hitgroup_record;
    state.sbt.hitgroupRecordStrideInBytes = static_cast<unsigned int>( state.hitgroupSbtRecordStride );
    state.sbt.hitgroupRecordCount = static_cast<unsigned int>( hg_sbt_size );
}
// Method 1: directly overwrite the HitGroupData of the left sphere's first
// SBT record, cycling its base color through the three predefined colors.
// The SBT header (and therefore the CH program) is left untouched.
void updateHitGroupData( SampleState& state )
{
    g_materialIndex_0.nextVal();
    const HitGroupData newData = { g_colors[g_materialIndex_0.getVal()], 0u };
    // Address of the data field inside record 0 of the device-side SBT.
    HitGroupSbtRecord* record0 = &reinterpret_cast<HitGroupSbtRecord*>( state.sbt.hitgroupRecordBase )[0];
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( &record0->data ),
                            &newData, sizeof( HitGroupData ), cudaMemcpyHostToDevice ) );
    g_hasDataChanged = false;
}
// Method 2: change the SBT offset of the middle sphere's instance. The offset
// selects which SBT record (and hence which CH/AH programs) is used during
// traversal. The IAS must be rebuilt for the new offset to take effect.
void updateInstanceOffset( SampleState& state )
{
    g_materialIndex_1.nextVal();
    sbtOffsets[1] = 1 + g_materialIndex_1.getVal();
    buildIAS( state );  // rebuild so the new offset becomes visible to traversal
    g_hasOffsetChanged = false;
}
// Method 3: select a new material for the right sphere by re-packing its SBT
// record header with a different pre-compiled CH program. The record's color
// field is irrelevant here: the chosen CH program hard-codes its color.
void updateSbtHeader( SampleState& state )
{
    g_materialIndex_2.nextVal();
    HitGroupSbtRecord record;
    record.data = { { 0.f, 0.f, 0.f }, 2u };  // color is hard-coded in the CH program
    const unsigned int groupIdx = g_materialIndex_2.getVal() + 3;
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hitgroup_prog_groups[groupIdx], &record ) );
    // Overwrite the right sphere's record in the device-side SBT.
    const CUdeviceptr dst = state.sbt.hitgroupRecordBase + sizeof( HitGroupSbtRecord ) * sbtOffsets[2];
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( dst ), &record, sizeof( HitGroupSbtRecord ), cudaMemcpyHostToDevice ) );
    g_hasSbtChanged = false;
}
// Apply whichever of the three material-update methods was requested via the
// global flags (set by keyCallback in interactive mode, or directly by main
// in file-output mode). Each handler clears its own flag.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, SampleState& state )
{
    // Change the material properties using one of three different approaches.
    if( g_hasDataChanged )
        updateHitGroupData( state );
    if( g_hasOffsetChanged )
        updateInstanceOffset( state );
    if( g_hasSbtChanged )
        updateSbtHeader( state );
}
// One-time launch setup: create the CUDA stream, point the launch params at
// the IAS traversable, and allocate the device-side copy of Params.
void initLaunch( SampleState& state )
{
    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
    state.params.handle = state.ias_handle;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_param ), sizeof( Params ) ) );
}
// Upload the current launch parameters, run one OptiX launch into the mapped
// output buffer, synchronize, and unmap the buffer again.
void launch( SampleState& state, sutil::CUDAOutputBuffer<uchar4>& output_buffer )
{
    state.params.image = output_buffer.map();
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( state.d_param ), &state.params, sizeof( Params ), cudaMemcpyHostToDevice ) );
    // SBT hit-group records must satisfy OptiX's record alignment.
    assert( state.sbt.hitgroupRecordStrideInBytes % OPTIX_SBT_RECORD_ALIGNMENT == 0 );
    assert( state.sbt.hitgroupRecordBase % OPTIX_SBT_RECORD_ALIGNMENT == 0 );
    OPTIX_CHECK( optixLaunch( state.pipeline, state.stream, state.d_param, sizeof( Params ), &state.sbt,
                              state.params.image_width, state.params.image_height, /*depth=*/1 ) );
    CUDA_SYNC_CHECK();
    output_buffer.unmap();
}
// Draw the static key-binding help text as an ImGui overlay.
// The original sprintf'ed a constant string into a static 256-byte buffer on
// every frame; the text never changes, so a const literal suffices.
void displayUsage()
{
    static const char display_text[] =
        "Use the arrow keys to modify the materials:\n"
        " [LEFT] left sphere\n"
        " [UP] middle sphere\n"
        " [RIGHT] right sphere\n";
    sutil::beginFrameImGui();
    sutil::displayText( display_text, 20.0f, 20.0f );
    sutil::endFrameImGui();
}
// Blit the CUDA output buffer to the window, using the framebuffer resolution
// (which may differ from the logical window size on HiDPI displays).
void display( sutil::CUDAOutputBuffer<uchar4>& output_buffer, sutil::GLDisplay& gl_display, GLFWwindow* window )
{
    int fb_width  = 0;
    int fb_height = 0;
    glfwGetFramebufferSize( window, &fb_width, &fb_height );
    const int buf_width  = static_cast<int>( output_buffer.width() );
    const int buf_height = static_cast<int>( output_buffer.height() );
    gl_display.display( buf_width, buf_height, fb_width, fb_height, output_buffer.getPBO() );
}
// GLFW key handler: Q/ESC quits; the arrow keys each request one of the three
// material-update methods by raising a global flag (consumed in updateState).
// FIX: the GLFW_KEY_G check used to sit in the outer else branch, so it was
// evaluated on key release/repeat instead of press; moved inside the press
// block. Its body is empty, so observable behavior is unchanged.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    if( action == GLFW_PRESS )
    {
        if( key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE )
        {
            glfwSetWindowShouldClose( window, true );
        }
        else if( key == GLFW_KEY_LEFT )
        {
            g_hasDataChanged = true;    // Method 1: rewrite SBT record data
        }
        else if( key == GLFW_KEY_RIGHT )
        {
            g_hasSbtChanged = true;     // Method 3: re-pack SBT record header
        }
        else if( key == GLFW_KEY_UP )
        {
            g_hasOffsetChanged = true;  // Method 2: change instance SBT offset
        }
        else if( key == GLFW_KEY_G )
        {
            // toggle UI draw (not implemented)
        }
    }
}
// Insert a monotonically increasing counter before the file suffix, e.g.
// "out.png" -> "out_0.png", then "out_1.png", ... If the name has no suffix,
// report an error on stderr and return it unchanged (counter not advanced).
std::string getIndexedFilename( const std::string& name )
{
    static unsigned int counter = 0;
    const size_t dotPos = name.find_last_of( '.' );
    if( dotPos == std::string::npos )
    {
        std::cerr << "Cannot find image format suffix" << std::endl;
        return name;
    }
    std::ostringstream indexed;
    indexed << name.substr( 0, dotPos ) << '_' << counter++ << name.substr( dotPos );
    return indexed.str();
}
// Wrap the host-side copy of the output buffer in an ImageBuffer and save it
// to the given file.
void printBuffer( sutil::CUDAOutputBuffer<uchar4>& output_buffer, const std::string& outfile )
{
    sutil::ImageBuffer img;
    img.data         = output_buffer.getHostPointer();
    img.width        = static_cast<unsigned int>( output_buffer.width() );
    img.height       = static_cast<unsigned int>( output_buffer.height() );
    img.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
    sutil::saveImage( outfile.c_str(), img, false );
}
// Entry point. Parses the command line, performs one-time OptiX setup, then
// either runs an interactive GLFW render loop or (with --file) renders a
// fixed sequence of frames exercising all three material-update methods and
// writes each frame to a numbered image file.
int main( int argc, char* argv[] )
{
    SampleState state( 1024, 768 );
    std::string outfile;
    sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;
    // Parse command-line options.
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg( argv[i] );
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--no-gl-interop" )
        {
            output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
        }
        else if( arg == "--file" || arg == "-f" )
        {
            if( i < argc - 1 )
            {
                outfile = argv[++i];
            }
            else
            {
                printUsageAndExit( argv[0] );
            }
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            int width, height;
            sutil::parseDimensions( dims_arg.c_str(), width, height );
            state.params.image_width = width;
            state.params.image_height = height;
        }
        else
        {
            std::cerr << "Unknown option '" << arg << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    try
    {
        // One-time OptiX setup: context, acceleration structures, module,
        // program groups, pipeline, and shader binding table.
        initCamera( state );
        createContext( state );
        buildGAS( state );
        buildIAS( state );
        createModule( state );
        createProgramGroups( state );
        createPipeline( state );
        createSbt( state );
        initLaunch( state );
        if( outfile.empty() )
        {
            // Interactive mode: arrow keys trigger the three material-update
            // methods (see keyCallback).
            GLFWwindow* window = sutil::initUI( "optixDynamicMaterials", state.params.image_width, state.params.image_height );
            glfwSetKeyCallback( window, keyCallback );
            {
                sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, state.params.image_width,
                                                               state.params.image_height );
                output_buffer.setStream( state.stream );
                sutil::GLDisplay gl_display;
                while( !glfwWindowShouldClose( window ) )
                {
                    glfwPollEvents();
                    updateState( output_buffer, state );
                    launch( state, output_buffer );
                    display( output_buffer, gl_display, window );
                    displayUsage();
                    glfwSwapBuffers( window );
                }
            }  // inner scope: destroy the output buffer before tearing down the UI
            sutil::cleanupUI( window );
        }
        else
        {
            // File-output mode: render a fixed sequence of frames, saving
            // each one under an auto-incremented file name.
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                sutil::initGLFW(); // For GL context
                sutil::initGL();
            }
            sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, state.params.image_width, state.params.image_height );
            updateState( output_buffer, state );
            launch( state, output_buffer );
            // Original setup - R, G, B spheres from left to right.
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            // Now add "dynamism" - first cycle through three colors of sphere 0
            g_hasDataChanged = true;
            updateState( output_buffer, state );
            launch( state, output_buffer );
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            g_hasDataChanged = true;
            updateState( output_buffer, state );
            launch( state, output_buffer );
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            g_hasDataChanged = true;
            updateState( output_buffer, state );
            launch( state, output_buffer );
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            // Now cycle through three SBT entries for sphere 2
            g_hasSbtChanged = true;
            updateState( output_buffer, state );
            launch( state, output_buffer );
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            g_hasSbtChanged = true;
            updateState( output_buffer, state );
            launch( state, output_buffer );
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            // This should give us an image identical to the original one
            g_hasSbtChanged = true;
            updateState( output_buffer, state );
            launch( state, output_buffer );
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            // Toggle the material on the middle sphere
            g_hasOffsetChanged = true;
            updateState( output_buffer, state );
            launch( state, output_buffer );
            printBuffer( output_buffer, getIndexedFilename( outfile ) );
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                glfwTerminate();
            }
        }
        //
        // Cleanup
        //
        {
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.raygenRecord ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.missRecordBase ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_gas_output_buffer ) ) );
            CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_ias_output_buffer ) ) );
            OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
            for( auto grp : state.hitgroup_prog_groups )
                OPTIX_CHECK( optixProgramGroupDestroy( grp ) );
            OPTIX_CHECK( optixProgramGroupDestroy( state.miss_prog_group ) );
            OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
            OPTIX_CHECK( optixModuleDestroy( state.module ) );
            OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );
        }
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicMaterials/optixDynamicMaterials.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixDynamicMaterials.h"
#include <cuda/helpers.h>
#include <sutil/vec_math.h>
extern "C" __constant__ Params params;
// Trace one radiance ray: pack the float3 payload into three 32-bit payload
// registers, call optixTrace, then unpack the (possibly updated) registers
// back into *prd.
static __forceinline__ __device__ void trace( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, float3* prd )
{
    unsigned int u0 = float_as_int( prd->x );
    unsigned int u1 = float_as_int( prd->y );
    unsigned int u2 = float_as_int( prd->z );
    optixTrace( handle, ray_origin, ray_direction, tmin, tmax,
                0.0f,  // rayTime
                OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE,
                0,     // SBT offset
                0,     // SBT stride
                0,     // missSBTIndex
                u0, u1, u2 );
    prd->x = int_as_float( u0 );
    prd->y = int_as_float( u1 );
    prd->z = int_as_float( u2 );
}
// Write a float3 into the three payload registers, bit-cast to uint.
static __forceinline__ __device__ void setPayload( float3 p )
{
    const unsigned int r0 = float_as_int( p.x );
    const unsigned int r1 = float_as_int( p.y );
    const unsigned int r2 = float_as_int( p.z );
    optixSetPayload_0( r0 );
    optixSetPayload_1( r1 );
    optixSetPayload_2( r2 );
}
// Read the three payload registers back as a float3.
static __forceinline__ __device__ float3 getPayload()
{
    const unsigned int r0 = optixGetPayload_0();
    const unsigned int r1 = optixGetPayload_1();
    const unsigned int r2 = optixGetPayload_2();
    return make_float3( int_as_float( r0 ), int_as_float( r1 ), int_as_float( r2 ) );
}
// The IS program passes the object-space shading normal through the three
// attribute registers as bit-cast floats; reassemble it here.
static __forceinline__ __device__ float3 getShadingNormal()
{
    const unsigned int a0 = optixGetAttribute_0();
    const unsigned int a1 = optixGetAttribute_1();
    const unsigned int a2 = optixGetAttribute_2();
    return make_float3( int_as_float( a0 ), int_as_float( a1 ), int_as_float( a2 ) );
}
// Generate one pinhole-camera ray per launch index, trace it, and write the
// resulting color into the output image.
extern "C" __global__ void __raygen__rg()
{
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();
    const float3 U = params.camera_u;
    const float3 V = params.camera_v;
    const float3 W = params.camera_w;
    // Map the pixel coordinate into [-1, 1]^2 on the camera plane.
    const float2 d = 2.0f * make_float2( static_cast<float>( idx.x ) / static_cast<float>( dim.x ),
                                         static_cast<float>( idx.y ) / static_cast<float>( dim.y ) ) - 1.0f;
    const float3 origin = params.cam_eye;
    const float3 direction = normalize( d.x * U + d.y * V + W );
    // Initial payload; overwritten by the CH or miss program.
    float3 payload_rgb = make_float3( 0.5f, 0.5f, 0.5f );
    trace( params.handle, origin, direction,
           0.00f, // tmin
           1e16f, // tmax
           &payload_rgb );
    params.image[idx.y * params.image_width + idx.x] = make_color( payload_rgb );
}
// Rays that hit nothing return the constant background color stored in the
// miss record's SBT data. (Removed a dead `getPayload()` call whose result
// was never used.)
extern "C" __global__ void __miss__ms()
{
    MissData* missData = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    setPayload( missData->color );
}
// Custom sphere intersection: solve |O + t*D|^2 = r^2 for the normalized
// direction D and report the nearer root, passing the object-space shading
// normal to the CH program through the three attribute registers.
// (Removed an unused `hgData` local that read the SBT data pointer.)
extern "C" __global__ void __intersection__is()
{
    const float3 orig   = optixGetObjectRayOrigin();
    const float3 dir    = optixGetObjectRayDirection();
    const float3 center = {0.f, 0.f, 0.f};
    const float3 O = orig - center;
    const float  l = 1 / length( dir );
    const float3 D = dir * l;  // normalized object-space direction
    // Quadratic coefficients (with a == 1 since D is unit length).
    const float b    = dot( O, D );
    const float c    = dot( O, O ) - params.radius * params.radius;
    const float disc = b * b - c;
    if( disc > 0.0f )
    {
        const float sdisc  = sqrtf( disc );
        const float root1  = ( -b - sdisc );  // nearer root
        const float root11 = 0.0f;
        const float3 shading_normal = ( O + ( root1 + root11 ) * D ) / params.radius;
        unsigned int p0, p1, p2;
        p0 = float_as_int( shading_normal.x );
        p1 = float_as_int( shading_normal.y );
        p2 = float_as_int( shading_normal.z );
        optixReportIntersection( root1, // t hit
                                 0,     // user hit kind
                                 p0, p1, p2 );
    }
}
// Shared CH shading: visualize the world-space shading normal remapped from
// [-1, 1] to [0, 1], tinted by the material's base color.
__forceinline__ __device__ void closesthit_impl( float3 baseColor )
{
    const float3 worldNormal = optixTransformNormalFromObjectToWorldSpace( getShadingNormal() );
    const float3 shaded = ( normalize( worldNormal ) * 0.5f + 0.5f ) * baseColor;
    setPayload( shaded );
}
// Shade using the per-record base color stored in the SBT.
extern "C" __global__ void __closesthit__ch()
{
    const HitGroupData* data = reinterpret_cast<HitGroupData*>( optixGetSbtDataPointer() );
    closesthit_impl( data->color );
}
// Visualize the raw world-space normal without any material tint.
extern "C" __global__ void __closesthit__normal()
{
    const float3 worldNormal = optixTransformNormalFromObjectToWorldSpace( getShadingNormal() );
    setPayload( normalize( worldNormal ) * 0.5f + 0.5f );
}
// Hard-coded red material.
extern "C" __global__ void __closesthit__red()
{
    const float3 red = make_float3( 1.f, 0.f, 0.f );
    closesthit_impl( red );
}
// Hard-coded green material.
extern "C" __global__ void __closesthit__green()
{
    const float3 green = make_float3( 0.f, 1.f, 0.f );
    closesthit_impl( green );
}
// Hard-coded blue material.
extern "C" __global__ void __closesthit__blue()
{
    const float3 blue = make_float3( 0.f, 0.f, 1.f );
    closesthit_impl( blue );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixDynamicMaterials/optixDynamicMaterials.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Launch parameters, uploaded once per launch and visible on the device as
// the __constant__ global "params".
struct Params
{
    uchar4* image;                // output pixel buffer written by the raygen program
    unsigned int image_width;     // launch width in pixels
    unsigned int image_height;    // launch height in pixels
    float radius;                 // sphere radius used by the custom IS program
    OptixTraversableHandle handle;  // top-level traversable (the IAS)
    float3 cam_eye;               // camera position
    float3 camera_u, camera_v, camera_w;  // camera basis vectors
    unsigned int hitgroupRecordIdx_0;   // NOTE(review): not referenced in the visible code — confirm use
    unsigned int hitgroupRecordStride;  // NOTE(review): not referenced in the visible code — confirm use
};
// SBT data for the raygen record. NOTE(review): the visible raygen program
// reads the camera from the launch params instead of this record — confirm
// whether these fields are still consumed anywhere.
struct RayGenData
{
    float3 cam_eye;
    float3 camera_u, camera_v, camera_w;
};
// SBT data for the miss record: the constant background color returned for
// rays that hit nothing.
struct MissData
{
    float3 color;
};
// SBT data for hit-group records.
struct HitGroupData
{
    float3 color;                // base color multiplied into the shaded normal
    unsigned int geometryIndex;  // sphere index this record belongs to (set host-side; not read by the visible device code)
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/Hair.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "Hair.h"
#include <sutil/sutil.h>
#include <algorithm>
#include <cstring>
#include <fstream>
#include <numeric>
#include <string>
#include "ProgramGroups.h"
#include "Util.h"
// Parse a binary HAIR file: read the fixed-size header, the optional
// per-strand segment counts, the control points, and the optional per-point
// thickness, then compute the axis-aligned bounding box of the geometry.
Hair::Hair( const OptixDeviceContext context, const std::string& fileName )
    : m_context( context )
{
    std::ifstream input( fileName.c_str(), std::ios::binary );
    SUTIL_ASSERT_MSG( input.is_open(), "Unable to open " + fileName + "." );
    input.read( reinterpret_cast<char*>( &m_header ), sizeof( FileHeader ) );
    SUTIL_ASSERT( input );
    SUTIL_ASSERT_MSG( strncmp( m_header.magic, "HAIR", 4 ) == 0, "Hair-file error: Invalid file format." + fileName );
    // Force NUL termination of the info string (index 87 is presumably the
    // last slot of FileHeader::fileInfo — confirm against the header type).
    m_header.fileInfo[87] = 0;
    // Segments array (unsigned short)
    // The segments array contains the number of linear segments per strand;
    // thus there are segments + 1 control-points/vertices per strand.
    auto strandSegments = std::vector<unsigned short>( numberOfStrands() );
    if( hasSegments() )
    {
        input.read( reinterpret_cast<char*>( strandSegments.data() ), numberOfStrands() * sizeof( unsigned short ) );
        SUTIL_ASSERT_MSG( input, "Hair-file error: Cannot read segments." );
    }
    else
    {
        // No per-strand counts stored; fall back to the header default.
        std::fill( strandSegments.begin(), strandSegments.end(), defaultNumberOfSegments() );
    }
    // Compute strands vector<unsigned int>. Each element is the index to the
    // first point of the first segment of the strand. The last entry is the
    // index "one beyond the last vertex".
    m_strands = std::vector<int>( strandSegments.size() + 1 );
    auto strand = m_strands.begin();
    *strand++ = 0;
    for( auto segments : strandSegments )
    {
        *strand = *( strand - 1 ) + 1 + segments;
        strand++;
    }
    // Points array (float)
    SUTIL_ASSERT_MSG( hasPoints(), "Hair-file error: File contains no points." );
    m_points = std::vector<float3>( numberOfPoints() );
    input.read( reinterpret_cast<char*>( m_points.data() ), numberOfPoints() * sizeof( float3 ) );
    SUTIL_ASSERT_MSG( input, "Hair-file error: Cannot read points." );
    // Thickness array (float)
    m_thickness = std::vector<float>( numberOfPoints() );
    if( hasThickness() )
    {
        input.read( reinterpret_cast<char*>( m_thickness.data() ), numberOfPoints() * sizeof( float ) );
        SUTIL_ASSERT_MSG( input, "Hair-file error: Cannot read thickness." );
    }
    else
    {
        std::fill( m_thickness.begin(), m_thickness.end(), defaultThickness() );
    }
    //
    // Compute the axis-aligned bounding box for this hair geometry.
    //
    for( auto point : m_points )
    {
        m_aabb.include( point );
    }
    // expand the aabb by the maximum hair radius
    float max_width = defaultThickness();
    if( hasThickness() )
    {
        max_width = *std::max_element( m_thickness.begin(), m_thickness.end() );
    }
    m_aabb.m_min = m_aabb.m_min - make_float3( max_width );
    m_aabb.m_max = m_aabb.m_max + make_float3( max_width );
}
Hair::~Hair() {}
// Register the hit groups for this hair object: three shading variants
// (segment-u, strand-u, strand-index) plus an occlusion group for shadow
// rays. All four share the built-in intersection module that matches this
// hair's curve degree. The original repeated the spline-mode module
// selection and descriptor setup four times; both are factored out here.
void Hair::gatherProgramGroups( HairProgramGroups* pProgramGroups ) const
{
    // Select the built-in IS module once. Stays 0 (unset) for an
    // unrecognized spline mode, matching the original behavior.
    OptixModule intersectionModule = 0;
    if( QUADRATIC_BSPLINE == m_splineMode )
        intersectionModule = pProgramGroups->m_quadraticCurveModule;
    else if( CUBIC_BSPLINE == m_splineMode )
        intersectionModule = pProgramGroups->m_cubicCurveModule;
    else if( LINEAR_BSPLINE == m_splineMode )
        intersectionModule = pProgramGroups->m_linearCurveModule;

    // Builds a zero-initialized hit-group descriptor for the given CH
    // module and entry point. Built-in IS modules supply their own entry
    // function name, so entryFunctionNameIS stays 0.
    auto makeHitGroupDesc = [intersectionModule]( OptixModule chModule, const char* chEntryName ) {
        OptixProgramGroupDesc desc        = {};
        desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
        desc.hitgroup.moduleCH            = chModule;
        desc.hitgroup.entryFunctionNameCH = chEntryName;
        desc.hitgroup.moduleIS            = intersectionModule;
        desc.hitgroup.entryFunctionNameIS = 0; // automatically supplied for built-in modules
        return desc;
    };

    // Shader blending red and green via segment u.
    pProgramGroups->add( makeHitGroupDesc( pProgramGroups->m_shadingModule, "__closesthit__curve_segment_u" ),
                         programName() + "SegmentU" );
    // Shader blending red and green via root-to-tip (strand) u.
    pProgramGroups->add( makeHitGroupDesc( pProgramGroups->m_shadingModule, "__closesthit__curve_strand_u" ),
                         programName() + "StrandU" );
    // Shader switching color based on strand index.
    pProgramGroups->add( makeHitGroupDesc( pProgramGroups->m_shadingModule, "__closesthit__curve_strand_idx" ),
                         programName() + "StrandIndex" );
    // Occlusion shader for shadow rays.
    pProgramGroups->add( makeHitGroupDesc( pProgramGroups->m_whittedModule, "__closesthit__occlusion" ),
                         "occludeCurve" );
}
// Map the configured spline degree to the base name used when registering
// this hair's hit-group program groups (see gatherProgramGroups).
std::string Hair::programName() const
{
    if( m_splineMode == LINEAR_BSPLINE )
        return "hitLinearCurve";
    if( m_splineMode == QUADRATIC_BSPLINE )
        return "hitQuadraticCurve";
    if( m_splineMode == CUBIC_BSPLINE )
        return "hitCubicCurve";
    SUTIL_ASSERT_MSG( false, "Invalid b-spline mode" );
    return "";
}
// Map the configured shading mode to the suffix appended to programName()
// when selecting the matching hit-group program.
std::string Hair::programSuffix() const
{
    if( m_shadeMode == SEGMENT_U )
        return "SegmentU";
    if( m_shadeMode == STRAND_U )
        return "StrandU";
    if( m_shadeMode == STRAND_IDX )
        return "StrandIndex";
    SUTIL_ASSERT_MSG( false, "Invalid hair-shading mode" );
    return "";
}
// Number of strands stored in the file (from the HAIR header).
uint32_t Hair::numberOfStrands() const
{
    return m_header.numStrands;
}
// Total number of control points across all strands (from the HAIR header).
uint32_t Hair::numberOfPoints() const
{
    return m_header.numPoints;
}
// Segment count applied to every strand when the file has no segments array
// (used by the constructor's fallback fill).
uint32_t Hair::defaultNumberOfSegments() const
{
    return m_header.defaultNumSegments;
}
// Thickness applied to every point when the file has no thickness array.
float Hair::defaultThickness() const
{
    return m_header.defaultThickness;
}
// Transparency value to use when the file carries no transparency array.
float Hair::defaultAlpha() const
{
return m_header.defaultAlpha;
}
// Strand color to use when the file carries no color array.
float3 Hair::defaultColor() const
{
return make_float3( m_header.defaultColor.x, m_header.defaultColor.y, m_header.defaultColor.z );
}
// Free-form info string stored in the .hair header.
// NOTE(review): FileHeader::fileInfo is a fixed 88-byte char array; this
// assumes it is NUL-terminated — confirm for untrusted input files.
std::string Hair::fileInfo() const
{
return std::string( m_header.fileInfo );
}
// True if the file contains a per-strand segments array (header flag bit 0).
bool Hair::hasSegments() const
{
return ( m_header.flags & ( 0x1 << 0 ) ) > 0;
}
// True if the file contains a points array (header flag bit 1).
bool Hair::hasPoints() const
{
return ( m_header.flags & ( 0x1 << 1 ) ) > 0;
}
// True if the file contains a thickness array (header flag bit 2).
bool Hair::hasThickness() const
{
return ( m_header.flags & ( 0x1 << 2 ) ) > 0;
}
// True if the file contains a transparency array (header flag bit 3).
bool Hair::hasAlpha() const
{
return ( m_header.flags & ( 0x1 << 3 ) ) > 0;
}
// True if the file contains a color array (header flag bit 4).
bool Hair::hasColor() const
{
return ( m_header.flags & ( 0x1 << 4 ) ) > 0;
}
// Copy of all control points of all strands.
std::vector<float3> Hair::points() const
{
return m_points;
}
// Copy of the per-vertex thickness values (one width per control point).
std::vector<float> Hair::widths() const
{
return m_thickness;
}
// Total number of curve segments over all strands.  A strand of degree d
// with n control points has (n - d) segments, so the total is
// numPoints - numStrands * degree.
int Hair::numberOfSegments() const
{
return numberOfPoints() - numberOfStrands() * curveDegree();
}
// Compute a list of all segment indices making up the curves array.
//
// The structure of the list is as follows:
// * For each strand all segments are listed in order from root to tip.
// * Segment indices are identical to the index of the first control-point
// of a segment.
// * The number of segments per strand is dependent on the curve degree; e.g.
// a cubic segment requires four control points, thus a cubic strand with n
// control points will have (n - 3) segments.
//
std::vector<int> Hair::segments() const
{
std::vector<int> segments;
// loop to one before end, as last strand value is the "past last valid vertex"
// index
for( auto strand = m_strands.begin(); strand != m_strands.end() - 1; ++strand )
{
const int start = *( strand ); // first vertex in first segment
const int end = *( strand + 1 ) - curveDegree(); // second vertex of last segment
for( int i = start; i < end; ++i )
{
segments.push_back( i );
}
}
return segments;
}
std::vector<float2> Hair::strandU() const
{
std::vector<float2> strand_u;
for( auto strand = m_strands.begin(); strand != m_strands.end() - 1; ++strand )
{
const int start = *( strand );
const int end = *( strand + 1 ) - curveDegree();
const int segments = end - start; // number of strand's segments
const float scale = 1.0f / segments;
for( int i = 0; i < segments; ++i )
{
strand_u.push_back( make_float2( i * scale, scale ) );
}
}
return strand_u;
}
std::vector<int> Hair::strandIndices() const
{
std::vector<int> strandIndices;
int strandIndex = 0;
for( auto strand = m_strands.begin(); strand != m_strands.end() - 1; ++strand )
{
const int start = *( strand );
const int end = *( strand + 1 ) - curveDegree();
for( auto segment = start; segment != end; ++segment )
{
strandIndices.push_back( strandIndex );
}
++strandIndex;
}
return strandIndices;
}
std::vector<uint2> Hair::strandInfo() const
{
std::vector<uint2> strandInfo;
unsigned int firstPrimitiveIndex = 0;
for( auto strand = m_strands.begin(); strand != m_strands.end() - 1; ++strand )
{
uint2 info;
info.x = firstPrimitiveIndex; // strand's start index
info.y = *( strand + 1 ) - *(strand)-curveDegree(); // number of segments in strand
firstPrimitiveIndex += info.y; // increment with number of primitives/segments in strand
strandInfo.push_back( info );
}
return strandInfo;
}
// Switch the radius distribution and rewrite m_thickness in place.
// CONSTANT_R: every vertex receives the current root radius m_thickness[0].
// TAPERED_R:  each strand tapers linearly from the root radius to zero at
//             the tip.
// NOTE(review): the file's original per-vertex radii are overwritten and
// cannot be recovered by switching modes back.
void Hair::setRadiusMode( Radius radiusMode )
{
if( m_radiusMode != radiusMode )
{
m_radiusMode = radiusMode;
if( CONSTANT_R == m_radiusMode )
{
// assign all radii the root radius
const float r = m_thickness[0];
for( auto ir = m_thickness.begin(); ir != m_thickness.end(); ++ir )
*ir = r;
}
else if( TAPERED_R == m_radiusMode )
{
const float r = m_thickness[0];
for( auto strand = m_strands.begin(); strand != m_strands.end() - 1; ++strand )
{
const int rootVertex = *( strand );
const int vertices = *( strand + 1 ) - rootVertex; // vertices in strand
for( int i = 0; i < vertices; ++i )
{
// Linear taper: full radius at the root (i == 0), zero at the tip.
// NOTE(review): divides by zero for a single-vertex strand —
// presumably .hair strands always have >= 2 vertices; verify.
m_thickness[rootVertex + i] = r * ( vertices - 1 - i ) / static_cast<float>( vertices - 1 );
}
}
}
}
}
// Render a boolean as the literal string "true" or "false".
std::string toString( bool b )
{
    return b ? "true" : "false";
}
// Stream the symbolic name of a spline mode.
std::ostream& operator<<( std::ostream& o, Hair::SplineMode splineMode )
{
    const char* name = nullptr;
    switch( splineMode )
    {
        case Hair::LINEAR_BSPLINE:
            name = "LINEAR_BSPLINE";
            break;
        case Hair::QUADRATIC_BSPLINE:
            name = "QUADRATIC_BSPLINE";
            break;
        case Hair::CUBIC_BSPLINE:
            name = "CUBIC_BSPLINE";
            break;
        default:
            SUTIL_ASSERT_MSG( false, "Invalid spline mode." );
    }
    if( name )
        o << name;
    return o;
}
// Pretty-print a human-readable summary of the hair model: counts, spline
// mode, which optional arrays the file carried, header defaults, and the
// value ranges of the loaded arrays.
std::ostream& operator<<( std::ostream& o, const Hair& hair )
{
o << "Hair: " << std::endl;
o << "Number of strands: " << hair.numberOfStrands() << std::endl;
o << "Number of points: " << hair.numberOfPoints() << std::endl;
o << "Spline mode: " << hair.m_splineMode << std::endl;
o << "Contains segments: " << toString( hair.hasSegments() ) << std::endl;
o << "Contains points: " << toString( hair.hasPoints() ) << std::endl;
o << "Contains alpha: " << toString( hair.hasAlpha() ) << std::endl;
o << "Contains color: " << toString( hair.hasColor() ) << std::endl;
o << "Default number of segments: " << hair.defaultNumberOfSegments() << std::endl;
o << "Default thickness: " << hair.defaultThickness() << std::endl;
o << "Default alpha: " << hair.defaultAlpha() << std::endl;
float3 color = hair.defaultColor();
o << "Default color: (" << color.x << ", " << color.y << ", " << color.z << ")" << std::endl;
std::string fileInfo = hair.fileInfo();
o << "File info: ";
if( fileInfo.empty() )
o << "n/a" << std::endl;
else
o << fileInfo << std::endl;
// First and last element of each array, as a quick sanity check.
o << "Strands: [" << hair.m_strands[0] << "..." << hair.m_strands[hair.m_strands.size() - 1] << "]" << std::endl;
o << "Points: [" << hair.m_points[0] << "..." << hair.m_points[hair.m_points.size() - 1] << "]" << std::endl;
o << "Thickness: [" << hair.m_thickness[0] << "..." << hair.m_thickness[hair.m_thickness.size() - 1] << "]" << std::endl;
o << "Bounding box: [" << hair.m_aabb.m_min << ", " << hair.m_aabb.m_max << "]" << std::endl;
return o;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/Hair.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <optix.h>
#include <optix_stubs.h>
#include <sutil/Aabb.h>
#include <sutil/Exception.h>
#include <sutil/Matrix.h>
#include <ostream>
#include <string>
#include <vector>
// forward declarations
class Context;
class HairProgramGroups;
// In-memory representation of a hair model loaded from a .hair file
// (format spec: http://www.cemyuksel.com/research/hairmodels/), with
// accessors that massage the data into OptiX curve-array build inputs.
class Hair
{
  public:
    // Degree of the B-spline representation used for the strands.
    enum SplineMode
    {
        LINEAR_BSPLINE,
        QUADRATIC_BSPLINE,
        CUBIC_BSPLINE
    };

    // Shading/visualization mode for the hair.
    enum Shade
    {
        SEGMENT_U,
        STRAND_U,
        STRAND_IDX
    };

    // Radius distribution along a strand.
    enum Radius
    {
        CONSTANT_R,
        TAPERED_R
    };

    Hair( const OptixDeviceContext context, const std::string& fileName );
    virtual ~Hair();

    // Factory method for loading Hair from file.
    static Hair Load( const std::string& fileName, const OptixDeviceContext context );

    void setSplineMode( SplineMode splineMode ) { m_splineMode = splineMode; };
    SplineMode splineMode() const { return m_splineMode; };

    void setShadeMode( Shade shadeMode ) { m_shadeMode = shadeMode; };
    Shade shadeMode() const { return m_shadeMode; };

    // Switches the radius mode and rewrites the per-vertex thickness values.
    void setRadiusMode( Radius radiusMode );
    Radius radiusMode() const { return m_radiusMode; };

    uint32_t numberOfStrands() const;
    uint32_t numberOfPoints() const;

    std::string fileInfo() const;

    std::vector<float3> points() const;
    std::vector<float> widths() const;

    int numberOfSegments() const;

    // Compute a vector containing vertex indices for all segments
    // making up the hair geometry. E.g.
    // [h0s0, h0s1, ..., h0sn0, h1s0, h1s1, ..., h1sn1, h2s0, ...]
    //
    std::vector<int> segments() const;
    std::vector<float2> strandU() const;
    std::vector<int> strandIndices() const;
    std::vector<uint2> strandInfo() const;

    virtual void gatherProgramGroups( HairProgramGroups* pProgramGroups ) const;

    std::string programName() const;
    std::string programSuffix() const;

    sutil::Aabb aabb() const { return m_aabb; }

    // OptiX primitive-type flags matching the current spline mode.
    unsigned int usesPrimitiveTypes() const
    {
        switch( m_splineMode ) {
            case LINEAR_BSPLINE:
                return OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_LINEAR;
            case QUADRATIC_BSPLINE:
                return OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_QUADRATIC_BSPLINE;
            case CUBIC_BSPLINE:
                return OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_CUBIC_BSPLINE;
            default:
                SUTIL_ASSERT_MSG(false, "Invalid b-spline mode.");
                // Fix: the original fell off the end of this non-void function
                // when the assertion did not terminate; return a defined value.
                return 0;
        }
    }

  protected:
    bool hasSegments() const;
    bool hasPoints() const;
    bool hasThickness() const;
    bool hasAlpha() const;
    bool hasColor() const;

    OptixTraversableHandle gas() const;

    uint32_t defaultNumberOfSegments() const;
    float defaultThickness() const;
    float defaultAlpha() const;
    float3 defaultColor() const;

    void makeOptix() const;
    void clearOptix();

  private:
    // .hair format spec here: http://www.cemyuksel.com/research/hairmodels/
    struct FileHeader
    {
        // Bytes 0 - 3 Must be "HAIR" in ascii code(48 41 49 52)
        char magic[4];
        // Bytes 4 - 7 Number of hair strands as unsigned int
        uint32_t numStrands;
        // Bytes 8 - 11 Total number of points of all strands as unsigned int
        uint32_t numPoints;
        // Bytes 12 - 15 Bit array of data in the file
        // Bit - 5 to Bit - 31 are reserved for future extension(must be 0).
        uint32_t flags;
        // Bytes 16 - 19 Default number of segments of hair strands as unsigned int
        // If the file does not have a segments array, this default value is used.
        uint32_t defaultNumSegments;
        // Bytes 20 - 23 Default thickness hair strands as float
        // If the file does not have a thickness array, this default value is used.
        float defaultThickness;
        // Bytes 24 - 27 Default transparency hair strands as float
        // If the file does not have a transparency array, this default value is used.
        float defaultAlpha;
        // Bytes 28 - 39 Default color hair strands as float array of size 3
        // If the file does not have a color array, this default value is used.
        float3 defaultColor;
        // Bytes 40 - 127 File information as char array of size 88 in ascii
        char fileInfo[88];
    };

    FileHeader m_header;
    // Per-strand start-vertex indices, plus one trailing entry marking
    // "one past the last valid vertex".
    std::vector<int> m_strands;
    std::vector<float3> m_points;
    std::vector<float> m_thickness;

    SplineMode m_splineMode = CUBIC_BSPLINE;

    // Number of control points per segment minus one, for the current mode.
    unsigned int curveDegree() const
    {
        switch( m_splineMode ) {
            case LINEAR_BSPLINE:
                return 1;
            case QUADRATIC_BSPLINE:
                return 2;
            case CUBIC_BSPLINE:
                return 3;
            default:
                SUTIL_ASSERT_MSG( false, "Invalid spline mode." );
                // Fix: avoid falling off the end of a non-void function.
                return 0;
        }
    }

    Shade m_shadeMode = SEGMENT_U;
    Radius m_radiusMode = CONSTANT_R;
    mutable sutil::Aabb m_aabb;
    OptixDeviceContext m_context = 0;

    friend std::ostream& operator<<( std::ostream& o, const Hair& hair );
};
// Output operator for Hair
std::ostream& operator<<( std::ostream& o, const Hair& hair );
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/Head.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "Head.h"
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_stubs.h>
#define TINYGLTF_IMPLEMENTATION
#if defined( WIN32 )
#pragma warning( push )
#pragma warning( disable : 4267 )
#endif
#include <support/tinygltf/tiny_gltf.h>
#define STB_IMAGE_IMPLEMENTATION
#include <tinygltf/stb_image.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <tinygltf/stb_image_write.h>
#if defined( WIN32 )
#pragma warning( pop )
#endif
#include <algorithm>
#include <cstring>
#include <fstream>
#include <limits>
#include <sutil/Exception.h>
#include <sutil/sutil.h>
#include "ProgramGroups.h"
#include "Util.h"
#include "optixHair.h"
// Loads a single-node, single-mesh glTF file, uploads its (single) binary
// buffer to the device, wires index/position/normal accessor views into
// m_triangleMesh, and builds the geometry acceleration structure (GAS).
Head::Head( const OptixDeviceContext context, const std::string& fileName )
{
tinygltf::Model model;
tinygltf::TinyGLTF loader;
std::string err;
std::string warn;
bool ret = loader.LoadASCIIFromFile( &model, &err, &warn, fileName );
if( !warn.empty() )
std::cout << "glTF WARNING: " << warn << std::endl;
if( !ret )
{
std::cout << "Failed to load GLTF scene '" << fileName << "': " << err << std::endl;
throw sutil::Exception( err.c_str() );
}
//
// Process buffer data first -- buffer views will reference this list
//
SUTIL_ASSERT( 1 == model.buffers.size() );
createOnDevice( model.buffers[0].data, &m_buffer );
// This loader supports only a glTF file with exactly one node, holding one
// mesh, holding one triangle primitive.
SUTIL_ASSERT( model.nodes.size() == 1 );
SUTIL_ASSERT( model.nodes[0].mesh != -1 );
const auto& gltfMesh = model.meshes[model.nodes[0].mesh];
std::cout << "Processing glTF mesh: '" << gltfMesh.name << "'\n";
std::cout << "\tNum mesh primitive groups: " << gltfMesh.primitives.size() << std::endl;
SUTIL_ASSERT( gltfMesh.primitives.size() == 1 );
auto primitive = gltfMesh.primitives[0];
SUTIL_ASSERT( primitive.mode == TINYGLTF_MODE_TRIANGLES );
// Indices
std::cout << "Processing index buffer" << std::endl;
SUTIL_ASSERT( primitive.indices != -1 );
// NOTE(review): `accessor` and `bufferView` are non-const references into
// the model; the `accessor = ...` assignments further below copy-assign into
// the referenced objects instead of rebinding, mutating the model in place.
// Works as used here, but confirm this is intended.
auto& accessor = model.accessors[primitive.indices];
auto& bufferView = model.bufferViews[accessor.bufferView];
OptixBuildInput buildInput = {};
m_triangleMesh.indices.data = m_buffer + bufferView.byteOffset + accessor.byteOffset;
SUTIL_ASSERT( accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT );
m_triangleMesh.indices.elmt_byte_size = accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT ?
2 :
accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT ?
4 :
accessor.componentType == TINYGLTF_COMPONENT_TYPE_FLOAT ? 4 : 0;
SUTIL_ASSERT_MSG( m_triangleMesh.indices.elmt_byte_size != 0, "gltf accessor component type not supported" );
m_triangleMesh.indices.byte_stride = static_cast<unsigned int>( bufferView.byteStride ? bufferView.byteStride : m_triangleMesh.indices.elmt_byte_size );
SUTIL_ASSERT( accessor.count % 3 == 0 );
m_triangleMesh.indices.count = static_cast<unsigned int>( accessor.count );
// index buffer build input
buildInput.triangleArray.indexFormat =
m_triangleMesh.indices.elmt_byte_size == 2 ? OPTIX_INDICES_FORMAT_UNSIGNED_SHORT3 : OPTIX_INDICES_FORMAT_UNSIGNED_INT3;
buildInput.triangleArray.indexStrideInBytes = m_triangleMesh.indices.byte_stride * 3;
buildInput.triangleArray.numIndexTriplets = m_triangleMesh.indices.count / 3;
buildInput.triangleArray.indexBuffer = m_triangleMesh.indices.data;
const unsigned int triangleFlags = OPTIX_GEOMETRY_FLAG_NONE;
buildInput.triangleArray.flags = &triangleFlags;
buildInput.triangleArray.numSbtRecords = 1;
// NOTE(review): this stores the raw *index* count, not the triangle count
// (indices.count / 3); numberOfTriangles() reports this value — confirm.
m_triangles = m_triangleMesh.indices.count;
// Vertex array
SUTIL_ASSERT( primitive.attributes.find( "POSITION" ) != primitive.attributes.end() );
const int32_t positionIndex = primitive.attributes.at( "POSITION" );
std::cout << "Processing position array" << positionIndex << std::endl;
SUTIL_ASSERT( positionIndex != -1 );
accessor = model.accessors[positionIndex];
bufferView = model.bufferViews[accessor.bufferView];
SUTIL_ASSERT( accessor.componentType == TINYGLTF_COMPONENT_TYPE_FLOAT );
m_triangleMesh.positions.data = m_buffer + bufferView.byteOffset + accessor.byteOffset;
m_triangleMesh.positions.elmt_byte_size = accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT ?
2 :
accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT ?
4 :
accessor.componentType == TINYGLTF_COMPONENT_TYPE_FLOAT ? 4 : 0;
// Element size covers a full 3-component vertex.
m_triangleMesh.positions.elmt_byte_size *= 3;
SUTIL_ASSERT_MSG( m_triangleMesh.indices.elmt_byte_size != 0, "gltf accessor component type not supported" );
m_triangleMesh.positions.byte_stride = static_cast<unsigned int>( bufferView.byteStride ? bufferView.byteStride : m_triangleMesh.positions.elmt_byte_size );
m_triangleMesh.positions.count = static_cast<unsigned int>( accessor.count );
// bounding box
sutil::Aabb bb = sutil::Aabb( make_float3( (float) accessor.minValues[0], (float) accessor.minValues[1], (float) accessor.minValues[2] ),
make_float3( (float) accessor.maxValues[0], (float) accessor.maxValues[1], (float) accessor.maxValues[2] ) );
m_aabb.include( bb );
m_vertices = m_triangleMesh.positions.count;
// vertex buffer build input
buildInput.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
buildInput.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
SUTIL_ASSERT( m_triangleMesh.positions.byte_stride == sizeof( float3 ) );
buildInput.triangleArray.vertexStrideInBytes = m_triangleMesh.positions.byte_stride;
buildInput.triangleArray.numVertices = m_triangleMesh.positions.count;
buildInput.triangleArray.vertexBuffers = &m_triangleMesh.positions.data;
// Normal array
auto normalAccessorIter = primitive.attributes.find( "NORMAL" );
SUTIL_ASSERT( normalAccessorIter != primitive.attributes.end() );
const int32_t normalIndex = normalAccessorIter->second;
std::cout << "Processing normal array" << std::endl;
accessor = model.accessors[normalIndex];
bufferView = model.bufferViews[accessor.bufferView];
m_triangleMesh.normals.data = m_buffer + bufferView.byteOffset + accessor.byteOffset;
m_triangleMesh.normals.byte_stride = static_cast<unsigned int>( bufferView.byteStride ? bufferView.byteStride : sizeof( float3 ) );
m_triangleMesh.normals.count = static_cast<unsigned int>( accessor.count );
// NOTE(review): the two statements below write positions.elmt_byte_size
// again while processing the NORMAL accessor — looks like a copy/paste of
// the position code; presumably normals.elmt_byte_size was intended.
m_triangleMesh.positions.elmt_byte_size = accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT ?
2 :
accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT ?
4 :
accessor.componentType == TINYGLTF_COMPONENT_TYPE_FLOAT ? 4 : 0;
m_triangleMesh.positions.elmt_byte_size *= 3;
SUTIL_ASSERT_MSG( m_triangleMesh.indices.elmt_byte_size != 0, "gltf accessor component type not supported" );
std::cout << "Build input type: " << buildInput.type << std::endl;
// GAS build: query sizes, allocate temp and output buffers, build, free temp.
OptixAccelBufferSizes bufferSizes;
OptixAccelBuildOptions accelBuildOptions = {};
accelBuildOptions.buildFlags = OPTIX_BUILD_FLAG_NONE;
accelBuildOptions.operation = OPTIX_BUILD_OPERATION_BUILD;
OPTIX_CHECK( optixAccelComputeMemoryUsage( context, &accelBuildOptions, &buildInput, 1, &bufferSizes ) );
CUdeviceptr deviceTempBuffer;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &deviceTempBuffer ), bufferSizes.tempSizeInBytes ) );
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &m_deviceBufferGAS ), bufferSizes.outputSizeInBytes ) );
OPTIX_CHECK( optixAccelBuild( context, 0, // CUDA stream
&accelBuildOptions, &buildInput, 1, deviceTempBuffer, bufferSizes.tempSizeInBytes,
m_deviceBufferGAS, bufferSizes.outputSizeInBytes, &m_hGAS,
nullptr, // emitted property list
0 ) ); // num emitted properties
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( deviceTempBuffer ) ) );
}
// Release the device allocations made in the constructor.
Head::~Head()
{
    // Fix: the GAS output buffer (m_deviceBufferGAS) was allocated with
    // cudaMalloc in the constructor but never freed — a device-memory leak.
    CUDA_CHECK_NOTHROW( cudaFree( reinterpret_cast<void*>( m_deviceBufferGAS ) ) );
    CUDA_CHECK_NOTHROW( cudaFree( reinterpret_cast<void*>( m_buffer ) ) );
}
// Register the two triangle hit program groups: radiance and occlusion,
// both taken from the shared whitted module.
void Head::gatherProgramGroups( HairProgramGroups* pProgramGroups ) const
{
    OptixProgramGroupDesc desc = {};
    desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    desc.hitgroup.moduleCH            = pProgramGroups->m_whittedModule;
    desc.hitgroup.entryFunctionNameCH = "__closesthit__radiance";
    pProgramGroups->add( desc, "hitTriangle" );

    // Reset the descriptor (aggregate re-initialization instead of memset).
    desc                              = OptixProgramGroupDesc{};
    desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    desc.hitgroup.moduleCH            = pProgramGroups->m_whittedModule;
    desc.hitgroup.entryFunctionNameCH = "__closesthit__occlusion";
    pProgramGroups->add( desc, "occludeTriangle" );
}
// Build one SBT hit record per ray type for the head's triangle mesh:
// record 0 is packed with the radiance hit program, record 1 with the
// occlusion program.  Both records share the same geometry reference and
// a gray, mostly-rough PBR material.
std::vector<HitRecord> Head::sbtHitRecords( const ProgramGroups& programs, size_t rayTypes ) const
{
SUTIL_ASSERT_MSG( 2 == rayTypes, "Head requires two ray types." );
std::vector<HitRecord> records;
HitRecord hitGroupRecord = {};
hitGroupRecord.data.geometry_data.type = GeometryData::TRIANGLE_MESH;
hitGroupRecord.data.geometry_data.triangle_mesh = m_triangleMesh;
hitGroupRecord.data.material_data.pbr.base_color = {0.5f, 0.5f, 0.5f};
hitGroupRecord.data.material_data.pbr.metallic = 0.2f;
hitGroupRecord.data.material_data.pbr.roughness = 1.0f;
OPTIX_CHECK( optixSbtRecordPackHeader( programs["hitTriangle"], &hitGroupRecord ) );
records.push_back( hitGroupRecord );
// Same record payload, re-packed with the occlusion program's header.
OPTIX_CHECK( optixSbtRecordPackHeader( programs["occludeTriangle"], &hitGroupRecord ) );
records.push_back( hitGroupRecord );
return records;
}
// Traversable handle of the head's GAS, built in the constructor.
OptixTraversableHandle Head::traversable() const
{
return m_hGAS;
}
// Print a short human-readable summary of the head mesh.
// NOTE(review): numberOfTriangles() holds the raw index count as assigned
// in the constructor, not indices/3 — confirm intended output.
std::ostream& operator<<( std::ostream& o, const Head& head )
{
o << "Head: " << std::endl;
o << "Number of vertices: " << head.numberOfVertices() << std::endl;
o << "Number of triangles: " << head.numberOfTriangles() << std::endl;
o << "Bounding box: [" << head.m_aabb.m_min << ", " << head.m_aabb.m_max << "]" << std::endl;
return o;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/Head.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <optix.h>
#include "optixHair.h"
#include <string>
#include <vector>
// forward declarations
class Context;
class ProgramGroups;
// Triangle-mesh head geometry loaded from a glTF file and uploaded to the
// device, including its geometry acceleration structure (GAS).
class Head
{
public:
// Loads the glTF file, uploads its buffer to the device, builds the GAS.
Head( const OptixDeviceContext context, const std::string& fileName );
~Head();
// Traversable handle of the GAS built in the constructor.
virtual OptixTraversableHandle traversable() const;
// Registers the triangle radiance/occlusion hit program groups.
virtual void gatherProgramGroups( HairProgramGroups* pProgramGroups ) const;
// Builds one SBT hit record per ray type (radiance, occlusion).
virtual std::vector<HitRecord> sbtHitRecords( const ProgramGroups& programs, size_t rayTypes ) const;
size_t numberOfVertices() const { return m_vertices; }
// NOTE(review): the constructor assigns the raw index count to m_triangles,
// not the triangle count — confirm intended semantics.
size_t numberOfTriangles() const { return m_triangles; }
virtual sutil::Aabb aabb() const { return m_aabb; }
virtual unsigned int usesPrimitiveTypes() const { return OPTIX_PRIMITIVE_TYPE_FLAGS_TRIANGLE; }
private:
size_t m_vertices = 0;
size_t m_triangles = 0;
// Device copy of the glTF binary buffer; freed in the destructor.
CUdeviceptr m_buffer = 0;
sutil::Aabb m_aabb;
mutable OptixTraversableHandle m_hGAS = 0;
// Device buffer backing the GAS output.
mutable CUdeviceptr m_deviceBufferGAS = 0;
GeometryData::TriangleMesh m_triangleMesh;
friend std::ostream& operator<<( std::ostream& o, const Head& head );
};
// Output operator for Head
std::ostream& operator<<( std::ostream& o, const Head& head );
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/ProgramGroups.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "ProgramGroups.h"
#include <optix.h>
#include <optix_stubs.h>
#include <sutil/Exception.h>
#include <sutil/sutil.h>
#include <cstring>
// Capture the device context plus the pipeline/program-group options shared
// by every program group created through this registry.
ProgramGroups::ProgramGroups( const OptixDeviceContext context,
                              OptixPipelineCompileOptions pipeOptions,
                              OptixProgramGroupOptions programGroupOptions = {} )
    : m_context( context )
    , m_pipeOptions( pipeOptions )
    , m_programGroupOptions( programGroupOptions )
{
}
// Create a program group from the descriptor and register it under `name`.
// A name that has already been added is silently ignored.
void ProgramGroups::add( const OptixProgramGroupDesc& programGroupDescriptor, const std::string& name )
{
    if( m_nameToIndex.count( name ) != 0 )
        return;  // a group with this name already exists

    const size_t index = m_programGroups.size();
    m_nameToIndex[name] = static_cast<unsigned int>( index );
    m_programGroups.resize( index + 1 );
    OPTIX_CHECK_LOG2( optixProgramGroupCreate( m_context, &programGroupDescriptor,
                                               1,  // num program groups
                                               &m_programGroupOptions, LOG, &LOG_SIZE, &m_programGroups[index] ) );
}
// Look up a previously added program group by name; asserts if unknown.
const OptixProgramGroup& ProgramGroups::operator[]( const std::string& name ) const
{
    const auto entry = m_nameToIndex.find( name );
    SUTIL_ASSERT( entry != m_nameToIndex.end() );
    return m_programGroups[entry->second];
}
// Pointer to the contiguous array of created program groups (e.g. for
// optixPipelineCreate).
//
// Fix: the original returned &m_programGroups[0], which is undefined
// behavior when the vector is empty; std::vector::data() is well-defined
// for an empty container (it may return null, which must not be deref'd).
const OptixProgramGroup* ProgramGroups::data() const
{
    return m_programGroups.data();
}
// Number of program groups created so far.
unsigned int ProgramGroups::size() const
{
return static_cast<unsigned int>( m_programGroups.size() );
}
//
// HairProgramGroups
//
// Creates the shading and whitted modules from PTX and fetches the built-in
// curve intersection modules required by the pipeline's primitive-type flags.
HairProgramGroups::HairProgramGroups( const OptixDeviceContext context, OptixPipelineCompileOptions pipeOptions )
: ProgramGroups( context, pipeOptions )
{
//
// Create modules
//
const OptixModuleCompileOptions defaultOptions = {};
size_t inputSize = 0;
// Sample-specific shading module, compiled from optixHair.cu.
const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixHair.cu", inputSize );
OPTIX_CHECK_LOG2( optixModuleCreateFromPTX( context,
&defaultOptions,
&pipeOptions,
input,
inputSize,
LOG, &LOG_SIZE,
&m_shadingModule ) );
// Shared whitted shading module (whitted.cu).
input = sutil::getInputData( nullptr, nullptr, "whitted.cu", inputSize );
OPTIX_CHECK_LOG2( optixModuleCreateFromPTX( context,
&defaultOptions,
&pipeOptions,
input,
inputSize,
LOG, &LOG_SIZE,
&m_whittedModule ) );
// Built-in intersection modules are only fetched for the curve primitive
// types the pipeline actually enables; the others stay uninitialized.
if( pipeOptions.usesPrimitiveTypeFlags & OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_QUADRATIC_BSPLINE ) {
OptixBuiltinISOptions builtinISOptions = {};
builtinISOptions.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_QUADRATIC_BSPLINE;
OPTIX_CHECK( optixBuiltinISModuleGet( context, &defaultOptions, &pipeOptions, &builtinISOptions, &m_quadraticCurveModule ) );
}
if( pipeOptions.usesPrimitiveTypeFlags & OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_CUBIC_BSPLINE ) {
OptixBuiltinISOptions builtinISOptions = {};
builtinISOptions.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE;
OPTIX_CHECK( optixBuiltinISModuleGet( context, &defaultOptions, &pipeOptions, &builtinISOptions, &m_cubicCurveModule ) );
}
if( pipeOptions.usesPrimitiveTypeFlags & OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_LINEAR ) {
OptixBuiltinISOptions builtinISOptions = {};
builtinISOptions.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_LINEAR;
OPTIX_CHECK( optixBuiltinISModuleGet( context, &defaultOptions, &pipeOptions, &builtinISOptions, &m_linearCurveModule ) );
}
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/ProgramGroups.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <optix.h>
#include <map>
#include <string>
#include <vector>
// Registry of OptiX program groups, addressable both as a contiguous array
// (as needed by pipeline creation) and by the name each group was registered
// under (as needed when packing SBT records).
class ProgramGroups
{
public:
// Contiguous array of all program groups added so far.
const OptixProgramGroup* data() const;
// Number of program groups currently registered.
unsigned int size() const;
// Look up a program group by the name it was registered under.
const OptixProgramGroup& operator[]( const std::string& name ) const;
// Register the program group described by 'programGroupDescriptor' under 'name'.
void add( const OptixProgramGroupDesc& programGroupDescriptor, const std::string& name );
protected:
// Constructible only by subclasses; stores the context and options that
// subsequent add() calls operate with.
ProgramGroups( const OptixDeviceContext context, OptixPipelineCompileOptions pipeOptions, OptixProgramGroupOptions programGroupOptions );
private:
const OptixDeviceContext m_context;
OptixPipelineCompileOptions m_pipeOptions;
OptixProgramGroupOptions m_programGroupOptions;
std::vector<OptixProgramGroup> m_programGroups;  // insertion order defines the array layout
std::map<std::string, unsigned int> m_nameToIndex;  // registered name -> index into m_programGroups
};
// ProgramGroups specialization for the hair sample. Besides the registry it
// owns the OptiX modules used by the sample's programs: the shading and
// whitted modules, plus the built-in curve intersection modules, which the
// constructor creates only for the primitive types enabled in the pipeline
// compile options.
class HairProgramGroups : public ProgramGroups
{
public:
HairProgramGroups( const OptixDeviceContext context, OptixPipelineCompileOptions pipeOptions );
OptixModule m_shadingModule;
OptixModule m_whittedModule;
// Built-in intersection modules; only those matching the enabled
// OPTIX_PRIMITIVE_TYPE_FLAGS_* are created (see constructor).
OptixModule m_quadraticCurveModule;
OptixModule m_cubicCurveModule;
OptixModule m_linearCurveModule;
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/Renderers.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "Renderers.h"
#include "Hair.h"
#include <GLFW/glfw3.h>
#include <sutil/GLDisplay.h>
//
// Renderer base class
//
// Bind the renderer to the shared application state. The state is not owned
// by the renderer; it must outlive it.
Renderer::Renderer( HairState* pState )
: m_pState( pState )
{}
// Render one frame/subframe into the state's output buffer.
void Renderer::render() const
{
renderFrame( m_pState );
}
//
// FileRenderer
//
// The file renderer adds no state of its own; it only forwards to Renderer.
FileRenderer::FileRenderer( HairState* pState )
: Renderer( pState )
{
}
void FileRenderer::renderFile( const std::string& fileName ) const
{
render();
// save result image
sutil::ImageBuffer buffer;
buffer.data = m_pState->outputBuffer.getHostPointer();
buffer.width = m_pState->width;
buffer.height = m_pState->height;
buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
sutil::saveImage( fileName.c_str(), buffer, false );
}
//void FileRenderer::update( HairState::Event event ){};
//
// WindowRenderer
//
// Create the GLFW window/UI at the current render resolution and wire up all
// input handling for interactive viewing.
WindowRenderer::WindowRenderer( HairState* pState )
    : Renderer( pState )
{
    // Create the UI window first; the callbacks below attach to it.
    m_window = sutil::initUI( "optixHair", pState->width, pState->height );

    // Route GLFW events back to this instance via the user pointer.
    glfwSetWindowUserPointer( m_window, this );
    glfwSetMouseButtonCallback( m_window, mouseButtonCallback );
    glfwSetCursorPosCallback( m_window, cursorPosCallback );
    glfwSetScrollCallback( m_window, scrollCallback );
    glfwSetKeyCallback( m_window, keyCallback );
    glfwSetWindowSizeCallback( m_window, windowSizeCallback );
    glfwSetWindowIconifyCallback( m_window, windowIconifyCallback );

    // Configure camera manipulation via the trackball.
    m_trackball.setCamera( &pState->camera );
    m_trackball.setMoveSpeed( 10.0f );
    m_trackball.setReferenceFrame( make_float3( 1.0f, 0.0f, 0.0f ),
                                   make_float3( 0.0f, 0.0f, 1.0f ),
                                   make_float3( 0.0f, 1.0f, 0.0f ) );
    m_trackball.setGimbalLock( true );
}
// Tear down in this order: the PBO is released while the GL context/UI is
// still alive (cleanupUI follows and destroys the window).
WindowRenderer::~WindowRenderer()
{
m_pState->outputBuffer.deletePBO();
sutil::cleanupUI( m_window );
}
// Main interactive loop: poll input, update device-side params, render a
// subframe, blit the PBO to the window, and display per-stage timing stats.
// Blocks until the window is closed.
void WindowRenderer::run() const
{
sutil::GLDisplay gl_display;
std::chrono::duration<double> state_update_time( 0.0 );
std::chrono::duration<double> render_time( 0.0 );
std::chrono::duration<double> display_time( 0.0 );
do
{
auto t0 = std::chrono::steady_clock::now();
// Process window/input events, then push any state changes to the device.
glfwPollEvents();
updateParams( m_pState );
auto t1 = std::chrono::steady_clock::now();
state_update_time += t1 - t0;
t0 = t1;
// Launch the render for this subframe.
render();
t1 = std::chrono::steady_clock::now();
render_time += t1 - t0;
t0 = t1;
// Display
int framebuf_res_x = 0; // The display's resolution (could be HDPI res)
int framebuf_res_y = 0; //
glfwGetFramebufferSize( m_window, &framebuf_res_x, &framebuf_res_y );
gl_display.display( m_pState->width, m_pState->height, framebuf_res_x, framebuf_res_y,
m_pState->outputBuffer.getPBO() );
t1 = std::chrono::steady_clock::now();
display_time += t1 - t0;
sutil::displayStats( state_update_time, render_time, display_time );
glfwSwapBuffers( m_window );
} while( !glfwWindowShouldClose( m_window ) );
// Ensure all device work has completed before returning.
CUDA_SYNC_CHECK();
}
//
// StaticMethods
//
// Recover the WindowRenderer instance stored in the window's user pointer
// (set in the constructor).
WindowRenderer* WindowRenderer::GetRenderer( GLFWwindow* window )
{
    void* const userData = glfwGetWindowUserPointer( window );
    return static_cast<WindowRenderer*>( userData );
}
// On press: remember the button, start trackball tracking at the cursor and
// restart progressive accumulation. On release: clear the active button.
void WindowRenderer::mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    WindowRenderer* const self = GetRenderer( window );

    if( action != GLFW_PRESS )
    {
        self->m_mouseButton = -1;
        return;
    }

    double xpos = 0.0;
    double ypos = 0.0;
    glfwGetCursorPos( window, &xpos, &ypos );

    self->m_mouseButton = button;
    self->m_trackball.startTracking( static_cast<int>( xpos ), static_cast<int>( ypos ) );
    self->m_pState->params.subframe_index = 0u;
}
// Drag handling: left drag orbits around the look-at point (LookAtFixed),
// right drag rotates the view in place (EyeFixed). Any drag restarts
// progressive accumulation.
void WindowRenderer::cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    WindowRenderer* const self   = GetRenderer( window );
    const int             button = self->m_mouseButton;

    if( button != GLFW_MOUSE_BUTTON_LEFT && button != GLFW_MOUSE_BUTTON_RIGHT )
        return;

    self->m_trackball.setViewMode( button == GLFW_MOUSE_BUTTON_LEFT ? sutil::Trackball::LookAtFixed
                                                                    : sutil::Trackball::EyeFixed );
    self->m_trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ),
                                      self->m_pState->width, self->m_pState->height );
    self->m_pState->params.subframe_index = 0u;
}
// Resize handling: update the render resolution to the new window size and
// restart progressive accumulation.
void WindowRenderer::windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    WindowRenderer* const self = GetRenderer( window );

    // While minimized, keep rendering at the previous resolution.
    if( self->m_minimized )
        return;

    // Output dimensions must be at least 1 in both x and y.
    sutil::ensureMinimumSize( res_x, res_y );

    updateSize( self->m_pState, res_x, res_y );
    self->m_pState->params.subframe_index = 0u;
}
// Track the minimized state (consulted by windowSizeCallback) and restart
// progressive accumulation.
void WindowRenderer::windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    WindowRenderer* const self = GetRenderer( window );
    self->m_minimized = ( iconified > 0 );
    self->m_pState->params.subframe_index = 0u;
}
void WindowRenderer::keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
WindowRenderer* pRenderer = GetRenderer( window );
HairState* pState = pRenderer->m_pState;
if( action == GLFW_PRESS )
{
switch( key )
{
case GLFW_KEY_Q:
case GLFW_KEY_ESCAPE:
{
glfwSetWindowShouldClose( window, true );
}
break;
case GLFW_KEY_1:
{
pState->pHair->setSplineMode( Hair::LINEAR_BSPLINE );
makeHairGAS( pState );
makeInstanceAccelerationStructure( pState );
pState->params.handle = pState->hIAS;
makeProgramGroups( pState );
makePipeline( pState );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to linear b-spline geometry." << std::endl;
}
break;
case GLFW_KEY_2:
{
pState->pHair->setSplineMode( Hair::QUADRATIC_BSPLINE );
makeHairGAS( pState );
makeInstanceAccelerationStructure( pState );
pState->params.handle = pState->hIAS;
makeProgramGroups( pState );
makePipeline( pState );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to quadratic b-spline geometry." << std::endl;
}
break;
case GLFW_KEY_3:
{
pState->pHair->setSplineMode( Hair::CUBIC_BSPLINE );
makeHairGAS( pState );
makeInstanceAccelerationStructure( pState );
pState->params.handle = pState->hIAS;
makeProgramGroups( pState );
makePipeline( pState );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to cubic b-spline geometry." << std::endl;
}
break;
case GLFW_KEY_S:
{
pState->pHair->setShadeMode( Hair::SEGMENT_U );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to per-segment u shading." << std::endl;
}
break;
case GLFW_KEY_R:
{
pState->pHair->setShadeMode( Hair::STRAND_U );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to root-to-tip u shading." << std::endl;
}
break;
case GLFW_KEY_I:
{
pState->pHair->setShadeMode( Hair::STRAND_IDX );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to per-hair color shading." << std::endl;
}
break;
case GLFW_KEY_C:
{
pState->pHair->setRadiusMode( Hair::CONSTANT_R );
makeHairGAS( pState );
makeInstanceAccelerationStructure( pState );
pState->params.handle = pState->hIAS;
makeProgramGroups( pState );
makePipeline( pState );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to constant radius hair geometry." << std::endl;
}
break;
case GLFW_KEY_T:
{
pState->pHair->setRadiusMode( Hair::TAPERED_R );
makeHairGAS( pState );
makeInstanceAccelerationStructure( pState );
pState->params.handle = pState->hIAS;
makeProgramGroups( pState );
makePipeline( pState );
makeSBT( pState );
pState->params.subframe_index = 0u;
std::cout << "Switched to tapered radius hair geometry." << std::endl;
}
break;
} // switch
} // if "press"
}
// Mouse-wheel zoom: forward the vertical scroll amount to the trackball and
// restart progressive accumulation if it changed the camera.
void WindowRenderer::scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
    WindowRenderer* const self   = GetRenderer( window );
    const bool            zoomed = self->m_trackball.wheelEvent( static_cast<int>( yscroll ) );
    if( zoomed )
        self->m_pState->params.subframe_index = 0u;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/Renderers.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <optix.h>
#include <optix_stubs.h>
#include "optixHair.h"
#include <sutil/Trackball.h>
#include <string>
// forward declarations
struct GLFWwindow;
// Base renderer: holds the shared application state and renders single
// frames into it via renderFrame().
class Renderer
{
public:
Renderer( HairState* pState );
// NOTE(review): no definition of defaultCamera() is visible in
// Renderers.cpp — confirm it is defined and used elsewhere.
Camera defaultCamera() const;
protected:
// Render one frame/subframe into the state's output buffer.
void render() const;
// Shared application state; not owned by the renderer.
HairState* const m_pState;
};
// Offline renderer: renders a single frame and saves it to an image file.
class FileRenderer : public Renderer
{
public:
FileRenderer( HairState* pState );
// Render one frame and write the result to 'fileName'.
void renderFile( const std::string& fileName ) const;
};
// Interactive renderer: owns a GLFW window, runs the render/display loop and
// handles mouse/keyboard input through static GLFW callbacks that recover
// the instance from the window's user pointer.
class WindowRenderer : public Renderer
{
public:
WindowRenderer( HairState* pState );
~WindowRenderer();
// Blocking render loop; returns when the window is closed.
void run() const;
protected:
//
// GLFW callbacks
//
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods );
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos );
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y );
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified );
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ );
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll );
private:
// Recover the instance stored in the GLFW window user pointer.
static WindowRenderer* GetRenderer( GLFWwindow* window );
GLFWwindow* m_window = nullptr;
sutil::Trackball m_trackball = {};
int32_t m_mouseButton = -1; // currently pressed button, -1 if none
bool m_minimized = false; // true while iconified (see windowIconifyCallback)
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/Util.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
// includes CUDA Runtime
#include <cuda_runtime.h>
#include <sutil/Exception.h>
#include <vector>
// Copy a single host object of type T to the given raw device address.
template <typename T>
void copyToDevice( const T& source, CUdeviceptr destination )
{
    void* const dst = reinterpret_cast<void*>( destination );
    CUDA_CHECK( cudaMemcpy( dst, &source, sizeof( T ), cudaMemcpyHostToDevice ) );
}
// Allocate device memory for one T, copy 'source' into it, and return the
// device address through 'destination'. Caller owns the allocation.
template <typename T>
void createOnDevice( const T& source, CUdeviceptr* destination )
{
    const size_t byteCount = sizeof( T );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( destination ), byteCount ) );
    copyToDevice( source, *destination );
}
// Copy the contents of a host vector to the given raw device address.
template <typename T>
void copyToDevice( const std::vector<T>& source, CUdeviceptr destination )
{
    const size_t byteCount = source.size() * sizeof( T );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( destination ), source.data(), byteCount, cudaMemcpyHostToDevice ) );
}
template <typename T>
void createOnDevice( const std::vector<T>& source, CUdeviceptr* destination )
{
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( destination ), source.size() * sizeof( T ) ) );
copyToDevice( source, *destination );
}
// Stream a float3 as "(x, y, z)".
inline std::ostream& operator<<( std::ostream& o, float3 v )
{
    return o << "(" << v.x << ", " << v.y << ", " << v.z << ")";
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/optixHair.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <cmath>
#include <cstring>
#include <iomanip>
#include <iterator>
#include <cuda/whitted.h>
#include <sutil/Exception.h>
#include <sutil/sutil.h>
#include <optix_function_table_definition.h>
#include "Hair.h"
#include "Head.h"
#include "ProgramGroups.h"
#include "Renderers.h"
#include "Util.h"
#include "optixHair.h"
// Build — and then compact — the geometry acceleration structure (GAS) for
// the hair curves, leaving the result in pState->deviceBufferHairGAS and the
// traversable handle in pState->hHairGAS. Any previously built hair GAS is
// released first, so this can be called again after a mode change.
void makeHairGAS( HairState* pState )
{
Hair* const pHair = pState->pHair;
// Free any HairGAS related memory previously allocated.
cudaFree( reinterpret_cast<void*>( pState->deviceBufferHairGAS ) );
pState->deviceBufferHairGAS = 0;
pState->hHairGAS = 0;
// Allow compaction (performed at the end of this function) and random
// vertex access (likely needed for curve vertex fetches in the hit
// programs — confirm against the device code).
OptixAccelBuildOptions accelBuildOptions = {};
accelBuildOptions.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS;
accelBuildOptions.operation = OPTIX_BUILD_OPERATION_BUILD;
// Upload control points, per-vertex widths and the segment index buffer.
CUdeviceptr devicePoints = 0;
CUdeviceptr deviceWidths = 0;
CUdeviceptr deviceStrands = 0;
auto tempPoints = pState->pHair->points();
createOnDevice( tempPoints, &devicePoints );
createOnDevice( pHair->widths(), &deviceWidths );
auto segments = pHair->segments();
createOnDevice( segments, &deviceStrands );
unsigned int numberOfHairSegments = static_cast<unsigned int>( segments.size() );
// Curve build input.
OptixBuildInput buildInput = {};
buildInput.type = OPTIX_BUILD_INPUT_TYPE_CURVES;
// The built-in curve primitive type must track the hair's spline mode.
switch( pHair->splineMode() )
{
case Hair::LINEAR_BSPLINE:
buildInput.curveArray.curveType = OPTIX_PRIMITIVE_TYPE_ROUND_LINEAR;
break;
case Hair::QUADRATIC_BSPLINE:
buildInput.curveArray.curveType = OPTIX_PRIMITIVE_TYPE_ROUND_QUADRATIC_BSPLINE;
break;
case Hair::CUBIC_BSPLINE:
buildInput.curveArray.curveType = OPTIX_PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE;
break;
default:
SUTIL_ASSERT_MSG( false, "Invalid spline mode" );
}
buildInput.curveArray.numPrimitives = numberOfHairSegments;
buildInput.curveArray.vertexBuffers = &devicePoints;
buildInput.curveArray.numVertices = static_cast<unsigned int>( tempPoints.size() );
buildInput.curveArray.vertexStrideInBytes = sizeof( float3 );
buildInput.curveArray.widthBuffers = &deviceWidths;
buildInput.curveArray.widthStrideInBytes = sizeof( float );
buildInput.curveArray.normalBuffers = 0;
buildInput.curveArray.normalStrideInBytes = 0;
buildInput.curveArray.indexBuffer = deviceStrands;
buildInput.curveArray.indexStrideInBytes = sizeof( int );
buildInput.curveArray.flag = OPTIX_GEOMETRY_FLAG_NONE;
buildInput.curveArray.primitiveIndexOffset = 0;
// Query temp and output buffer sizes required for this build input.
OptixAccelBufferSizes bufferSizesGAS;
OPTIX_CHECK( optixAccelComputeMemoryUsage( pState->context,
&accelBuildOptions,
&buildInput,
1, // Number of build inputs
&bufferSizesGAS ) );
CUdeviceptr deviceTempBufferGAS;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &deviceTempBufferGAS ),
bufferSizesGAS.tempSizeInBytes ) );
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pState->deviceBufferHairGAS ),
bufferSizesGAS.outputSizeInBytes ) );
// Ask the build to emit the compacted size, used below to right-size the GAS.
CUdeviceptr deviceCompactedSize;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &deviceCompactedSize ),
sizeof(size_t) ) );
OptixAccelEmitDesc emitDesc = {};
emitDesc.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
emitDesc.result = deviceCompactedSize;
OPTIX_CHECK( optixAccelBuild( pState->context,
0, // CUDA stream
&accelBuildOptions,
&buildInput,
1, // num build inputs
deviceTempBufferGAS,
bufferSizesGAS.tempSizeInBytes,
pState->deviceBufferHairGAS,
bufferSizesGAS.outputSizeInBytes,
&pState->hHairGAS,
&emitDesc, // emitted property list
1 ) ); // num emitted properties
size_t compactedSize;
CUDA_CHECK( cudaMemcpy(&compactedSize, (void*)deviceCompactedSize, sizeof(size_t), cudaMemcpyDeviceToHost) );
printf("bufferSizesGAS.outputSizeInBytes: %zd compacted size: %zd\n",
bufferSizesGAS.outputSizeInBytes, compactedSize);
// Compact into a right-sized buffer, then release the original build output.
// NOTE(review): compaction is performed unconditionally; the usual pattern
// skips it when compactedSize >= outputSizeInBytes — confirm if worth adding.
CUdeviceptr deviceCompactedGAS;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &deviceCompactedGAS), compactedSize ) );
OPTIX_CHECK( optixAccelCompact( pState->context,
0,
pState->hHairGAS,
deviceCompactedGAS,
compactedSize,
&pState->hHairGAS ) );
CUDA_CHECK( cudaFree( (void*)pState->deviceBufferHairGAS ) );
CUDA_CHECK( cudaFree( (void*)deviceCompactedSize ) );
pState->deviceBufferHairGAS = deviceCompactedGAS;
// We can now free the scratch space buffers used during build.
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( deviceTempBufferGAS ) ) );
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( devicePoints ) ) );
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( deviceWidths ) ) );
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( deviceStrands ) ) );
}
// Build the instance acceleration structure (IAS) over the head and hair
// GASes, storing the result in pState->deviceBufferIAS / hIAS and growing
// pState->aabb to cover both transformed objects. Any previously built IAS
// is released first.
void makeInstanceAccelerationStructure( HairState* pState )
{
// Free any memory that has been previously allocated.
cudaFree( reinterpret_cast<void*>( pState->deviceBufferIAS ) );
pState->deviceBufferIAS = 0;
pState->hIAS = 0;
std::vector<OptixInstance> instances;
unsigned int sbtOffset = 0;
OptixInstance instance = {};
// Common instance settings
instance.instanceId = 0;
instance.visibilityMask = 0xFF;
instance.flags = OPTIX_INSTANCE_FLAG_NONE;
// Axis-permutation transform applied to both instances.
sutil::Matrix3x4 yUpTransform = {
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
1.0f, 0.0f, 0.0f, 0.0f,
};
// Head first
// (Instance order and sbtOffset increments must mirror the hit-record
// order produced in makeSBT: head first, hair second.)
if( pState->pHead )
{
memcpy( instance.transform, yUpTransform.getData(), sizeof( float ) * 12 );
instance.sbtOffset = sbtOffset;
instance.traversableHandle = pState->pHead->traversable();
sbtOffset += whitted::RAY_TYPE_COUNT;
instances.push_back( instance );
sutil::Aabb bb = pState->pHead->aabb();
bb.transform( yUpTransform );
pState->aabb.include( bb );
}
// Hair second
if( pState->pHair )
{
memcpy( instance.transform, yUpTransform.getData(), sizeof( float ) * 12 );
instance.sbtOffset = sbtOffset;
instance.traversableHandle = pState->hHairGAS;
sbtOffset += whitted::RAY_TYPE_COUNT;
instances.push_back( instance );
sutil::Aabb bb = pState->pHair->aabb();
bb.transform( yUpTransform );
pState->aabb.include( bb );
}
// Upload the instance descriptors.
CUdeviceptr deviceInstances = 0;
createOnDevice( instances, &deviceInstances );
// Instance build input.
OptixBuildInput buildInput = {};
buildInput.type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
buildInput.instanceArray.instances = deviceInstances;
buildInput.instanceArray.numInstances = static_cast<unsigned int>( instances.size() );
OptixAccelBuildOptions accelBuildOptions = {};
accelBuildOptions.buildFlags = OPTIX_BUILD_FLAG_NONE;
accelBuildOptions.operation = OPTIX_BUILD_OPERATION_BUILD;
// Query buffer sizes, allocate, and build.
OptixAccelBufferSizes bufferSizesIAS;
OPTIX_CHECK( optixAccelComputeMemoryUsage( pState->context, &accelBuildOptions, &buildInput,
1, // Number of build inputs
&bufferSizesIAS ) );
CUdeviceptr deviceTempBufferIAS;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &deviceTempBufferIAS ),
bufferSizesIAS.tempSizeInBytes ) );
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pState->deviceBufferIAS ),
bufferSizesIAS.outputSizeInBytes ) );
OPTIX_CHECK( optixAccelBuild( pState->context,
0, // CUDA stream
&accelBuildOptions,
&buildInput,
1, // num build inputs
deviceTempBufferIAS,
bufferSizesIAS.tempSizeInBytes,
pState->deviceBufferIAS,
bufferSizesIAS.outputSizeInBytes,
&pState->hIAS,
nullptr, // emitted property list
0 ) ); // num emitted properties
// Scratch buffers are no longer needed once the build has completed.
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( deviceInstances ) ) );
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( deviceTempBufferIAS ) ) );
}
// Point the camera at the center of the scene's bounding box, backed off far
// enough along +z that the bounding sphere fits in the field of view.
void initializeCamera( HairState* pState )
{
    const float aspectRatio = pState->width / static_cast<float>( pState->height );
    const float fovAngle    = 30.0f;
    pState->camera.setFovY( fovAngle );

    // Back-off distance derived from the bounding-sphere radius and the FOV;
    // widened further for landscape aspect ratios.
    const float radius = length( pState->aabb.m_max - pState->aabb.center() );
    float eyeDistance  = radius / sin( (float)M_PI / 180.0f * fovAngle );
    if( aspectRatio > 1.0f )
        eyeDistance *= aspectRatio;

    pState->camera.setLookat( pState->aabb.center() );
    pState->camera.setEye( pState->aabb.center() + make_float3( 0.0f, 0.0f, eyeDistance ) );
    pState->camera.setUp( { 0.0f, 1.0f, 0.0f } );
    pState->camera.setAspectRatio( aspectRatio );
}
// Apply a new render resolution: record it in the state, update the camera
// aspect ratio, and resize the output and accumulation buffers to match.
void updateSize( HairState* pState, int width, int height )
{
    pState->width  = width;
    pState->height = height;

    pState->camera.setAspectRatio( width / static_cast<float>( height ) );
    pState->outputBuffer.resize( width, height );
    pState->accumBuffer.resize( width, height );
}
// Assemble the pipeline compile options shared by module and pipeline
// creation. The enabled primitive-type flags are the union of what the head
// and hair objects report.
OptixPipelineCompileOptions defaultPipelineCompileOptions( HairState* pState )
{
    OptixPipelineCompileOptions options = {};
    options.usesMotionBlur        = false;
    options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING;
    options.numPayloadValues      = 4;
    options.numAttributeValues    = 1;
#ifdef DEBUG  // Enables debug exceptions during optix launches. This may incur significant performance cost and should only be done during development.
    options.exceptionFlags = OPTIX_EXCEPTION_FLAG_DEBUG | OPTIX_EXCEPTION_FLAG_TRACE_DEPTH | OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
#else
    options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
#endif
    options.pipelineLaunchParamsVariableName = "params";

    // Combine the primitive types required by each scene object present.
    unsigned int primitiveTypes = 0;
    if( pState->pHead )
        primitiveTypes |= pState->pHead->usesPrimitiveTypes();
    if( pState->pHair )
        primitiveTypes |= pState->pHair->usesPrimitiveTypes();
    options.usesPrimitiveTypeFlags = primitiveTypes;

    return options;
}
// Replace the current program groups with a fresh set matching the current
// pipeline compile options: the common miss/raygen programs plus whatever
// hit/intersection groups the scene objects contribute.
void makeProgramGroups( HairState* pState )
{
    delete pState->pProgramGroups;
    pState->pProgramGroups = new HairProgramGroups( pState->context, defaultPipelineCompileOptions( pState ) );
    HairProgramGroups* const groups = pState->pProgramGroups;

    // Radiance miss program.
    OptixProgramGroupDesc desc = {};
    desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
    desc.miss.module            = groups->m_whittedModule;
    desc.miss.entryFunctionName = "__miss__constant_radiance";
    groups->add( desc, "miss" );

    // Occlusion miss program: a null program suffices for shadow rays.
    desc                        = {};
    desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
    desc.miss.module            = nullptr;
    desc.miss.entryFunctionName = nullptr;
    groups->add( desc, "missOcclude" );

    // Pinhole-camera ray generation program.
    desc                          = {};
    desc.kind                     = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
    desc.raygen.module            = groups->m_whittedModule;
    desc.raygen.entryFunctionName = "__raygen__pinhole";
    groups->add( desc, "raygen" );

    // Let each present scene object register its own program groups.
    if( pState->pHair )
        pState->pHair->gatherProgramGroups( groups );
    if( pState->pHead )
        pState->pHead->gatherProgramGroups( groups );
}
// Build the SBT hit records for the hair geometry: one radiance record
// (keyed by the hair's current program name) followed by one occlusion
// record. As a side effect, the per-strand lookup tables referenced by the
// records are (re)uploaded to the device into pState->curves.
std::vector<HitRecord> hairSbtHitRecords( HairState* pState, const ProgramGroups& programs )
{
// clear curves_ data
cudaFree( reinterpret_cast<void*>( pState->curves.strand_u.data ) );
pState->curves.strand_u.data = 0;
cudaFree( reinterpret_cast<void*>( pState->curves.strand_i.data ) );
pState->curves.strand_i.data = 0;
cudaFree( reinterpret_cast<void*>( pState->curves.strand_info.data ) );
pState->curves.strand_info.data = 0;
std::vector<HitRecord> records;
HitRecord hitGroupRecord = {};
// The geometry type tag must track the hair's current spline mode.
switch( pState->pHair->splineMode() )
{
case Hair::LINEAR_BSPLINE:
hitGroupRecord.data.geometry_data.type = GeometryData::LINEAR_CURVE_ARRAY;
break;
case Hair::QUADRATIC_BSPLINE:
hitGroupRecord.data.geometry_data.type = GeometryData::QUADRATIC_CURVE_ARRAY;
break;
case Hair::CUBIC_BSPLINE:
hitGroupRecord.data.geometry_data.type = GeometryData::CUBIC_CURVE_ARRAY;
break;
default:
SUTIL_ASSERT_MSG( false, "Invalid spline mode." );
}
// Per-segment strand-u table (one float2 per segment).
// NOTE(review): the 'count' fields below are narrowed to uint16_t; a model
// with more than 65535 segments or strands would silently truncate here —
// confirm the field types / model limits upstream.
CUdeviceptr strandUs = 0;
createOnDevice( pState->pHair->strandU(), &strandUs );
pState->curves.strand_u.data = strandUs;
pState->curves.strand_u.byte_stride = static_cast<uint16_t>( sizeof( float2 ) );
SUTIL_ASSERT( pState->pHair->numberOfSegments() == static_cast<int>( pState->pHair->strandU().size() ) );
pState->curves.strand_u.count = static_cast<uint16_t>( pState->pHair->numberOfSegments() );
pState->curves.strand_u.elmt_byte_size = static_cast<uint16_t>( sizeof( float2 ) );
// Per-segment strand index table (one unsigned int per segment).
CUdeviceptr strandIs = 0;
createOnDevice( pState->pHair->strandIndices(), &strandIs );
pState->curves.strand_i.data = strandIs;
pState->curves.strand_i.byte_stride = static_cast<uint16_t>( sizeof( unsigned int ) );
pState->curves.strand_i.count = static_cast<uint16_t>( pState->pHair->numberOfSegments() );
pState->curves.strand_i.elmt_byte_size = static_cast<uint16_t>( sizeof( unsigned int ) );
// Per-strand info table (one uint2 per strand).
CUdeviceptr strandInfos = 0;
createOnDevice( pState->pHair->strandInfo(), &strandInfos );
pState->curves.strand_info.data = strandInfos;
pState->curves.strand_info.byte_stride = static_cast<uint16_t>( sizeof( uint2 ) );
pState->curves.strand_info.count = static_cast<uint16_t>( pState->pHair->numberOfStrands() );
pState->curves.strand_info.elmt_byte_size = static_cast<uint16_t>( sizeof( uint2 ) );
// Radiance hit record: geometry tables plus a fixed PBR material.
hitGroupRecord.data.geometry_data.curves = pState->curves;
hitGroupRecord.data.material_data.pbr.base_color = {0.8f, 0.1f, 0.1f};
hitGroupRecord.data.material_data.pbr.metallic = 0.0f;
hitGroupRecord.data.material_data.pbr.roughness = 0.6f;
std::string name = pState->pHair->programName() + pState->pHair->programSuffix();
OPTIX_CHECK( optixSbtRecordPackHeader( programs[name], &hitGroupRecord ) );
records.push_back( hitGroupRecord );
// Occlusion hit record reuses the same data with the occlusion program.
OPTIX_CHECK( optixSbtRecordPackHeader( programs["occludeCurve"], &hitGroupRecord ) );
records.push_back( hitGroupRecord );
return records;
}
// Build and upload the shader binding table (SBT) for the hair scene:
// one raygen record, two miss records (radiance + occlusion), and the
// hit records contributed by the head mesh and the hair curves.
void makeSBT( HairState* pState )
{
    // Miss records: one per ray type (radiance first, then occlusion).
    std::vector<MissRecord> missRecords;
    MissRecord missRecord;
    OPTIX_CHECK( optixSbtRecordPackHeader( ( *pState->pProgramGroups )["miss"], &missRecord ) );
    missRecords.push_back( missRecord );
    OPTIX_CHECK( optixSbtRecordPackHeader( ( *pState->pProgramGroups )["missOcclude"], &missRecord ) );
    missRecords.push_back( missRecord );
    // Hit records are ordered head first, hair second; instance SBT
    // offsets elsewhere must match this ordering.
    std::vector<HitRecord> hitRecords;
    // Head first
    if( pState->pHead )
    {
        std::vector<HitRecord> headRecords = pState->pHead->sbtHitRecords( *pState->pProgramGroups, whitted::RAY_TYPE_COUNT );
        std::copy( headRecords.begin(), headRecords.end(), std::back_inserter( hitRecords ) );
    }
    // Hair second
    if( pState->pHair )
    {
        std::vector<HitRecord> hairRecords = hairSbtHitRecords( pState, *pState->pProgramGroups );
        std::copy( hairRecords.begin(), hairRecords.end(), std::back_inserter( hitRecords ) );
    }
    // raygen record
    RayGenRecord raygenRecord;
    OPTIX_CHECK( optixSbtRecordPackHeader( ( *pState->pProgramGroups )["raygen"], &raygenRecord ) );
    // Release device buffers from a previous SBT build before re-uploading.
    cudaFree( reinterpret_cast<void*>( pState->SBT.raygenRecord ) );
    cudaFree( reinterpret_cast<void*>( pState->SBT.missRecordBase ) );
    cudaFree( reinterpret_cast<void*>( pState->SBT.hitgroupRecordBase ) );
    // Upload all records to the device.
    CUdeviceptr deviceRayGenRecord;
    createOnDevice( raygenRecord, &deviceRayGenRecord );
    CUdeviceptr deviceMissRecords;
    createOnDevice( missRecords, &deviceMissRecords );
    CUdeviceptr deviceHitGroupRecords;
    createOnDevice( hitRecords, &deviceHitGroupRecords );
    // Wire device pointers, strides and counts into the SBT struct.
    pState->SBT.raygenRecord = deviceRayGenRecord;
    pState->SBT.missRecordBase = deviceMissRecords;
    pState->SBT.missRecordStrideInBytes = sizeof( MissRecord );
    pState->SBT.missRecordCount = static_cast<unsigned int>( missRecords.size() );
    pState->SBT.hitgroupRecordBase = deviceHitGroupRecords;
    pState->SBT.hitgroupRecordStrideInBytes = sizeof( HitRecord );
    pState->SBT.hitgroupRecordCount = static_cast<unsigned int>( hitRecords.size() );
}
// (Re)create the OptiX pipeline from the current program groups and
// configure its stack sizes for a maximum trace depth of 2
// (camera ray plus one occlusion ray).
void makePipeline( HairState* pState )
{
    const uint32_t max_trace_depth = 2;
    // Destroy any previously built pipeline before rebuilding.
    if( pState->pipeline )
        OPTIX_CHECK( optixPipelineDestroy( pState->pipeline ) );
    OptixPipelineCompileOptions pipelineCompileOptions = defaultPipelineCompileOptions( pState );
    OptixPipelineLinkOptions pipelineLinkOptions = {};
    pipelineLinkOptions.maxTraceDepth = max_trace_depth;
    pipelineLinkOptions.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
    OPTIX_CHECK_LOG2( optixPipelineCreate( pState->context,
                                           &pipelineCompileOptions,
                                           &pipelineLinkOptions,
                                           pState->pProgramGroups->data(),
                                           pState->pProgramGroups->size(),
                                           LOG,
                                           &LOG_SIZE,
                                           &pState->pipeline ) );
    // Accumulate stack requirements over every program group, then let
    // the OptiX utility compute the final pipeline stack sizes.
    OptixStackSizes stack_sizes = {};
    for( unsigned int i = 0; i < pState->pProgramGroups->size(); ++i )
    {
        OPTIX_CHECK( optixUtilAccumulateStackSizes( pState->pProgramGroups->data()[i], &stack_sizes ) );
    }
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
                                             0,  // maxCCDepth
                                             0,  // maxDCDepth
                                             &direct_callable_stack_size_from_traversal,
                                             &direct_callable_stack_size_from_state, &continuation_stack_size ) );
    OPTIX_CHECK( optixPipelineSetStackSize( pState->pipeline, direct_callable_stack_size_from_traversal,
                                            direct_callable_stack_size_from_state, continuation_stack_size,
                                            2  // maxTraversableDepth: IAS -> GAS
                                            ) );
}
// OptiX context log callback: writes "[LL][         TAG]: message" to
// stderr (level padded to 2 columns, tag right-aligned to 12).
void printLogMessage( unsigned int level, const char* tag, const char* message, void* /* cbdata */ )
{
    std::cerr << "[" << std::setw( 2 ) << level << "]";
    std::cerr << "[" << std::setw( 12 ) << tag << "]: ";
    std::cerr << message << std::endl;
}
// Initialize the CUDA runtime and create the OptiX device context on
// the current CUDA context, with logging routed to printLogMessage.
void initializeOptix( HairState* pState )
{
    // Initialize CUDA (a no-op cudaFree forces runtime initialization).
    CUDA_CHECK( cudaFree( nullptr ) );
    OPTIX_CHECK( optixInit() );
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &printLogMessage;
    options.logCallbackLevel = 4;
    OPTIX_CHECK( optixDeviceContextCreate( 0 /* default cuda context */, &options, &pState->context ) );
}
// Fill in the launch-parameter block: frame buffers, camera frame, two
// point lights (a fixed key light and a headlight that follows the
// camera), and the top-level traversable; then upload params and lights
// to the device.
void initializeParams( HairState* pState )
{
    pState->params.accum_buffer = nullptr;  // Unused for the moment
    pState->params.frame_buffer = nullptr;  // Will be set when output buffer is mapped
    pState->params.subframe_index = 0u;
    // Light placement distance scales with the scene's bounding box.
    const float loffset = 2.0f * pState->aabb.maxExtent();
    pState->params.miss_color = make_float3( 0.1f, 0.1f, 0.4f );
    pState->params.eye = pState->camera.eye();
    pState->camera.UVWFrame( pState->params.U, pState->params.V, pState->params.W );
    // Key light: white point light offset diagonally from the scene center.
    pState->lights[0].type = Light::Type::POINT;
    pState->lights[0].point.color = {1.0f, 1.0f, 1.0f};
    pState->lights[0].point.intensity = 2.0f;
    pState->lights[0].point.position = pState->aabb.center() + make_float3( loffset );
    pState->lights[0].point.falloff = Light::Falloff::QUADRATIC;
    pState->lights[1].type = Light::Type::POINT;
    pState->lights[1].point.color = {1.0f, 1.0f, 1.0f};
    pState->lights[1].point.intensity = 2.0f;
    // headlight...slightly offset to the side of eye.
    pState->lights[1].point.position = pState->camera.eye() + pState->params.U;
    pState->lights[1].point.falloff = Light::Falloff::QUADRATIC;
    pState->params.lights.count = 2;
    createOnDevice( pState->lights, &pState->params.lights.data );
    pState->params.handle = pState->hIAS;
    // Device-side mirror of the whole params struct.
    createOnDevice( pState->params, reinterpret_cast<CUdeviceptr*>( &pState->deviceParams ) );
}
// Refresh camera-dependent parameters (eye position, UVW frame, the
// camera-attached headlight) and re-upload the light array to the
// device. The params struct itself is uploaded later, in renderFrame.
void updateParams( HairState* pState )
{
    pState->params.eye = pState->camera.eye();
    pState->camera.UVWFrame( pState->params.U, pState->params.V, pState->params.W );
    // Keep the headlight attached to (and slightly beside) the camera.
    pState->lights[1].point.position = pState->camera.eye() + pState->params.U;
    CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( pState->params.lights.data ),
                                 &pState->lights,
                                 sizeof( pState->lights ),
                                 cudaMemcpyHostToDevice,
                                 0  // stream
                                 ) );
}
// Render a single frame: map the output/accumulation buffers, upload
// the launch parameters, launch the pipeline over width x height
// pixels, then unmap and synchronize.
void renderFrame( HairState* pState )
{
    // Launch
    pState->params.frame_buffer = pState->outputBuffer.map();
    pState->params.accum_buffer = pState->accumBuffer.map();
    CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( pState->deviceParams ),
                                 &pState->params,
                                 sizeof( whitted::LaunchParams ),
                                 cudaMemcpyHostToDevice,
                                 0  // stream
                                 ) );
    OPTIX_CHECK( optixLaunch( pState->pipeline,
                              0,  // stream
                              reinterpret_cast<CUdeviceptr>( pState->deviceParams ),
                              sizeof( whitted::LaunchParams ),
                              &( pState->SBT ),
                              pState->width,   // launch width
                              pState->height,  // launch height
                              1                // launch depth
                              ) );
    pState->outputBuffer.unmap();
    pState->accumBuffer.unmap();
    pState->params.subframe_index++;
    // Block until the launch finishes (also surfaces launch errors).
    CUDA_SYNC_CHECK();
}
// Print the interactive keyboard help to stderr.
void printKeyboardCommands()
{
    static const char* const kHelpText =
        "\n\nKeyboard commands:\n\n"
        " 'q' (or 'ESC'): Quit the application.\n"
        " '1' linear b-spline interpretation of the geometry.\n"
        " '2' quadratic b-spline interpretation of the geometry.\n"
        " '3' cubic b-spline interpretation of the geometry.\n"
        " 's' \"segment u\": lerp from red to green via segment u,\n"
        " i.e. each segment starts green and ends red.\n"
        " 'r' \"root-to-tip u\": lerp red to green with root-to-tip u,\n"
        " i.e. hair roots are red and tips are green.\n"
        " 'i' \"index color\": assign one of five solid colors (green,\n"
        " blue, magenta, cyan, and yellow) based on a hair's index;\n"
        " tips lerp to red. The shader in this mode demonstrates\n"
        " how to compute a hair index from the primitive index.\n"
        " It also does root to tip shading but uses index based math\n"
        " to compute a contiguous u along the hair.\n"
        " 'c' \"constant radius\" hair geometry.\n"
        " 't' \"tapered radius\" hair geometry.\n";
    std::cerr << kHelpText;
}
// Print command-line usage plus the keyboard help, then terminate.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n"
              << "Options: --file | -f <filename> File for image output\n"
              << " --dim=<width>x<height> Set image dimensions; defaults to 1024x768\n"
              << " --hair <model.hair> Specify the hair model; defaults to \"Hair/wStraight.hair\"\n"
              << " --deg=<1|2|3> Specify the curve degree; defaults to 3\n"
              << " --help | -h Print this usage message\n\n\n";
    printKeyboardCommands();
    exit( 0 );
}
//
// Main program
//
int main( int argc, char* argv[] )
{
    //
    // Defaults. NOTE: the height default was 786, contradicting the usage
    // text ("defaults to 1024x768"); fixed to 768 to match it.
    //
    std::string hairFile( "Hair/wStraight.hair" );
    std::vector<int> image_size( 2 );
    image_size[0] = 1024;
    image_size[1] = 768;
    int curveDegree = 3;
    std::string outputFile;
    //
    // Parse command line options
    //
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--hair" )
        {
            // Option requires a following argument.
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            hairFile = argv[++i];
        }
        else if( arg == "--file" || arg == "-f" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outputFile = argv[++i];
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            sutil::parseDimensions( dims_arg.c_str(), image_size[0], image_size[1] );
        }
        else if( arg.substr( 0, 6 ) == "--deg=" )
        {
            const std::string deg_arg = arg.substr( 6 );
            curveDegree = atoi( deg_arg.c_str() );
            std::cerr << "curveDegree = " << curveDegree << std::endl;
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    try
    {
        printKeyboardCommands();
        std::cerr << "\n\n" << std::endl;
        HairState state = {};
        initializeOptix( &state );
        state.outputBuffer.setStream( 0 );  // CUDA stream
        state.accumBuffer.setStream( 0 );   // CUDA stream
        // Load the hair model and size the render buffers.
        std::string hairFileName = sutil::sampleDataFilePath( hairFile.c_str() );
        Hair hair( state.context, hairFileName );
        state.width = image_size[0];
        state.height = image_size[1];
        state.outputBuffer.resize( state.width, state.height );
        state.accumBuffer.resize( state.width, state.height );
        // Select how the hair geometry is interpreted (curve degree 1-3).
        if( 1 == curveDegree )
            hair.setSplineMode( Hair::LINEAR_BSPLINE );
        else if( 2 == curveDegree )
            hair.setSplineMode( Hair::QUADRATIC_BSPLINE );
        else if( 3 == curveDegree )
            hair.setSplineMode( Hair::CUBIC_BSPLINE );
        else
            SUTIL_ASSERT_MSG( false, "Invalid curve degree" );
        std::cout << hair << std::endl;
        state.pHair = &hair;
        // Load the head model.
        std::string headFileName = sutil::sampleDataFilePath( "Hair/woman.gltf" );
        const Head head( state.context, headFileName );
        std::cout << head << std::endl;
        state.pHead = &head;
        // with head and hair set put them into an IAS...
        makeHairGAS( &state );
        makeInstanceAccelerationStructure( &state );
        initializeCamera( &state );
        makeProgramGroups( &state );
        makePipeline( &state );
        makeSBT( &state );
        initializeParams( &state );
        if( !outputFile.empty() )  // render single frame to file
        {
            const FileRenderer renderer( &state );
            renderer.renderFile( outputFile.c_str() );
        }
        else
        {
            WindowRenderer renderer( &state );
            renderer.run();
        }
    }
    catch( const std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/optixHair.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <cuda/GeometryData.h>
#include <cuda/LocalGeometry.h>
#include <cuda/curve.h>
#include <cuda/helpers.h>
#include <cuda/whitted_cuda.h>
#include <sutil/vec_math.h>
// Return the ray/curve hit point in world coordinates, computed from
// the current ray's origin, direction and tmax.
static __forceinline__ __device__ float3 getHitPoint()
{
    const float3 origin    = optixGetWorldRayOrigin();
    const float3 direction = optixGetWorldRayDirection();
    const float  tHit      = optixGetRayTmax();
    return origin + tHit * direction;
}
// Compute the surface normal of a linear curve primitive in world space.
// (Original comment wrongly said "quadratic".)
static __forceinline__ __device__ float3 normalLinear( const int primitiveIndex )
{
    const OptixTraversableHandle gas = optixGetGASTraversableHandle();
    const unsigned int gasSbtIndex = optixGetSbtGASIndex();
    // Fetch the two control points of the hit linear b-spline segment.
    float4 controlPoints[2];
    optixGetLinearCurveVertexData( gas, primitiveIndex, gasSbtIndex, 0.0f, controlPoints );
    LinearBSplineSegment interpolator( controlPoints );
    float3 hitPoint = getHitPoint();
    // interpolators work in object space
    hitPoint = optixTransformPointFromWorldToObjectSpace( hitPoint );
    const float3 normal = surfaceNormal( interpolator, optixGetCurveParameter(), hitPoint );
    return optixTransformNormalFromObjectToWorldSpace( normal );
}
// Compute the surface normal of a quadratic curve primitive in world space.
static __forceinline__ __device__ float3 normalQuadratic( const int primitiveIndex )
{
    const OptixTraversableHandle gas = optixGetGASTraversableHandle();
    const unsigned int gasSbtIndex = optixGetSbtGASIndex();
    // Fetch the three control points of the hit quadratic b-spline segment.
    float4 controlPoints[3];
    optixGetQuadraticBSplineVertexData( gas, primitiveIndex, gasSbtIndex, 0.0f, controlPoints );
    QuadraticBSplineSegment interpolator( controlPoints );
    float3 hitPoint = getHitPoint();
    // interpolators work in object space
    hitPoint = optixTransformPointFromWorldToObjectSpace( hitPoint );
    const float3 normal = surfaceNormal( interpolator, optixGetCurveParameter(), hitPoint );
    return optixTransformNormalFromObjectToWorldSpace( normal );
}
// Compute the surface normal of a cubic curve primitive in world space.
static __forceinline__ __device__ float3 normalCubic( const int primitiveIndex )
{
    const OptixTraversableHandle gas = optixGetGASTraversableHandle();
    const unsigned int gasSbtIndex = optixGetSbtGASIndex();
    // Fetch the four control points of the hit cubic b-spline segment.
    float4 controlPoints[4];
    optixGetCubicBSplineVertexData( gas, primitiveIndex, gasSbtIndex, 0.0f, controlPoints );
    CubicBSplineSegment interpolator( controlPoints );
    float3 hitPoint = getHitPoint();
    // interpolators work in object space
    hitPoint = optixTransformPointFromWorldToObjectSpace( hitPoint );
    const float3 normal = surfaceNormal( interpolator, optixGetCurveParameter(), hitPoint );
    return optixTransformNormalFromObjectToWorldSpace( normal );
}
// Evaluate direct lighting at a hit point with a metallic-roughness
// (GGX) BRDF, tracing one occlusion ray per point light.
//
// Parameters:
//   hitGroupData - SBT data carrying the material parameters.
//   hitPoint     - shading position in world space.
//   normal       - shading normal in world space.
//   base_color   - albedo supplied by the calling closest-hit program.
static __forceinline__ __device__ float3 shade( const whitted::HitGroupData* hitGroupData, const float3 hitPoint, const float3 normal, const float3 base_color )
{
    //
    // Retrieve material data
    //
    float metallic = hitGroupData->material_data.pbr.metallic;
    float roughness = hitGroupData->material_data.pbr.roughness;
    //
    // Convert to material params
    //
    const float F0 = 0.04f;
    const float3 diff_color = base_color * ( 1.0f - F0 ) * ( 1.0f - metallic );
    const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
    const float alpha = roughness * roughness;
    float3 result = make_float3( 0.0f );
    for( int i = 0; i < whitted::params.lights.count; ++i )
    {
        Light light = whitted::params.lights[i];
        if( light.type == Light::Type::POINT )
        {
            const float L_dist = length( light.point.position - hitPoint );
            const float3 L = ( light.point.position - hitPoint ) / L_dist;
            const float3 V = -normalize( optixGetWorldRayDirection() );
            const float3 H = normalize( L + V );
            const float N_dot_L = dot( normal, L );
            const float N_dot_V = dot( normal, V );
            const float N_dot_H = dot( normal, H );
            const float V_dot_H = dot( V, H );
            // Only shade points that face both the light and the viewer.
            if( N_dot_L > 0.0f && N_dot_V > 0.0f )
            {
                // Shadow ray; epsilons at both ends avoid self-intersection.
                const float tmin = 0.001f; // TODO
                const float tmax = L_dist - 0.001f; // TODO
                const bool occluded = whitted::traceOcclusion( whitted::params.handle, hitPoint, L, tmin, tmax );
                if( !occluded )
                {
                    // Cook-Torrance terms: Fresnel, visibility, distribution.
                    const float3 F = whitted::schlick( spec_color, V_dot_H );
                    const float G_vis = whitted::vis( N_dot_L, N_dot_V, alpha );
                    const float D = whitted::ggxNormal( N_dot_H, alpha );
                    const float3 diff = ( 1.0f - F ) * diff_color / M_PIf;
                    const float3 spec = F * G_vis * D;
                    result += light.point.color * light.point.intensity * N_dot_L * ( diff + spec );
                }
            }
        }
    }
    return result;
}
// Get the u-parameter over the full strand for the current hit.
//
// Parameters:
//   geo            - the GeometryData from the SBT.
//   primitiveIndex - the primitive index
//
// strand_u stores, per segment: x = strand u at the segment start,
// y = scale factor (i.e. 1 / number of segments).
static __forceinline__ __device__ float getStrandU( const GeometryData& geo, const int primitiveIndex )
{
    const float2 info = geo.curves.strand_u[primitiveIndex];
    return info.x + optixGetCurveParameter() * info.y;
}
// Dispatch surface-normal computation on the hit primitive type.
// An unrecognized type yields a zero normal.
static __forceinline__ __device__ float3 computeNormal( OptixPrimitiveType type, const int primitiveIndex )
{
    if( type == OPTIX_PRIMITIVE_TYPE_ROUND_LINEAR )
        return normalLinear( primitiveIndex );
    if( type == OPTIX_PRIMITIVE_TYPE_ROUND_QUADRATIC_BSPLINE )
        return normalQuadratic( primitiveIndex );
    if( type == OPTIX_PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE )
        return normalCubic( primitiveIndex );
    return make_float3( 0.0f );
}
// Closest-hit program: color the hair by lerping between red and green
// using the strand-global parameter u, then apply direct lighting and
// return the result through the ray payload.
extern "C" __global__ void __closesthit__curve_strand_u()
{
    const unsigned int primitiveIndex = optixGetPrimitiveIndex();
    const whitted::HitGroupData* hitGroupData = reinterpret_cast<whitted::HitGroupData*>( optixGetSbtDataPointer() );
    const GeometryData& geometryData = reinterpret_cast<const GeometryData&>( hitGroupData->geometry_data );
    const float3 normal = computeNormal( optixGetPrimitiveType(), primitiveIndex );
    const float3 colors[2] = {make_float3( 1, 0, 0 ), make_float3( 0, 1, 0 )};
    // u spans the whole strand (see getStrandU), not just this segment.
    const float u = getStrandU( geometryData, primitiveIndex );
    const float3 base_color = colors[0] * u + colors[1] * ( 1 - u );
    const float3 hitPoint = getHitPoint();
    const float3 result = shade( hitGroupData, hitPoint, normal, base_color );
    whitted::setPayloadResult( result );
}
// Closest-hit program: color by the segment-local curve parameter u
// (lerp between red and green); hits on an end-cap (u exactly 0 or 1)
// are drawn in solid blue.
extern "C" __global__ void __closesthit__curve_segment_u()
{
    const unsigned int primitiveIndex = optixGetPrimitiveIndex();
    const whitted::HitGroupData* hitGroupData = reinterpret_cast<whitted::HitGroupData*>( optixGetSbtDataPointer() );
    const float3 normal = computeNormal( optixGetPrimitiveType(), primitiveIndex );
    const float3 colors[3] = {make_float3( 1, 0, 0 ), make_float3( 0, 1, 0 ),
                              make_float3( 0, 0, 1 ) };
    const float u = optixGetCurveParameter();
    float3 base_color;
    if( u == 0.0f || u == 1.0f ) // on end-cap
        base_color = colors[2];
    else
        base_color = colors[0] * u + colors[1] * ( 1 - u );
    const float3 hitPoint = getHitPoint();
    const float3 result = shade( hitGroupData, hitPoint, normal, base_color );
    whitted::setPayloadResult( result );
}
// Closest-hit program: pick one of five solid colors from the strand
// index and lerp it toward red along the strand. Demonstrates deriving
// the strand index and a contiguous per-strand u from the primitive
// index via the strand_i / strand_info buffers.
extern "C" __global__ void __closesthit__curve_strand_idx()
{
    unsigned int primitiveIndex = optixGetPrimitiveIndex();
    const whitted::HitGroupData* hitGroupData = reinterpret_cast<whitted::HitGroupData*>( optixGetSbtDataPointer() );
    const GeometryData& geometryData = reinterpret_cast<const GeometryData&>( hitGroupData->geometry_data );
    float3 normal = computeNormal( optixGetPrimitiveType(), primitiveIndex );
    float3 colors[6] = {make_float3( 1, 0, 0 ), make_float3( 0, 1, 0 ), make_float3( 0, 0, 1 ),
                        make_float3( 1, 1, 0 ), make_float3( 1, 0, 1 ), make_float3( 0, 1, 1 )};
    // strand_i maps a primitive index to its strand index.
    unsigned int strandIndex = geometryData.curves.strand_i[primitiveIndex];
    // Per the u computation below: strandInfo.x is the strand's first
    // primitive index, strandInfo.y its segment count.
    uint2 strandInfo = geometryData.curves.strand_info[strandIndex];
    float u = ( primitiveIndex - strandInfo.x ) / (float)strandInfo.y;
    // colors[0] (red) at the tip; one of colors[1..5] at the root.
    float3 base_color = colors[0] * u + colors[strandIndex % 5 + 1] * ( 1.0f - u );
    float3 hitPoint = getHitPoint();
    float3 result = shade( hitGroupData, hitPoint, normal, base_color );
    whitted::setPayloadResult( result );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHair/optixHair.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include "whitted.h"
#include <sutil/CUDAOutputBuffer.h>
#include <cuda/BufferView.h>
#include <cuda_runtime.h>
#include <sutil/Aabb.h>
#include <sutil/Record.h>
#include <sutil/Camera.h>
typedef sutil::EmptyRecord RayGenRecord;
typedef sutil::EmptyRecord MissRecord;
typedef sutil::Record<whitted::HitGroupData> HitRecord;
//
// forward declarations
//
class Hair;
class Head;
class Camera;
class ShaderBindingTable;
class HairProgramGroups;
// Aggregate of all per-run state for the hair sample: scene objects,
// OptiX handles, launch parameters and output buffers. A single
// instance lives on main()'s stack and is passed around by pointer.
struct HairState
{
    OptixDeviceContext context = 0;
    // Scene content; both objects are owned by main().
    Hair* pHair;
    const Head* pHead;
    // Camera and image dimensions.
    sutil::Camera camera = {};
    unsigned int width = 0;
    unsigned int height = 0;
    // Display and accumulation buffers (resized in main()).
    sutil::CUDAOutputBuffer<uchar4> outputBuffer = sutil::CUDAOutputBuffer<uchar4>(sutil::CUDAOutputBufferType::CUDA_DEVICE, 1, 1);
    sutil::CUDAOutputBuffer<float4> accumBuffer = sutil::CUDAOutputBuffer<float4>(sutil::CUDAOutputBufferType::CUDA_DEVICE, 1, 1);
    sutil::Aabb aabb;
    // Launch parameters: host copy plus its device-side mirror.
    whitted::LaunchParams params = {};
    whitted::LaunchParams* deviceParams = nullptr;
    Light lights[2] = {};
    // Acceleration structures: hair GAS and the top-level IAS, with the
    // device buffers that back them.
    OptixTraversableHandle hHairGAS = 0;
    CUdeviceptr deviceBufferHairGAS = 0;
    OptixTraversableHandle hIAS = 0;
    CUdeviceptr deviceBufferIAS = 0;
    // for curves SBT record
    GeometryData::Curves curves = {};
    HairProgramGroups* pProgramGroups = nullptr;
    OptixPipeline pipeline = 0;
    OptixShaderBindingTable SBT = {};
};
// Build the geometry acceleration structure for the hair curves.
void makeHairGAS( HairState* pState );
// Build the top-level instance acceleration structure (head + hair).
void makeInstanceAccelerationStructure( HairState* pState );
// (Re)create the OptiX pipeline from the current program groups.
void makePipeline( HairState* pState );
// Create the program groups referenced by the pipeline and the SBT.
void makeProgramGroups( HairState* pState );
// Build and upload the shader binding table.
void makeSBT( HairState* pState );
// Render one frame into the output buffer.
void renderFrame( HairState* pState );
// Initialize the launch parameters and upload them to the device.
void initializeParams( HairState* pState );
// Refresh camera-dependent launch parameters between frames.
void updateParams( HairState* pState );
// Resize the output and accumulation buffers.
void updateSize( HairState* pState, int width, int height );
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHello/draw_solid_color.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixHello.h"
#include <cuda/helpers.h>
// Launch parameters, uploaded by the host before each optixLaunch.
extern "C" {
__constant__ Params params;
}
// Ray-generation program: writes a constant color, taken from the
// raygen SBT record, to the pixel addressed by this launch index.
// No rays are traced.
extern "C"
__global__ void __raygen__draw_solid_color()
{
    const uint3 launch_index = optixGetLaunchIndex();
    // The raygen SBT record's data holds the RGB fill color.
    // (reinterpret_cast + const replaces the original C-style cast.)
    const RayGenData* rtData = reinterpret_cast<const RayGenData*>( optixGetSbtDataPointer() );
    // Row-major pixel index into the output image.
    params.image[launch_index.y * params.image_width + launch_index.x] =
        make_color( make_float3( rtData->r, rtData->g, rtData->b ) );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHello/optixHello.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <cuda_runtime.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Exception.h>
#include <sutil/sutil.h>
#include <sampleConfig.h>
#include "optixHello.h"
#include <iomanip>
#include <iostream>
#include <string>
// SBT record layout: an OptiX-managed header (filled in by
// optixSbtRecordPackHeader) followed by per-program user data.
template <typename T>
struct SbtRecord
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};
// The raygen record carries the fill color; the miss record's data
// slot is unused (int placeholder).
typedef SbtRecord<RayGenData> RayGenSbtRecord;
typedef SbtRecord<int> MissSbtRecord;
// Print command-line usage to stderr and exit with failure status.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n"
              << "Options: --file | -f <filename> Specify file for image output\n"
              << " --help | -h Print this usage message\n"
              << " --dim=<width>x<height> Set image dimensions; defaults to 512x384\n";
    exit( 1 );
}
// OptiX context log callback: writes "[LL][         TAG]: message" to
// stderr (level padded to 2 columns, tag right-aligned to 12).
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */)
{
    std::cerr << "[" << std::setw( 2 ) << level << "]";
    std::cerr << "[" << std::setw( 12 ) << tag << "]: ";
    std::cerr << message << "\n";
}
int main( int argc, char* argv[] )
{
std::string outfile;
int width = 512;
int height = 384;
for( int i = 1; i < argc; ++i )
{
const std::string arg( argv[i] );
if( arg == "--help" || arg == "-h" )
{
printUsageAndExit( argv[0] );
}
else if( arg == "--file" || arg == "-f" )
{
if( i < argc - 1 )
{
outfile = argv[++i];
}
else
{
printUsageAndExit( argv[0] );
}
}
else if( arg.substr( 0, 6 ) == "--dim=" )
{
const std::string dims_arg = arg.substr( 6 );
sutil::parseDimensions( dims_arg.c_str(), width, height );
}
else
{
std::cerr << "Unknown option '" << arg << "'\n";
printUsageAndExit( argv[0] );
}
}
try
{
char log[2048]; // For error reporting from OptiX creation functions
//
// Initialize CUDA and create OptiX context
//
OptixDeviceContext context = nullptr;
{
// Initialize CUDA
CUDA_CHECK( cudaFree( 0 ) );
CUcontext cuCtx = 0; // zero means take the current context
OPTIX_CHECK( optixInit() );
OptixDeviceContextOptions options = {};
options.logCallbackFunction = &context_log_cb;
options.logCallbackLevel = 4;
OPTIX_CHECK( optixDeviceContextCreate( cuCtx, &options, &context ) );
}
//
// Create module
//
OptixModule module = nullptr;
OptixPipelineCompileOptions pipeline_compile_options = {};
{
OptixModuleCompileOptions module_compile_options = {};
module_compile_options.maxRegisterCount = OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
module_compile_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;
pipeline_compile_options.usesMotionBlur = false;
pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING;
pipeline_compile_options.numPayloadValues = 2;
pipeline_compile_options.numAttributeValues = 2;
pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE; // TODO: should be OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
pipeline_compile_options.pipelineLaunchParamsVariableName = "params";
size_t inputSize = 0;
const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "draw_solid_color.cu", inputSize );
size_t sizeof_log = sizeof( log );
OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
context,
&module_compile_options,
&pipeline_compile_options,
input,
inputSize,
log,
&sizeof_log,
&module
) );
}
//
// Create program groups, including NULL miss and hitgroups
//
OptixProgramGroup raygen_prog_group = nullptr;
OptixProgramGroup miss_prog_group = nullptr;
{
OptixProgramGroupOptions program_group_options = {}; // Initialize to zeros
OptixProgramGroupDesc raygen_prog_group_desc = {}; //
raygen_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
raygen_prog_group_desc.raygen.module = module;
raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__draw_solid_color";
size_t sizeof_log = sizeof( log );
OPTIX_CHECK_LOG( optixProgramGroupCreate(
context,
&raygen_prog_group_desc,
1, // num program groups
&program_group_options,
log,
&sizeof_log,
&raygen_prog_group
) );
// Leave miss group's module and entryfunc name null
OptixProgramGroupDesc miss_prog_group_desc = {};
miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
sizeof_log = sizeof( log );
OPTIX_CHECK_LOG( optixProgramGroupCreate(
context,
&miss_prog_group_desc,
1, // num program groups
&program_group_options,
log,
&sizeof_log,
&miss_prog_group
) );
}
//
// Link pipeline
//
OptixPipeline pipeline = nullptr;
{
const uint32_t max_trace_depth = 0;
OptixProgramGroup program_groups[] = { raygen_prog_group };
OptixPipelineLinkOptions pipeline_link_options = {};
pipeline_link_options.maxTraceDepth = max_trace_depth;
pipeline_link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
size_t sizeof_log = sizeof( log );
OPTIX_CHECK_LOG( optixPipelineCreate(
context,
&pipeline_compile_options,
&pipeline_link_options,
program_groups,
sizeof( program_groups ) / sizeof( program_groups[0] ),
log,
&sizeof_log,
&pipeline
) );
OptixStackSizes stack_sizes = {};
for( auto& prog_group : program_groups )
{
OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
}
uint32_t direct_callable_stack_size_from_traversal;
uint32_t direct_callable_stack_size_from_state;
uint32_t continuation_stack_size;
OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
0, // maxCCDepth
0, // maxDCDEpth
&direct_callable_stack_size_from_traversal,
&direct_callable_stack_size_from_state, &continuation_stack_size ) );
OPTIX_CHECK( optixPipelineSetStackSize( pipeline, direct_callable_stack_size_from_traversal,
direct_callable_stack_size_from_state, continuation_stack_size,
2 // maxTraversableDepth
) );
}
//
// Set up shader binding table
//
OptixShaderBindingTable sbt = {};
{
CUdeviceptr raygen_record;
const size_t raygen_record_size = sizeof( RayGenSbtRecord );
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &raygen_record ), raygen_record_size ) );
RayGenSbtRecord rg_sbt;
OPTIX_CHECK( optixSbtRecordPackHeader( raygen_prog_group, &rg_sbt ) );
rg_sbt.data = {0.462f, 0.725f, 0.f};
CUDA_CHECK( cudaMemcpy(
reinterpret_cast<void*>( raygen_record ),
&rg_sbt,
raygen_record_size,
cudaMemcpyHostToDevice
) );
CUdeviceptr miss_record;
size_t miss_record_size = sizeof( MissSbtRecord );
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &miss_record ), miss_record_size ) );
RayGenSbtRecord ms_sbt;
OPTIX_CHECK( optixSbtRecordPackHeader( miss_prog_group, &ms_sbt ) );
CUDA_CHECK( cudaMemcpy(
reinterpret_cast<void*>( miss_record ),
&ms_sbt,
miss_record_size,
cudaMemcpyHostToDevice
) );
sbt.raygenRecord = raygen_record;
sbt.missRecordBase = miss_record;
sbt.missRecordStrideInBytes = sizeof( MissSbtRecord );
sbt.missRecordCount = 1;
}
sutil::CUDAOutputBuffer<uchar4> output_buffer( sutil::CUDAOutputBufferType::CUDA_DEVICE, width, height );
//
// launch
//
{
CUstream stream;
CUDA_CHECK( cudaStreamCreate( &stream ) );
Params params;
params.image = output_buffer.map();
params.image_width = width;
CUdeviceptr d_param;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_param ), sizeof( Params ) ) );
CUDA_CHECK( cudaMemcpy(
reinterpret_cast<void*>( d_param ),
¶ms, sizeof( params ),
cudaMemcpyHostToDevice
) );
OPTIX_CHECK( optixLaunch( pipeline, stream, d_param, sizeof( Params ), &sbt, width, height, /*depth=*/1 ) );
CUDA_SYNC_CHECK();
output_buffer.unmap();
}
//
// Display results
//
{
sutil::ImageBuffer buffer;
buffer.data = output_buffer.getHostPointer();
buffer.width = width;
buffer.height = height;
buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
if( outfile.empty() )
sutil::displayBufferWindow( argv[0], buffer );
else
sutil::saveImage( outfile.c_str(), buffer, false );
}
//
// Cleanup
//
{
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( sbt.raygenRecord ) ) );
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( sbt.missRecordBase ) ) );
OPTIX_CHECK( optixPipelineDestroy( pipeline ) );
OPTIX_CHECK( optixProgramGroupDestroy( miss_prog_group ) );
OPTIX_CHECK( optixProgramGroupDestroy( raygen_prog_group ) );
OPTIX_CHECK( optixModuleDestroy( module ) );
OPTIX_CHECK( optixDeviceContextDestroy( context ) );
}
}
catch( std::exception& e )
{
std::cerr << "Caught exception: " << e.what() << "\n";
return 1;
}
return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixHello/optixHello.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Launch parameters shared between host and the OptiX device programs
// (declared on the device as the "params" variable named in the pipeline
// compile options).
struct Params
{
    uchar4*      image;        // output pixel buffer (device pointer), one uchar4 per pixel
    unsigned int image_width;  // width of `image` in pixels, used to linearize 2D launch indices
};

// Payload stored in each ray-generation SBT record: the solid RGB color
// the raygen program writes to every pixel.
struct RayGenData
{
    float r,g,b;
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMeshViewer/optixMeshViewer.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <cuda/whitted.h>
#include <cuda/Light.h>
#include <sutil/Camera.h>
#include <sutil/Trackball.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/Scene.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <GLFW/glfw3.h>
#include <array>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
//#define USE_IAS // WAR for broken direct intersection of GAS on non-RTX cards
// --- Application state shared between the GLFW callbacks and the render loop ---

// Set by the resize callback; buffers are reallocated lazily in handleResize().
bool resize_dirty = false;
// Set while the window is iconified; rendering keeps the previous resolution.
bool minimized = false;

// Camera state
// Set whenever eye/orientation/aspect changed; launch params are then refreshed.
bool camera_changed = true;
sutil::Camera camera;
sutil::Trackball trackball;

// Mouse state
// Currently held GLFW mouse button, or -1 when none is held.
int32_t mouse_button = -1;

int32_t samples_per_launch = 16;

// Host-side launch parameters and their device-side mirror.
whitted::LaunchParams* d_params = nullptr;
whitted::LaunchParams params = {};
int32_t width = 768;
int32_t height = 768;
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// Records which mouse button is held and, on press, arms the trackball at the
// current cursor position so subsequent drags move the camera.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    if( action != GLFW_PRESS )
    {
        mouse_button = -1;  // no button is held any more
        return;
    }

    double cursor_x = 0.0;
    double cursor_y = 0.0;
    glfwGetCursorPos( window, &cursor_x, &cursor_y );

    mouse_button = button;
    trackball.startTracking( static_cast<int>( cursor_x ), static_cast<int>( cursor_y ) );
}
// Drives camera interaction while a mouse button is held: left-drag orbits
// around the look-at point, right-drag looks around from a fixed eye.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    const bool left_drag  = ( mouse_button == GLFW_MOUSE_BUTTON_LEFT );
    const bool right_drag = ( mouse_button == GLFW_MOUSE_BUTTON_RIGHT );
    if( !left_drag && !right_drag )
        return;

    trackball.setViewMode( left_drag ? sutil::Trackball::LookAtFixed : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), width, height );
    camera_changed = true;
}
// Tracks window resizes and marks the output buffers for reallocation.
static void windowSizeCallback( GLFWwindow* window, int32_t new_width, int32_t new_height )
{
    // Keep rendering at the current resolution when the window is minimized.
    if( minimized )
        return;

    // Clamp to at least 1x1 so output buffers stay valid.
    sutil::ensureMinimumSize( new_width, new_height );

    width          = new_width;
    height         = new_height;
    camera_changed = true;   // aspect ratio changed
    resize_dirty   = true;   // buffers must be reallocated
}
// Remembers whether the window is currently iconified (GLFW passes a
// non-zero value when it was just minimized).
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = ( iconified != 0 );
}
// Keyboard handler: Q/ESC closes the window; G is reserved for a UI toggle.
//
// Fix: the original placed the GLFW_KEY_G branch as an `else if` of the
// `action == GLFW_PRESS` test, so it could only ever trigger on key release
// or auto-repeat. The branch body is empty, so behavior is unchanged, but the
// structure is corrected so future code reacts to the press as intended.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    // Only react to initial key presses, not releases or auto-repeats.
    if( action != GLFW_PRESS )
        return;

    if( key == GLFW_KEY_Q ||
        key == GLFW_KEY_ESCAPE )
    {
        glfwSetWindowShouldClose( window, true );
    }
    else if( key == GLFW_KEY_G )
    {
        // toggle UI draw
    }
}
// Vertical scroll zooms the camera; the view is marked dirty only when the
// trackball actually consumed the wheel event.
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
    const bool handled = trackball.wheelEvent( (int)yscroll );
    if( handled )
        camera_changed = true;
}
//------------------------------------------------------------------------------
//
// Helper functions
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Prints the command-line help text to stderr and terminates the process
// with exit code 0. Never returns.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n";
    std::cerr << "Options: --file | -f <filename> File for image output\n";
    std::cerr << " --dim=<width>x<height> Set image dimensions; defaults to 768x768\n";
    std::cerr << " --launch-samples | -s Number of samples per pixel per launch (default 16)\n";
    std::cerr << " --no-gl-interop Disable GL interop for display\n";
    std::cerr << " --model <model.gltf> Specify model to render (required)\n";
    std::cerr << " --help | -h Print this usage message\n";
    exit( 0 );
}
// Allocates and fills the device-side launch parameters: the accumulation
// buffer, two hard-coded point lights placed relative to the scene bounds,
// the miss color and the scene's traversable handle. Expects the scene to be
// fully loaded (aabb and traversable available).
void initLaunchParams( const sutil::Scene& scene ) {
    // Progressive-accumulation buffer: one float4 per pixel.
    CUDA_CHECK( cudaMalloc(
                reinterpret_cast<void**>( &params.accum_buffer ),
                width*height*sizeof(float4)
                ) );
    params.frame_buffer = nullptr; // Will be set when output buffer is mapped

    params.subframe_index = 0u;

    // Offset lights by one scene extent so the model is lit from two
    // roughly opposite directions.
    const float loffset = scene.aabb().maxExtent();

    // TODO: add light support to sutil::Scene
    std::vector<Light> lights( 2 );
    lights[0].type = Light::Type::POINT;
    lights[0].point.color = {1.0f, 1.0f, 0.8f};   // warm-tinted light
    lights[0].point.intensity = 5.0f;
    lights[0].point.position = scene.aabb().center() + make_float3( loffset );
    lights[0].point.falloff = Light::Falloff::QUADRATIC;
    lights[1].type = Light::Type::POINT;
    lights[1].point.color = {0.8f, 0.8f, 1.0f};   // cool-tinted light
    lights[1].point.intensity = 3.0f;
    lights[1].point.position = scene.aabb().center() + make_float3( -loffset, 0.5f * loffset, -0.5f * loffset );
    lights[1].point.falloff = Light::Falloff::QUADRATIC;

    // Upload the light array to the device.
    params.lights.count = static_cast<uint32_t>( lights.size() );
    CUDA_CHECK( cudaMalloc(
                reinterpret_cast<void**>( &params.lights.data ),
                lights.size() * sizeof( Light )
                ) );
    CUDA_CHECK( cudaMemcpy(
                reinterpret_cast<void*>( params.lights.data ),
                lights.data(),
                lights.size() * sizeof( Light ),
                cudaMemcpyHostToDevice
                ) );

    params.miss_color = make_float3( 0.1f );

    //CUDA_CHECK( cudaStreamCreate( &stream ) );
    // Device-side mirror of the launch-parameter struct; refreshed every frame
    // by launchSubframe().
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_params ), sizeof( whitted::LaunchParams ) ) );

    params.handle = scene.traversableHandle();
}
// Pushes the host-side camera state into the launch parameters.
// Cheap no-op when the camera has not changed since the last frame.
void handleCameraUpdate( whitted::LaunchParams& params )
{
    if( !camera_changed )
        return;
    camera_changed = false;

    // Keep the projection consistent with the current output resolution.
    camera.setAspectRatio( static_cast<float>( width ) / static_cast<float>( height ) );
    params.eye = camera.eye();

    // U/V/W span the image-plane basis used by the ray-generation program.
    camera.UVWFrame( params.U, params.V, params.W );
}
// Applies a pending window resize: resizes the display buffer and recreates
// the accumulation buffer at the new resolution. No-op when nothing changed.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer )
{
    if( !resize_dirty )
        return;
    resize_dirty = false;

    output_buffer.resize( width, height );

    // The accumulation buffer must match the new framebuffer size: drop the
    // old allocation and create a fresh one.
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( params.accum_buffer ) ) );
    CUDA_CHECK( cudaMalloc(
            reinterpret_cast<void**>( &params.accum_buffer ),
            width * height * sizeof( float4 ) ) );
}
// Per-frame state synchronization: restarts progressive accumulation when the
// view or window size changed, then applies camera and resize updates.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, whitted::LaunchParams& params )
{
    const bool restart_accumulation = camera_changed || resize_dirty;
    if( restart_accumulation )
        params.subframe_index = 0;

    handleCameraUpdate( params );
    handleResize( output_buffer );
}
// Renders one subframe: maps the output buffer, uploads the current launch
// parameters to the device, dispatches an OptiX launch covering the full
// image, then unmaps and synchronizes. The buffer must stay mapped for the
// duration of the launch, so the order of calls here matters.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, const sutil::Scene& scene )
{
    // Launch
    // Mapping yields the CUDA-visible pointer backing the (possibly
    // GL-interop) output buffer.
    uchar4* result_buffer_data = output_buffer.map();
    params.frame_buffer = result_buffer_data;
    // Async copy on the same (default) stream as the launch below, so the
    // launch is guaranteed to see the updated parameters.
    CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( d_params ),
                &params,
                sizeof( whitted::LaunchParams ),
                cudaMemcpyHostToDevice,
                0 // stream
                ) );
    OPTIX_CHECK( optixLaunch(
                scene.pipeline(),
                0, // stream
                reinterpret_cast<CUdeviceptr>( d_params ),
                sizeof( whitted::LaunchParams ),
                scene.sbt(),
                width, // launch width
                height, // launch height
                1 // launch depth
                ) );
    output_buffer.unmap();
    // Block until the launch completes (and surface any async errors).
    CUDA_SYNC_CHECK();
}
// Presents the rendered subframe: blits the CUDA output (shared via a PBO)
// onto the GL framebuffer of the given window.
void displaySubframe(
        sutil::CUDAOutputBuffer<uchar4>& output_buffer,
        sutil::GLDisplay& gl_display,
        GLFWwindow* window )
{
    // Query the framebuffer size in pixels; on high-DPI displays this can
    // differ from the window size.
    int framebuffer_width  = 0;
    int framebuffer_height = 0;
    glfwGetFramebufferSize( window, &framebuffer_width, &framebuffer_height );

    gl_display.display(
            output_buffer.width(),
            output_buffer.height(),
            framebuffer_width,
            framebuffer_height,
            output_buffer.getPBO() );
}
// Initializes the interactive camera from the scene's camera and attaches the
// trackball controller to it.
void initCameraState( const sutil::Scene& scene )
{
    camera         = scene.camera();
    camera_changed = true;

    trackball.setCamera( &camera );
    trackball.setMoveSpeed( 10.0f );
    // World-space frame the trackball rotates in (x right, z up-axis lock).
    trackball.setReferenceFrame(
            make_float3( 1.0f, 0.0f, 0.0f ),
            make_float3( 0.0f, 0.0f, 1.0f ),
            make_float3( 0.0f, 1.0f, 0.0f ) );
    trackball.setGimbalLock( true );
}
// Frees all device allocations owned by the launch parameters. Called once
// after the render loop exits (or after the single offline frame was saved).
void cleanup()
{
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( params.accum_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( params.lights.data ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_params ) ) );
}
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
// Entry point: parses command-line options, loads the glTF scene, then either
// runs an interactive GLFW render loop or renders a single frame to a file.
int main( int argc, char* argv[] )
{
    sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;
    //
    // Parse command line options
    //
    std::string outfile;
    std::string infile = sutil::sampleDataFilePath( "WaterBottle/WaterBottle.gltf" );
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--no-gl-interop" )
        {
            output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
        }
        else if( arg == "--model" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            infile = argv[++i];
        }
        else if( arg == "--file" || arg == "-f" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outfile = argv[++i];
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            sutil::parseDimensions( dims_arg.c_str(), width, height );
        }
        else if( arg == "--launch-samples" || arg == "-s" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            samples_per_launch = atoi( argv[++i] );
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    // NOTE(review): `infile` is defaulted above, so this check only fires if
    // sampleDataFilePath() can return an empty string — confirm that is possible.
    if( infile.empty() )
    {
        std::cerr << "--model argument required" << std::endl;
        printUsageAndExit( argv[0] );
    }
    try
    {
        // Load and finalize the scene (acceleration structures, pipeline, SBT).
        sutil::Scene scene;
        sutil::loadScene( infile.c_str(), scene );
        scene.finalize();
        OPTIX_CHECK( optixInit() ); // Need to initialize function table
        initCameraState( scene );
        initLaunchParams( scene );
        if( outfile.empty() )
        {
            // Interactive mode: open a window and render progressively.
            GLFWwindow* window = sutil::initUI( "optixMeshViewer", width, height );
            glfwSetMouseButtonCallback ( window, mouseButtonCallback );
            glfwSetCursorPosCallback ( window, cursorPosCallback );
            glfwSetWindowSizeCallback ( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback ( window, keyCallback );
            glfwSetScrollCallback ( window, scrollCallback );
            glfwSetWindowUserPointer ( window, &params );
            //
            // Render loop
            //
            {
                sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, width, height );
                sutil::GLDisplay gl_display;
                // Running totals used by the on-screen stats overlay.
                std::chrono::duration<double> state_update_time( 0.0 );
                std::chrono::duration<double> render_time( 0.0 );
                std::chrono::duration<double> display_time( 0.0 );
                do
                {
                    auto t0 = std::chrono::steady_clock::now();
                    glfwPollEvents();
                    updateState( output_buffer, params );
                    auto t1 = std::chrono::steady_clock::now();
                    state_update_time += t1 - t0;
                    t0 = t1;
                    launchSubframe( output_buffer, scene );
                    t1 = std::chrono::steady_clock::now();
                    render_time += t1 - t0;
                    t0 = t1;
                    displaySubframe( output_buffer, gl_display, window );
                    t1 = std::chrono::steady_clock::now();
                    display_time += t1 - t0;
                    sutil::displayStats( state_update_time, render_time, display_time );
                    glfwSwapBuffers(window);
                    // Each subframe accumulates into the progressive result.
                    ++params.subframe_index;
                }
                while( !glfwWindowShouldClose( window ) );
                CUDA_SYNC_CHECK();
            }
            sutil::cleanupUI( window );
        }
        else
        {
            // Offline mode: render a single frame and save it to `outfile`.
            // GL interop still needs a (hidden) GL context for the PBO.
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                sutil::initGLFW(); // For GL context
                sutil::initGL();
            }
            sutil::CUDAOutputBuffer<uchar4> output_buffer(output_buffer_type, width, height);
            handleCameraUpdate( params);
            handleResize( output_buffer );
            launchSubframe( output_buffer, scene );
            sutil::ImageBuffer buffer;
            buffer.data = output_buffer.getHostPointer();
            buffer.width = output_buffer.width();
            buffer.height = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            sutil::saveImage(outfile.c_str(), buffer, false);
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                glfwTerminate();
            }
        }
        cleanup();
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixModuleCreateAbort/optixModuleCreateAbort.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This sample shows how to run module creation in a separate process
// so that it can be aborted at any time by killing that process
// (by pressing the 'A' key). The compiled result is stored in the OptiX
// disk cache, so that when the main application creates the module again,
// there is no compile time.
//
// This sample is a modified version of the optixBoundValues sample.
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Camera.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/Trackball.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <optix_stack_size.h>
#include <GLFW/glfw3.h>
#include "optixModuleCreateAbort.h"
#include <array>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#ifdef WIN32
#include <Windows.h>
#else
#include <sys/types.h>
#include <sys/wait.h>
#include <linux/limits.h>
#include <spawn.h>
#include <signal.h>
#include <unistd.h>
#endif
// --- Application state shared between GLFW callbacks and the render loop ---

// Set by the resize callback; buffers are reallocated lazily.
bool resize_dirty = false;
// Set while the window is iconified; rendering keeps the previous resolution.
bool minimized = false;

// Camera state
// Set whenever eye/orientation/aspect changed and launch params need updating.
bool camera_changed = true;
sutil::Camera camera;
sutil::Trackball trackball;

// Mouse state
// Currently held GLFW mouse button, or -1 when none is held.
int32_t mouse_button = -1;

int32_t samples_per_launch = 16;

// The number of samples to calculate the light are specified in the launch parameters.
// That value can be specialized to a fixed value at compile time.
// Note that when changing the number of light samples at runtime by pressing the PLUS
// or MINUS keys with specialization enabled, recompilation of the closest hit module
// is necessary, or it needs to be loaded from the cache.
unsigned int light_samples = 1;
bool specialize = true;
// Keep a list of all compile operations in flight to be able to check their state each frame until completed

// Describes one module compilation delegated to a separate child process.
struct CompileOperation
{
    std::string ptx;          // PTX source of the module being compiled
    std::string temp_file;    // temporary file associated with the child process; deleted on abort
    OptixModuleCompileOptions module_compile_options;
    OptixPipelineCompileOptions pipeline_compile_options;
    // Launch-parameter values baked in at compile time when specialization is on.
    std::vector<OptixModuleCompileBoundValueEntry> bound_values;
    OptixModule* target_module;  // destination for the module once compilation finishes
#ifdef WIN32
    HANDLE process_handle;       // child process handle (Windows)
#else
    pid_t process_id;            // child process id (POSIX)
#endif
};
std::vector<CompileOperation> compile_operations_in_flight;
//------------------------------------------------------------------------------
//
// Local types
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Generic SBT record: an opaque header region reserved for OptiX (filled via
// optixSbtRecordPackHeader) followed by the user payload, with the alignment
// OptiX requires.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};
typedef Record<RayGenData> RayGenRecord;
typedef Record<MissData> MissRecord;
typedef Record<HitGroupData> HitGroupRecord;
// Vertex position padded to 16 bytes (x, y, z plus one pad float).
struct Vertex
{
    float x, y, z, pad;
};

// Triangle index triple padded to 16 bytes. Not referenced in the visible
// portion of this file.
struct IndexedTriangle
{
    uint32_t v1, v2, v3, pad;
};

// Row-major 3x4 instance transform. Presumably used only for the USE_IAS
// path mentioned at the top of the file — confirm against the AS build code.
struct Instance
{
    float transform[12];
};

// All OptiX/CUDA objects owned by the sample, grouped so they can be passed
// around and torn down together.
struct PathTracerState
{
    OptixDeviceContext context = 0;
    OptixTraversableHandle gas_handle = 0; // Traversable handle for triangle AS
    CUdeviceptr d_gas_output_buffer = 0; // Triangle AS memory
    CUdeviceptr d_vertices = 0;
    OptixModule ptx_module = 0;
    OptixModule ptx_module_radiance = 0; // rebuilt when light-sample specialization changes
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixPipeline pipeline = 0;
    OptixProgramGroup raygen_prog_group = 0;
    OptixProgramGroup radiance_miss_group = 0;
    OptixProgramGroup occlusion_miss_group = 0;
    OptixProgramGroup radiance_hit_group = 0;
    OptixProgramGroup occlusion_hit_group = 0;
    CUstream stream = 0;
    Params params;     // host copy of the launch parameters
    Params* d_params;  // device-side mirror of `params`
    OptixShaderBindingTable sbt = {};
};
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
const int32_t TRIANGLE_COUNT = 32;
const int32_t MAT_COUNT = 4;
const static std::array<Vertex, TRIANGLE_COUNT* 3> g_vertices =
{ {
// Floor -- white lambert
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 0.0f, 0.0f },
// Ceiling -- white lambert
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
// Back wall -- white lambert
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
// Right wall -- green lambert
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
// Left wall -- red lambert
{ 556.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 0.0f, 0.0f },
// Short block -- white lambert
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 242.0f, 165.0f, 274.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 242.0f, 165.0f, 274.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
// Tall block -- white lambert
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 314.0f, 330.0f, 455.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 314.0f, 330.0f, 455.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
// Ceiling light -- emmissive
{ 343.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 332.0f, 0.0f },
{ 343.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 332.0f, 0.0f },
{ 343.0f, 548.6f, 332.0f, 0.0f }
} };
static std::array<uint32_t, TRIANGLE_COUNT> g_mat_indices = {{
0, 0, // Floor -- white lambert
0, 0, // Ceiling -- white lambert
0, 0, // Back wall -- white lambert
1, 1, // Right wall -- green lambert
2, 2, // Left wall -- red lambert
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Short block -- white lambert
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Tall block -- white lambert
3, 3 // Ceiling light -- emmissive
}};
const std::array<float3, MAT_COUNT> g_emission_colors =
{ {
{ 0.0f, 0.0f, 0.0f },
{ 0.0f, 0.0f, 0.0f },
{ 0.0f, 0.0f, 0.0f },
{ 15.0f, 15.0f, 5.0f }
} };
const std::array<float3, MAT_COUNT> g_diffuse_colors =
{ {
{ 0.80f, 0.80f, 0.80f },
{ 0.05f, 0.80f, 0.05f },
{ 0.80f, 0.05f, 0.05f },
{ 0.50f, 0.00f, 0.00f }
} };
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// Records which mouse button is held and, on press, arms the trackball at the
// current cursor position so subsequent drags move the camera.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    if( action != GLFW_PRESS )
    {
        mouse_button = -1;  // no button is held any more
        return;
    }

    double cursor_x = 0.0;
    double cursor_y = 0.0;
    glfwGetCursorPos( window, &cursor_x, &cursor_y );

    mouse_button = button;
    trackball.startTracking( static_cast<int>( cursor_x ), static_cast<int>( cursor_y ) );
}
// Drives camera interaction while a mouse button is held: left-drag orbits
// around the look-at point, right-drag looks around from a fixed eye.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    const bool left_drag  = ( mouse_button == GLFW_MOUSE_BUTTON_LEFT );
    const bool right_drag = ( mouse_button == GLFW_MOUSE_BUTTON_RIGHT );
    if( !left_drag && !right_drag )
        return;

    // The current output size lives on the state object stashed in the GLFW
    // window user pointer.
    Params& params = static_cast<PathTracerState*>( glfwGetWindowUserPointer( window ) )->params;

    trackball.setViewMode( left_drag ? sutil::Trackball::LookAtFixed : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), params.width, params.height );
    camera_changed = true;
}
// Tracks window resizes and marks the output buffers for reallocation.
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    // Keep rendering at the current resolution when the window is minimized.
    if( minimized )
        return;

    // Clamp to at least 1x1 so output buffers stay valid.
    sutil::ensureMinimumSize( res_x, res_y );

    // Launch params live on the state object stored in the window user pointer.
    Params& params = static_cast<PathTracerState*>( glfwGetWindowUserPointer( window ) )->params;
    params.width   = res_x;
    params.height  = res_y;

    camera_changed = true;   // aspect ratio changed
    resize_dirty   = true;   // buffers must be reallocated
}
// Remembers whether the window is currently iconified (GLFW passes a
// non-zero value when it was just minimized).
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = ( iconified != 0 );
}
void createRadianceModule( PathTracerState& state );
// Keyboard handler:
//   Q / ESC : close the window
//   S       : toggle specialization of the light-sample count and rebuild the
//             radiance module (pipeline is updated later in the render loop)
//   A       : abort all module compile processes currently in flight
//
// Fix: the original matched S and A in the `else` branch of the
// `action == GLFW_PRESS` test, so those handlers fired on key RELEASE and
// auto-REPEAT instead of on the press — holding S would repeatedly kick off
// recompiles. All keys are now handled on the initial press only.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    if( action != GLFW_PRESS )
        return;

    if( key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE )
    {
        glfwSetWindowShouldClose( window, true );
    }
    else if( key == GLFW_KEY_S )
    {
        specialize = !specialize;
        // Invoke compile process for new module, pipeline is then later updated in the render loop
        createRadianceModule( *static_cast<PathTracerState*>(glfwGetWindowUserPointer( window )) );
    }
    else if( key == GLFW_KEY_A )
    {
        // Terminate all compile processes in flight
        for( const CompileOperation& operation : compile_operations_in_flight )
        {
#ifdef WIN32
            // Ignore failure in case process has already exited
            TerminateProcess( operation.process_handle, 1 );
            // Wait for process to actually exit before deleting temporary file, since TerminateProcess is asynchronous and it may still have a handle to it open
            WaitForSingleObject( operation.process_handle, 1000 );
            SUTIL_ASSERT( DeleteFileA( operation.temp_file.c_str() ) );
            SUTIL_ASSERT( CloseHandle( operation.process_handle ) );
#else
            SUTIL_ASSERT( kill( operation.process_id, SIGTERM ) == 0 );
            SUTIL_ASSERT( remove( operation.temp_file.c_str() ) == 0 );
#endif
        }
        compile_operations_in_flight.clear();
    }
}
// GLFW character callback: '+' / '-' adjust the global light-sample count
// (never below 1).  When specialization is enabled, a changed count requires
// recompiling the radiance module with the new bound value.
static void charCallback( GLFWwindow* window, unsigned int codepoint )
{
    bool count_changed = false;

    switch( codepoint )
    {
        case '+':
            ++light_samples;
            count_changed = true;
            break;
        case '-':
            if( light_samples > 1 )
            {
                --light_samples;
                count_changed = true;
            }
            break;
        default:
            break;
    }

    if( count_changed && specialize )
        createRadianceModule( *static_cast<PathTracerState*>( glfwGetWindowUserPointer( window ) ) );
}
// GLFW scroll callback: forward the vertical wheel delta to the trackball
// (which dollies the camera) and mark the camera dirty when it changed.
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
    const int wheel_delta = static_cast<int>( yscroll );
    if( trackball.wheelEvent( wheel_delta ) )
        camera_changed = true;
}
//------------------------------------------------------------------------------
//
// Helper functions
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Print command-line usage to stderr and terminate the process.
// NOTE(review): exits with status 0 even when invoked for an unknown option;
// a non-zero status would be more conventional for the error path.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n";
    std::cerr << "Options: --launch-samples | -s Number of samples per pixel per launch (default 16)\n";
    std::cerr << " --light-samples | -l Number of radiance samples (default 1)\n";
    std::cerr << " --no-specialize ...\n";
    std::cerr << " --no-gl-interop Disable GL interop for display\n";
    std::cerr << " --dim=<width>x<height> Set image dimensions; defaults to 768x768\n";
    std::cerr << " --help | -h Print this usage message\n";
    exit( 0 );
}
// Allocate per-launch device resources and fill the initial launch parameters:
// the float4 accumulation buffer, sample counts, the single area light, the
// traversable handle, the CUDA stream, and the device-side Params copy.
void initLaunchParams( PathTracerState& state )
{
    // Running per-pixel float4 accumulation across subframes.
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &state.params.accum_buffer ),
        state.params.width * state.params.height * sizeof( float4 )
    ) );
    state.params.frame_buffer = nullptr; // Will be set when output buffer is mapped

    state.params.samples_per_launch = samples_per_launch;
    state.params.light_samples = light_samples;
    state.params.subframe_index = 0u;

    // Single rectangular area light (corner + two edge vectors + emission).
    state.params.light.emission = make_float3( 15.0f, 15.0f, 5.0f );
    state.params.light.corner = make_float3( 343.0f, 548.5f, 227.0f );
    state.params.light.v1 = make_float3( 0.0f, 0.0f, 105.0f );
    state.params.light.v2 = make_float3( -130.0f, 0.0f, 0.0f );
    state.params.light.normal = normalize( cross( state.params.light.v1, state.params.light.v2 ) );

    // Top-level traversable to trace against (the GAS built in buildMeshAccel).
    state.params.handle = state.gas_handle;

    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
    // Device-side copy of Params, refreshed by launchSubframe before each launch.
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params ), sizeof( Params ) ) );
}
// Push the current camera state into the launch params.  No-op unless a UI
// callback set camera_changed since the last call; clears the flag once applied.
void handleCameraUpdate( Params& params )
{
    if( !camera_changed )
        return;
    camera_changed = false;

    // Keep the camera aspect in sync with the current output resolution.
    camera.setAspectRatio( static_cast<float>( params.width ) / static_cast<float>( params.height ) );
    params.eye = camera.eye();
    // Write the camera basis (U right, V up, W view direction) into the params.
    camera.UVWFrame( params.U, params.V, params.W );
}
// Apply a pending window resize: resize the display output buffer and
// reallocate the accumulation buffer to match.  No-op unless resize_dirty.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    if( !resize_dirty )
        return;
    resize_dirty = false;

    output_buffer.resize( params.width, params.height );

    // Realloc accumulation buffer (old contents are discarded; the caller
    // resets subframe_index so accumulation restarts cleanly).
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( params.accum_buffer ) ) );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &params.accum_buffer ),
        params.width * params.height * sizeof( float4 )
    ) );
}
// Per-frame host-side update: restart accumulation when the view or window
// changed, refresh the light-sample count, then apply camera/resize updates.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    // Any camera or size change invalidates the accumulated image.
    if( camera_changed || resize_dirty )
        params.subframe_index = 0;

    // light_samples may have been changed via the +/- keys.
    params.light_samples = light_samples;
    handleCameraUpdate( params );
    handleResize( output_buffer, params );
}
// Render one subframe: copy Params to the device, launch the pipeline over
// the full image, then synchronize so the result is complete before display.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, PathTracerState& state )
{
    // Cannot launch while modules are still compiling (on startup the pipeline
    // is only created once the asynchronous compiles have finished).
    if( state.pipeline == nullptr )
        return;

    // Map the (possibly GL-interop) output buffer and point the launch at it.
    uchar4* result_buffer_data = output_buffer.map();
    state.params.frame_buffer = result_buffer_data;
    // Async copy on the same stream as the launch, so ordering is guaranteed.
    CUDA_CHECK( cudaMemcpyAsync(
        reinterpret_cast<void*>( state.d_params ),
        &state.params, sizeof( Params ),
        cudaMemcpyHostToDevice, state.stream
    ) );

    OPTIX_CHECK( optixLaunch(
        state.pipeline,
        state.stream,
        reinterpret_cast<CUdeviceptr>( state.d_params ),
        sizeof( Params ),
        &state.sbt,
        state.params.width,  // launch width
        state.params.height, // launch height
        1                    // launch depth
    ) );
    output_buffer.unmap();
    // Block until the launch finished (also surfaces any launch-time errors).
    CUDA_SYNC_CHECK();
}
// Blit the rendered CUDA output buffer to the window via OpenGL.  The
// framebuffer resolution is queried separately because it can differ from the
// logical window size on HiDPI displays.
void displaySubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, sutil::GLDisplay& gl_display, GLFWwindow* window )
{
    int framebuf_res_x = 0;
    int framebuf_res_y = 0;
    glfwGetFramebufferSize( window, &framebuf_res_x, &framebuf_res_y );

    gl_display.display( output_buffer.width(),
                        output_buffer.height(),
                        framebuf_res_x,
                        framebuf_res_y,
                        output_buffer.getPBO() );
}
// OptiX device-context log callback: print "[level][     tag]: message" to
// stderr, with fixed-width level (2) and tag (12) columns.
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::cerr << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << "\n";
}
// Set up the initial camera looking into the Cornell box and configure the
// trackball that drives interactive camera movement.
void initCameraState()
{
    camera.setEye( make_float3( 278.0f, 273.0f, -900.0f ) );
    camera.setLookat( make_float3( 278.0f, 273.0f, 330.0f ) );
    camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
    camera.setFovY( 35.0f );
    camera_changed = true;

    trackball.setCamera( &camera );
    trackball.setMoveSpeed( 10.0f );
    // Reference frame for trackball rotations; presumably maps the scene's
    // y-up convention onto the trackball's internal frame — TODO confirm
    // against sutil::Trackball::setReferenceFrame docs.
    trackball.setReferenceFrame(
        make_float3( 1.0f, 0.0f, 0.0f ),
        make_float3( 0.0f, 0.0f, 1.0f ),
        make_float3( 0.0f, 1.0f, 0.0f )
    );
    trackball.setGimbalLock( true );
}
// Initialize CUDA and OptiX and create the OptiX device context on the
// current CUDA context, with logging routed to context_log_cb.
void createContext( PathTracerState& state )
{
    // Initialize CUDA (cudaFree(0) forces lazy context creation).
    CUDA_CHECK( cudaFree( 0 ) );

    OptixDeviceContext context;
    CUcontext cu_ctx = 0; // zero means take the current context
    OPTIX_CHECK( optixInit() );
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &context_log_cb;
    options.logCallbackLevel = 4;
    OPTIX_CHECK( optixDeviceContextCreate( cu_ctx, &options, &context ) );

    state.context = context;
}
// Upload the scene geometry and build (and, when profitable, compact) the
// triangle geometry acceleration structure (GAS).
void buildMeshAccel( PathTracerState& state )
{
    //
    // copy mesh data to device
    //
    const size_t vertices_size_in_bytes = g_vertices.size() * sizeof( Vertex );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_vertices ), vertices_size_in_bytes ) );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( state.d_vertices ),
        g_vertices.data(), vertices_size_in_bytes,
        cudaMemcpyHostToDevice
    ) );

    // Per-triangle material index, used to select the SBT record per primitive.
    CUdeviceptr d_mat_indices = 0;
    const size_t mat_indices_size_in_bytes = g_mat_indices.size() * sizeof( uint32_t );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_mat_indices ), mat_indices_size_in_bytes ) );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( d_mat_indices ),
        g_mat_indices.data(),
        mat_indices_size_in_bytes,
        cudaMemcpyHostToDevice
    ) );

    //
    // Build triangle GAS
    //
    uint32_t triangle_input_flags[MAT_COUNT] = // One per SBT record for this build input
    {
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT
    };

    OptixBuildInput triangle_input = {};
    triangle_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
    triangle_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
    triangle_input.triangleArray.vertexStrideInBytes = sizeof( Vertex );
    triangle_input.triangleArray.numVertices = static_cast<uint32_t>( g_vertices.size() );
    triangle_input.triangleArray.vertexBuffers = &state.d_vertices;
    triangle_input.triangleArray.flags = triangle_input_flags;
    triangle_input.triangleArray.numSbtRecords = MAT_COUNT;
    triangle_input.triangleArray.sbtIndexOffsetBuffer = d_mat_indices;
    triangle_input.triangleArray.sbtIndexOffsetSizeInBytes = sizeof( uint32_t );
    triangle_input.triangleArray.sbtIndexOffsetStrideInBytes = sizeof( uint32_t );

    // Allow compaction so the final GAS can be shrunk after the build.
    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
    accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

    OptixAccelBufferSizes gas_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage(
        state.context,
        &accel_options,
        &triangle_input,
        1, // num_build_inputs
        &gas_buffer_sizes
    ) );

    CUdeviceptr d_temp_buffer;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer ), gas_buffer_sizes.tempSizeInBytes ) );

    // non-compacted output: one allocation holding the GAS followed by an
    // 8-byte-aligned slot for the emitted compacted-size value.
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ),
        compactedSizeOffset + 8
    ) );

    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );

    OPTIX_CHECK( optixAccelBuild(
        state.context,
        0, // CUDA stream
        &accel_options,
        &triangle_input,
        1, // num build inputs
        d_temp_buffer,
        gas_buffer_sizes.tempSizeInBytes,
        d_buffer_temp_output_gas_and_compacted_size,
        gas_buffer_sizes.outputSizeInBytes,
        &state.gas_handle,
        &emitProperty, // emitted property list
        1              // num emitted properties
    ) );

    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_mat_indices ) ) );

    // Compact only if it actually saves memory; otherwise keep the build output.
    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, (void*)emitProperty.result, sizeof(size_t), cudaMemcpyDeviceToHost ) );

    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_gas_output_buffer ), compacted_gas_size ) );
        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( state.context, 0, state.gas_handle, state.d_gas_output_buffer, compacted_gas_size, &state.gas_handle ) );
        CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
    }
    else
    {
        state.d_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
    }
}
// Start an *asynchronous* module compilation: the PTX is written to a
// temporary file and a helper process (optixModuleCreateProcess) is spawned
// to compile it, warming the OptiX disk cache.  The operation is recorded in
// compile_operations_in_flight; updatePipelineWhenChanged() later polls for
// completion and re-creates the module in this context (a cache hit).
void createModule( OptixDeviceContext context, const std::string& ptx, const OptixModuleCompileOptions& module_compile_options, const OptixPipelineCompileOptions& pipeline_compile_options, OptixModule& module )
{
    // Get current CUDA device and pass that info to the compile process
    int device = 0;
    CUDA_CHECK(cudaGetDevice( &device ));

    // Write PTX to a temporary file that can be passed on to the compile process.
    // The target-module address + an invocation counter make the name unique.
    static int invocation = 0;
    const std::string filename = "temp_" + std::to_string( reinterpret_cast<uintptr_t>(&module) ) + "_" + std::to_string(invocation++) + ".ptx";
    std::ofstream temp_file( filename, std::ios::binary );
    SUTIL_ASSERT( temp_file.is_open() );
    temp_file.write( ptx.data(), ptx.size() );
    temp_file.close();

    // Build command-line for the compile process, serializing both the module
    // and the pipeline compile options so the child reproduces them exactly
    // (they are part of the OptiX cache key).
    std::string cmd;
    cmd += " --file " + filename;
    cmd += " --device " + std::to_string( device );
    cmd += " --maxRegisterCount ";
    cmd += std::to_string( module_compile_options.maxRegisterCount );
    cmd += " --optLevel ";
    cmd += std::to_string( static_cast<int>(module_compile_options.optLevel) );
    cmd += " --debugLevel ";
    cmd += std::to_string( static_cast<int>(module_compile_options.debugLevel) );
    for( unsigned int i = 0; i < module_compile_options.numBoundValues; ++i )
    {
        const OptixModuleCompileBoundValueEntry& bound_value = module_compile_options.boundValues[i];
        cmd += " --boundValue ";
        cmd += std::to_string( bound_value.pipelineParamOffsetInBytes );
        cmd += " ";
        cmd += std::to_string( bound_value.sizeInBytes );
        cmd += " \"";
        cmd += bound_value.annotation;
        cmd += "\" \"";
        // Encode bound value data as a string: two characters per byte,
        // each nibble emitted as '0' + nibble (low nibble first).
        // NOTE(review): byte_value is (possibly signed) char; the >> 4 on a
        // negative value relies on implementation-defined shift behavior,
        // though the & 0xF mask keeps the result in range on common ABIs.
        for( size_t byte = 0; byte < bound_value.sizeInBytes; ++byte )
        {
            const char byte_value = static_cast<const char*>(bound_value.boundValuePtr)[byte];
            cmd += '0' + ( byte_value & 0xF); // byte_low
            cmd += '0' + ((byte_value >> 4) & 0xF); // byte_high
        }
        cmd += "\"";
    }
    cmd += " --usesMotionBlur ";
    cmd += pipeline_compile_options.usesMotionBlur ? "1" : "0";
    cmd += " --traversableGraphFlags ";
    cmd += std::to_string( pipeline_compile_options.traversableGraphFlags );
    cmd += " --numPayloadValues ";
    cmd += std::to_string( pipeline_compile_options.numPayloadValues );
    cmd += " --numAttributeValues ";
    cmd += std::to_string( pipeline_compile_options.numAttributeValues );
    cmd += " --exceptionFlags ";
    cmd += std::to_string( pipeline_compile_options.exceptionFlags );
    cmd += " --pipelineLaunchParamsVariableName ";
    cmd += pipeline_compile_options.pipelineLaunchParamsVariableName;
    cmd += " --usesPrimitiveTypeFlags ";
    cmd += std::to_string( pipeline_compile_options.usesPrimitiveTypeFlags );

    // Record everything needed to finish the operation later.  bound_values
    // deep-copies the entries so the caller's (possibly stack-allocated)
    // boundValues array may go out of scope; the pointer inside
    // module_compile_options is re-targeted at this copy before use.
    CompileOperation operation;
    operation.ptx = ptx;
    operation.temp_file = filename;
    operation.module_compile_options = module_compile_options;
    operation.pipeline_compile_options = pipeline_compile_options;
    operation.bound_values.assign( module_compile_options.boundValues, module_compile_options.boundValues + module_compile_options.numBoundValues );
    operation.target_module = &module;

#ifdef WIN32
    // Get path to the module creation executable (optixModuleCreateProcess.exe), which should be right next to the sample executable
    char executablePath[MAX_PATH] = "";
    SUTIL_ASSERT( GetModuleFileNameA( nullptr, executablePath, sizeof( executablePath ) ) );
    std::string createProcessPath = executablePath;
    createProcessPath.erase( createProcessPath.rfind( '\\' ) + 1 );
    createProcessPath += "optixModuleCreateProcess.exe";

    STARTUPINFO si = { sizeof( si ) };
    PROCESS_INFORMATION pi = {};
    SUTIL_ASSERT( CreateProcessA( createProcessPath.c_str(), const_cast<char*>(cmd.c_str()), nullptr, nullptr, FALSE, 0, nullptr, nullptr, &si, &pi ) );
    operation.process_handle = pi.hProcess;
#else
    std::vector<char*> argv;
    // Split combined argument string into separate arguments (in place, by
    // replacing separators with NUL).  Assumes no embedded spaces except
    // inside the quoted annotation/data arguments handled below.
    cmd += ' ';
    for( size_t offset = 0, next; (next = cmd.find( ' ', offset )) != std::string::npos; offset = next + 1 )
    {
        // Remove quotes around argument
        if( cmd[offset] == '\"' )
        {
            offset++;
            cmd[next - 1] = '\0';
        }
        cmd[next] = '\0';
        argv.push_back( const_cast<char*>(cmd.c_str()) + offset );
    }

    char executablePath[PATH_MAX] = "";
    std::string createProcessPath;
    SUTIL_ASSERT( readlink( "/proc/self/exe", executablePath, sizeof( executablePath ) ) != -1 );
    createProcessPath = executablePath;
    createProcessPath.erase( createProcessPath.rfind( '/' ) + 1 );
    createProcessPath += "optixModuleCreateProcess";
    // argv[0] is conventionally the executable path.
    argv[0] = (char*)createProcessPath.c_str();
    argv.push_back( nullptr ); // Terminate argument list

    extern char **environ;
    SUTIL_ASSERT( posix_spawn( &operation.process_id, createProcessPath.c_str(), nullptr, nullptr, argv.data(), environ ) == 0 );
#endif

    compile_operations_in_flight.push_back( operation );
}
// Kick off an asynchronous compile of the radiance closest-hit module.  When
// `specialize` is enabled, the current light_samples value is baked into the
// module as an OptiX bound launch parameter, letting the compiler specialize
// (e.g. unroll) the light-sampling loop.
void createRadianceModule( PathTracerState& state )
{
    OptixModuleCompileOptions module_compile_options ={};
    // boundValue is a stack local, but createModule() deep-copies the bound
    // value entries into the CompileOperation before returning, so this does
    // not dangle.
    OptixModuleCompileBoundValueEntry boundValue ={};
    if( specialize )
    {
        boundValue.pipelineParamOffsetInBytes = offsetof( Params, light_samples );
        boundValue.sizeInBytes = sizeof( Params::light_samples );
        boundValue.boundValuePtr = &light_samples;
        boundValue.annotation = "light_samples";
        module_compile_options.numBoundValues = 1;
        module_compile_options.boundValues = &boundValue;
    }

    size_t inputSize = 0;
    const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixModuleCreateAbort_ch.cu", inputSize );
    std::string ptx( input, inputSize );
    createModule( state.context, ptx, module_compile_options, state.pipeline_compile_options, state.ptx_module_radiance );
}
// Configure the pipeline compile options shared by all modules, then start
// asynchronous compiles of the main module (raygen/miss/occlusion-CH) and the
// radiance closest-hit module.
void createModule( PathTracerState& state )
{
    OptixModuleCompileOptions module_compile_options = {};

    state.pipeline_compile_options.usesMotionBlur = false;
    // Scene is a single GAS traced directly, so restrict the graph accordingly.
    state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS;
    state.pipeline_compile_options.numPayloadValues = 2;
    state.pipeline_compile_options.numAttributeValues = 2;
#ifdef DEBUG // Enables debug exceptions during optix launches. This may incur significant performance cost and should only be done during development.
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_DEBUG | OPTIX_EXCEPTION_FLAG_TRACE_DEPTH | OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
#else
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
#endif
    state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";

    size_t inputSize = 0;
    const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixModuleCreateAbort.cu", inputSize );
    std::string ptx( input, inputSize );

    createModule( state.context, ptx, module_compile_options, state.pipeline_compile_options, state.ptx_module );
    createRadianceModule( state );
}
void createRadianceProgramGroup( PathTracerState& state )
{
char log[2048];
size_t sizeof_log = sizeof( log );
OptixProgramGroupOptions program_group_options ={};
OptixProgramGroupDesc hit_prog_group_desc ={};
hit_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
hit_prog_group_desc.hitgroup.moduleCH = state.ptx_module_radiance;
hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__radiance";
sizeof_log = sizeof( log );
OPTIX_CHECK_LOG( optixProgramGroupCreate(
state.context,
&hit_prog_group_desc,
1, // num program groups
&program_group_options,
log,
&sizeof_log,
&state.radiance_hit_group
) );
}
// Create all program groups for the pipeline: raygen, radiance miss,
// occlusion miss (NULL program), occlusion closest-hit, and — via the helper
// at the end — the radiance closest-hit group.
void createProgramGroups( PathTracerState& state )
{
    OptixProgramGroupOptions program_group_options = {};

    // `log` / `sizeof_log` are referenced by name inside OPTIX_CHECK_LOG.
    char log[2048];
    size_t sizeof_log = sizeof( log );

    {
        OptixProgramGroupDesc raygen_prog_group_desc = {};
        raygen_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
        raygen_prog_group_desc.raygen.module = state.ptx_module;
        raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";

        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &raygen_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.raygen_prog_group
        ) );
    }

    {
        OptixProgramGroupDesc miss_prog_group_desc = {};
        miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
        miss_prog_group_desc.miss.module = state.ptx_module;
        miss_prog_group_desc.miss.entryFunctionName = "__miss__radiance";
        sizeof_log = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &miss_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log, &sizeof_log,
            &state.radiance_miss_group
        ) );

        // Occlusion rays need no shading on miss: use a NULL miss program.
        memset( &miss_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
        miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
        miss_prog_group_desc.miss.module = nullptr; // NULL miss program for occlusion rays
        miss_prog_group_desc.miss.entryFunctionName = nullptr;
        sizeof_log = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate(
            state.context, &miss_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.occlusion_miss_group
        ) );
    }

    {
        OptixProgramGroupDesc hit_prog_group_desc = {};
        hit_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
        hit_prog_group_desc.hitgroup.moduleCH = state.ptx_module;
        hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__occlusion";
        sizeof_log = sizeof( log );
        // NOTE(review): uses OPTIX_CHECK rather than OPTIX_CHECK_LOG like the
        // other creations here, so the log buffer is passed but never printed
        // on failure — looks like an inconsistency, confirm intent.
        OPTIX_CHECK( optixProgramGroupCreate(
            state.context,
            &hit_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.occlusion_hit_group
        ) );
    }

    // The radiance hit group is created by the helper so it can also be
    // re-created alone after a radiance-module recompile.
    createRadianceProgramGroup( state );
}
// Link all program groups into the pipeline and configure its stack sizes.
void createPipeline( PathTracerState& state )
{
    OptixProgramGroup program_groups[] =
    {
        state.raygen_prog_group,
        state.radiance_miss_group,
        state.occlusion_miss_group,
        state.radiance_hit_group,
        state.occlusion_hit_group
    };

    // Depth 2: radiance ray plus one occlusion (shadow) ray from the CH program.
    OptixPipelineLinkOptions pipeline_link_options = {};
    pipeline_link_options.maxTraceDepth = 2;
    pipeline_link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;

    // `log` / `sizeof_log` are referenced by name inside OPTIX_CHECK_LOG.
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate(
        state.context,
        &state.pipeline_compile_options,
        &pipeline_link_options,
        program_groups,
        sizeof( program_groups ) / sizeof( program_groups[0] ),
        log,
        &sizeof_log,
        &state.pipeline
    ) );

    // We need to specify the max traversal depth. Calculate the stack sizes, so we can specify all
    // parameters to optixPipelineSetStackSize.
    OptixStackSizes stack_sizes = {};
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.raygen_prog_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.radiance_miss_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.occlusion_miss_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.radiance_hit_group, &stack_sizes ) );
    OPTIX_CHECK( optixUtilAccumulateStackSizes( state.occlusion_hit_group, &stack_sizes ) );

    uint32_t max_trace_depth = 2;
    uint32_t max_cc_depth = 0; // no continuation callables used
    uint32_t max_dc_depth = 0; // no direct callables used
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes(
        &stack_sizes,
        max_trace_depth,
        max_cc_depth,
        max_dc_depth,
        &direct_callable_stack_size_from_traversal,
        &direct_callable_stack_size_from_state,
        &continuation_stack_size
    ) );

    // Single GAS, no instancing: traversal depth of 1 suffices.
    const uint32_t max_traversal_depth = 1;
    OPTIX_CHECK( optixPipelineSetStackSize(
        state.pipeline,
        direct_callable_stack_size_from_traversal,
        direct_callable_stack_size_from_state,
        continuation_stack_size,
        max_traversal_depth
    ) );
}
// Allocate the device buffers of the shader binding table: one raygen record,
// one miss record per ray type, and one hit-group record per ray type per
// material.  Records are filled later by fillSBT/fillHitGroupSBT.
void allocateSBT( PathTracerState& state )
{
    const size_t raygen_record_size = sizeof( RayGenRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>(&state.sbt.raygenRecord), raygen_record_size ) );

    const size_t miss_record_size = sizeof( MissRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>(&state.sbt.missRecordBase), miss_record_size * RAY_TYPE_COUNT ) );
    state.sbt.missRecordStrideInBytes = static_cast<uint32_t>(miss_record_size);
    state.sbt.missRecordCount = RAY_TYPE_COUNT;

    const size_t hitgroup_record_size = sizeof( HitGroupRecord );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>(&state.sbt.hitgroupRecordBase),
        hitgroup_record_size * RAY_TYPE_COUNT * MAT_COUNT
    ) );
    state.sbt.hitgroupRecordStrideInBytes = static_cast<uint32_t>(hitgroup_record_size);
    state.sbt.hitgroupRecordCount = RAY_TYPE_COUNT * MAT_COUNT;
}
// Pack and upload the raygen and miss SBT records.  Must be re-run whenever
// the corresponding program groups are re-created, since the packed headers
// reference them.  Hit-group records are handled by fillHitGroupSBT.
void fillSBT( PathTracerState& state )
{
    const size_t raygen_record_size = sizeof( RayGenRecord );
    RayGenRecord rg_sbt ={};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &rg_sbt ) );

    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>(state.sbt.raygenRecord),
        &rg_sbt,
        raygen_record_size,
        cudaMemcpyHostToDevice
    ) );

    // One miss record per ray type: [0] radiance, [1] occlusion.
    const size_t miss_record_size = sizeof( MissRecord );
    MissRecord ms_sbt[2];
    OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_miss_group, &ms_sbt[0] ) );
    ms_sbt[0].data.bg_color = make_float4( 0.0f );
    OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_miss_group, &ms_sbt[1] ) );
    ms_sbt[1].data.bg_color = make_float4( 0.0f );

    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>(state.sbt.missRecordBase),
        ms_sbt,
        miss_record_size*RAY_TYPE_COUNT,
        cudaMemcpyHostToDevice
    ) );
}
// Pack and upload the hit-group SBT records: for each material, a radiance
// record carrying the material data followed by an (empty) occlusion record.
void fillHitGroupSBT( PathTracerState& state )
{
    const size_t hitgroup_record_size = sizeof( HitGroupRecord );
    HitGroupRecord hitgroup_records[RAY_TYPE_COUNT * MAT_COUNT];
    for( int i = 0; i < MAT_COUNT; ++i )
    {
        {
            const int sbt_idx = i * RAY_TYPE_COUNT + 0; // SBT for radiance ray-type for ith material
            OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_hit_group, &hitgroup_records[sbt_idx] ) );
            hitgroup_records[sbt_idx].data.emission_color = g_emission_colors[i];
            hitgroup_records[sbt_idx].data.diffuse_color = g_diffuse_colors[i];
            hitgroup_records[sbt_idx].data.vertices = reinterpret_cast<float4*>(state.d_vertices);
        }

        {
            const int sbt_idx = i * RAY_TYPE_COUNT + 1; // SBT for occlusion ray-type for ith material
            // Occlusion records carry no data; zero them before packing the header.
            memset( &hitgroup_records[sbt_idx], 0, hitgroup_record_size );
            OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_hit_group, &hitgroup_records[sbt_idx] ) );
        }
    }

    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>(state.sbt.hitgroupRecordBase),
        hitgroup_records,
        hitgroup_record_size*RAY_TYPE_COUNT*MAT_COUNT,
        cudaMemcpyHostToDevice
    ) );
}
// Rebuild the program groups, pipeline, and SBT record contents after one of
// the modules was re-created (e.g. a recompile triggered by toggling
// specialization or changing the light-sample count).
//
// Fix: the original called createRadianceProgramGroup() again right after
// createProgramGroups(), but createProgramGroups() already creates the
// radiance hit group as its last step.  The duplicate call overwrote
// state.radiance_hit_group, leaking one OptixProgramGroup per recompile.
void updatePipeline( PathTracerState& state )
{
    // Both modules must exist before a pipeline can be linked (on startup the
    // asynchronous compiles may not all have finished yet).
    if( state.ptx_module == nullptr || state.ptx_module_radiance == nullptr )
    {
        return;
    }

    // Destroy the previous pipeline and program groups, if any.
    if( state.pipeline != nullptr )
    {
        OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
        OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
        OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_hit_group ) );
        OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_miss_group ) );
        OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_hit_group ) );
        OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_miss_group ) );
    }

    // createProgramGroups() also (re)creates the radiance hit group.
    createProgramGroups( state );
    createPipeline( state );

    // Re-pack the SBT headers, since they reference the new program groups.
    fillSBT( state );
    fillHitGroupSBT( state );

    // Force params.subframe_index to zero in updateState (which is called after
    // updatePipeline in the render loop), so accumulation restarts when
    // rendering with the new pipeline begins.
    camera_changed = true;
}
// Non-blocking poll over all in-flight compile processes.  For each finished
// process: clean up its temporary PTX file, verify it succeeded, then create
// the module in this context via optixModuleCreateFromPTX — which should hit
// the disk cache the child process just populated and return almost
// instantly.  If any module was (re)created, rebuild the pipeline.
void updatePipelineWhenChanged( PathTracerState& state )
{
    if( compile_operations_in_flight.empty() )
        return;

    bool need_pipeline_update = false;
    for( auto it = compile_operations_in_flight.begin(); it != compile_operations_in_flight.end(); )
    {
#ifdef WIN32
        // Check if the compile process has already exited (zero timeout = poll).
        if( WaitForSingleObject( it->process_handle, 0 ) != WAIT_OBJECT_0 )
        {
            ++it;
            continue;
        }

        // Copy before erase: the iterator is invalidated by erase().
        CompileOperation operation = *it;
        // This compile operation is finished, so remove it from the list
        it = compile_operations_in_flight.erase( it );

        // Process has exited, so check the exit code to ensure module compilation was successfull
        DWORD exit_code = EXIT_FAILURE;
        GetExitCodeProcess( operation.process_handle, &exit_code );
        CloseHandle( operation.process_handle );

        // Delete temporary file
        SUTIL_ASSERT( DeleteFileA( operation.temp_file.c_str() ) );

        SUTIL_ASSERT_MSG( exit_code == 0, "Compile process was not successfull" );
#else
        // WNOHANG makes waitpid return 0 while the child is still running.
        int status = -1;
        if( waitpid( it->process_id, &status, WNOHANG ) == 0 )
        {
            ++it;
            continue;
        }

        // Copy before erase: the iterator is invalidated by erase().
        CompileOperation operation = *it;
        // This compile operation is finished, so remove it from the list
        it = compile_operations_in_flight.erase( it );

        // Delete temporary file
        SUTIL_ASSERT( remove( operation.temp_file.c_str() ) == 0 );

        SUTIL_ASSERT_MSG( WEXITSTATUS( status ) == 0, "Compile process was not successfull" );
#endif
        // Re-target the bound-values pointer at the deep copy stored in the
        // operation (the original array may have been a caller's stack local).
        operation.module_compile_options.boundValues = operation.bound_values.data();

        // The module should be in cache, so simply create it again in this context
        // This API call should now return instantenously (unless caching failed, in which case this would recompile the module again)
        OPTIX_CHECK( optixModuleCreateFromPTX(
            state.context,
            &operation.module_compile_options,
            &operation.pipeline_compile_options,
            operation.ptx.c_str(),
            operation.ptx.size(),
            nullptr, 0,
            operation.target_module
        ) );

        need_pipeline_update = true;
    }

    if( need_pipeline_update )
    {
        updatePipeline( state );
    }
}
// Tear down all OptiX objects (pipeline, program groups, modules, context)
// and free every device allocation owned by the state.
void cleanupState( PathTracerState& state )
{
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_miss_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.radiance_hit_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_hit_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.occlusion_miss_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module_radiance ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );

    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_vertices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.params.accum_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_params ) ) );
}
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
// Draw the ImGui overlay showing the current light-sample count, the
// specialization state, and an abort hint while compiles are in flight.
void displaySpecializationInfo( GLFWwindow* window )
{
    static char display_text[256];

    sutil::beginFrameImGui();
    // snprintf instead of sprintf: guarantees the composed text can never
    // overrun the fixed 256-byte buffer (result is truncated, not overflowed).
    snprintf( display_text, sizeof( display_text ),
        "light samples [+/-]: %d\n"
        "specialization [S] : %s\n%s",
        light_samples,
        specialize ? "on" : "off",
        !compile_operations_in_flight.empty() ? "[A] to abort compiling ...\n" : "" );
    Params& params = static_cast<PathTracerState*>(glfwGetWindowUserPointer( window ))->params;
    sutil::displayText( display_text, 10.0f, (float)params.height - 70.f );
    sutil::endFrameImGui();
}
// Entry point: parse options, build the OptiX scene, then either run the
// interactive GLFW render loop or render a single image to a file.
int main( int argc, char* argv[] )
{
    PathTracerState state;
    state.params.width = 768;
    state.params.height = 768;
    sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;

    //
    // Parse command line options
    //
    std::string outfile;

    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--no-gl-interop" )
        {
            output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
        }
        else if( arg == "--no-specialize" )
        {
            specialize = false;
        }
        else if( arg.substr( 0, 6 ) == "--dim=" )
        {
            const std::string dims_arg = arg.substr( 6 );
            int w, h;
            sutil::parseDimensions( dims_arg.c_str(), w, h );
            state.params.width = w;
            state.params.height = h;
        }
        else if( arg == "--launch-samples" || arg == "-s" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            samples_per_launch = atoi( argv[++i] );
        }
        else if( arg == "--light-samples" || arg == "-l" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            light_samples = atoi( argv[++i] );
        }
        else if( arg == "--file" || arg == "-f" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outfile = argv[++i];
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }

    try
    {
        initCameraState();

        //
        // Set up OptiX state (modules start compiling asynchronously here;
        // the pipeline is created later once the compiles finish).
        //
        createContext( state );
        buildMeshAccel( state );
        createModule( state );
        allocateSBT( state );
        initLaunchParams( state );

        if( outfile.empty() )
        {
            // Interactive mode: GLFW window + render loop.
            GLFWwindow* window = sutil::initUI( "optixModuleCreateAbort", state.params.width, state.params.height );
            glfwSetMouseButtonCallback( window, mouseButtonCallback );
            glfwSetCursorPosCallback( window, cursorPosCallback );
            glfwSetWindowSizeCallback( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback( window, keyCallback );
            glfwSetCharCallback( window, charCallback );
            glfwSetScrollCallback( window, scrollCallback );
            // Callbacks retrieve the state through the window user pointer.
            glfwSetWindowUserPointer( window, &state );

            //
            // Render loop
            //
            {
                sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, state.params.width, state.params.height );
                output_buffer.setStream( state.stream );
                sutil::GLDisplay gl_display;

                std::chrono::duration<double> state_update_time( 0.0 );
                std::chrono::duration<double> render_time( 0.0 );
                std::chrono::duration<double> display_time( 0.0 );

                do
                {
                    auto t0 = std::chrono::steady_clock::now();
                    glfwPollEvents();

                    // Pick up finished module compiles and rebuild the pipeline.
                    updatePipelineWhenChanged( state );
                    updateState( output_buffer, state.params );
                    auto t1 = std::chrono::steady_clock::now();
                    state_update_time += t1 - t0;
                    t0 = t1;

                    // No-op until the pipeline exists (see launchSubframe).
                    launchSubframe( output_buffer, state );
                    t1 = std::chrono::steady_clock::now();
                    render_time += t1 - t0;
                    t0 = t1;

                    displaySubframe( output_buffer, gl_display, window );
                    t1 = std::chrono::steady_clock::now();
                    display_time += t1 - t0;

                    sutil::displayStats( state_update_time, render_time, display_time );

                    displaySpecializationInfo( window );

                    glfwSwapBuffers( window );

                    ++state.params.subframe_index;
                } while( !glfwWindowShouldClose( window ) );
                CUDA_SYNC_CHECK();
            }

            sutil::cleanupUI( window );
        }
        else
        {
            // File mode: render once and save to `outfile`.
            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                sutil::initGLFW(); // For GL context
                sutil::initGL();
            }

            sutil::CUDAOutputBuffer<uchar4> output_buffer( output_buffer_type, state.params.width, state.params.height );
            output_buffer.setStream( state.stream );

            handleCameraUpdate( state.params );
            handleResize( output_buffer, state.params );

            // Need to potentially wait on modules to compile since we are not in a render loop.
            // NOTE(review): this is a busy-wait with no sleep; it spins the CPU
            // while the compile processes run.
            while( state.ptx_module_radiance == nullptr || state.ptx_module == nullptr )
                updatePipelineWhenChanged( state );

            launchSubframe( output_buffer, state );

            sutil::ImageBuffer buffer;
            buffer.data = output_buffer.getHostPointer();
            buffer.width = output_buffer.width();
            buffer.height = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;

            sutil::saveImage( outfile.c_str(), buffer, false );

            if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
            {
                glfwTerminate();
            }
        }

        cleanupState( state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }

    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixModuleCreateAbort/optixModuleCreateAbort.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixModuleCreateAbort.h"
#include "random.h"
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Writes the occlusion result into payload register 0 so the caller of
// optixTrace() can read it back after the trace returns.
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
    optixSetPayload_0( occluded ? 1u : 0u );
}
// Traces a radiance ray through the scene and lets the closest-hit / miss
// programs fill in *prd.  The 64-bit pointer to the per-ray data is split
// across the two 32-bit payload registers (u0/u1); hit programs reconstruct
// it via getPRD().
static __forceinline__ __device__ void traceRadiance(
        OptixTraversableHandle handle,
        float3                 ray_origin,
        float3                 ray_direction,
        float                  tmin,
        float                  tmax,
        RadiancePRD*           prd
        )
{
    // TODO: deduce stride from num ray-types passed in params
    unsigned int u0, u1;
    packPointer( prd, u0, u1 );
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f,                     // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_NONE,
            RAY_TYPE_RADIANCE,        // SBT offset
            RAY_TYPE_COUNT,           // SBT stride
            RAY_TYPE_RADIANCE,        // missSBTIndex
            u0, u1 );
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Ray-generation program: path-traces one pixel.  For each of
// params.samples_per_launch jittered camera samples it follows a path of up
// to 4 segments, accumulating emitted light plus next-event-estimation
// radiance, then blends the result into the accumulation buffer for
// progressive refinement across subframes.
extern "C" __global__ void __raygen__rg()
{
    const int    w   = params.width;
    const int    h   = params.height;
    const float3 eye = params.eye;
    const float3 U   = params.U;
    const float3 V   = params.V;
    const float3 W   = params.W;
    const uint3  idx = optixGetLaunchIndex();
    const int    subframe_index = params.subframe_index;

    // Per-pixel RNG seed; re-seeded every subframe so samples decorrelate.
    unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index );

    float3 result = make_float3( 0.0f );
    // NOTE(review): do/while assumes samples_per_launch >= 1 — a value of 0
    // would wrap the counter; confirm callers never pass 0.
    int i = params.samples_per_launch;
    do
    {
        // Jitter the sample position within the pixel for anti-aliasing;
        // d is the pixel position in normalized device coordinates [-1, 1].
        const float2 subpixel_jitter = make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );

        const float2 d = 2.0f * make_float2(
                ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
                ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
                ) - 1.0f;
        float3 ray_direction = normalize(d.x*U + d.y*V + W);
        float3 ray_origin    = eye;

        RadiancePRD prd;
        prd.emitted      = make_float3(0.f);
        prd.radiance     = make_float3(0.f);
        prd.attenuation  = make_float3(1.f);
        prd.countEmitted = true;
        prd.done         = false;
        prd.seed         = seed;

        int depth = 0;
        for( ;; )
        {
            traceRadiance(
                    params.handle,
                    ray_origin,
                    ray_direction,
                    0.01f,  // tmin       // TODO: smarter offset
                    1e16f,  // tmax
                    &prd );

            // Add light picked up along this segment, attenuated by the
            // accumulated path throughput.
            result += prd.emitted;
            result += prd.radiance * prd.attenuation;

            if( prd.done || depth >= 3 ) // TODO RR, variable for depth
                break;

            // Continue the path from the sampled scatter direction.
            ray_origin    = prd.origin;
            ray_direction = prd.direction;

            ++depth;
        }
    }
    while( --i );

    const uint3        launch_index = optixGetLaunchIndex();
    const unsigned int image_index  = launch_index.y * params.width + launch_index.x;
    float3             accum_color  = result / static_cast<float>( params.samples_per_launch );

    // Running average over all subframes rendered so far.
    if( subframe_index > 0 )
    {
        const float  a                = 1.0f / static_cast<float>( subframe_index+1 );
        const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
        accum_color = lerp( accum_color_prev, accum_color, a );
    }
    params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
    params.frame_buffer[ image_index ] = make_color ( accum_color );
}
// Miss program for radiance rays: the ray escaped the scene, so return the
// background color from the miss SBT record and terminate the path.
extern "C" __global__ void __miss__radiance()
{
    const MissData* miss_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    RadiancePRD*    payload   = getPRD();

    payload->radiance = make_float3( miss_data->bg_color );
    payload->done     = true;
}
// Closest-hit program for occlusion (shadow) rays: any hit means the light
// sample is blocked, so just flag the payload as occluded.
extern "C" __global__ void __closesthit__occlusion()
{
    setPayloadOcclusion( true );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixModuleCreateAbort/optixModuleCreateAbort.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Ray types used to index the shader binding table; each geometry carries one
// hit-group record per ray type, with RAY_TYPE_COUNT used as the SBT stride.
enum RayType
{
    RAY_TYPE_RADIANCE  = 0,  // path-tracing rays carrying radiance
    RAY_TYPE_OCCLUSION = 1,  // shadow rays for light-sample visibility
    RAY_TYPE_COUNT
};
// Area light shaped as a parallelogram: corner plus two edge vectors.
struct ParallelogramLight
{
    float3 corner;    // one corner of the parallelogram
    float3 v1, v2;    // edge vectors spanning the light surface
    float3 normal;    // surface normal of the light
    float3 emission;  // emitted radiance
};
// Launch parameters shared between host and device (declared __constant__ on
// the device side).
struct Params
{
    unsigned int subframe_index;     // index of the current progressive subframe
    float4*      accum_buffer;       // running HDR average over subframes
    uchar4*      frame_buffer;       // displayable 8-bit output
    unsigned int width;              // image width in pixels
    unsigned int height;             // image height in pixels
    unsigned int samples_per_launch; // camera samples per pixel per launch
    unsigned int light_samples;      // shadow-ray samples per surface hit

    // Pinhole camera: eye position and the U/V/W view frame vectors.
    float3       eye;
    float3       U;
    float3       V;
    float3       W;

    ParallelogramLight     light; // TODO: make light list
    OptixTraversableHandle handle; // top-level acceleration structure
};
// SBT record data for the ray-generation program (intentionally empty).
struct RayGenData
{
};
// SBT record data for miss programs.
struct MissData
{
    float4 bg_color; // background radiance returned for escaped rays
};
// SBT record data for hit-group programs (per-material).
struct HitGroupData
{
    float3  emission_color; // emitted radiance of the material
    float3  diffuse_color;  // Lambertian albedo
    float4* vertices;       // triangle vertex buffer, 3 entries per primitive
};
#if defined( __CUDACC__ )
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
// Per-ray data for radiance rays, passed to hit/miss programs via a pointer
// packed into two payload registers.
struct RadiancePRD
{
    // TODO: move some state directly into payload registers?
    float3       emitted;      // light emitted at the current hit point
    float3       radiance;     // direct-lighting estimate at the current hit
    float3       attenuation;  // accumulated path throughput
    float3       origin;       // origin of the next path segment
    float3       direction;    // direction of the next path segment
    unsigned int seed;         // RNG state carried along the path
    int          countEmitted; // whether emission should still be counted
    int          done;         // path terminated (miss or absorbed)
    int          pad;
};
// Orthonormal basis around a given normal.  Used to rotate cosine-hemisphere
// samples from the local shading frame into world space.
struct Onb
{
    // Builds binormal/tangent perpendicular to 'normal'.  The branch selects
    // the component arrangement based on the larger of |n.x| and |n.z| to
    // avoid producing a degenerate (near-zero) binormal.
    __forceinline__ __device__ Onb( const float3& normal )
    {
        m_normal = normal;

        if( fabs( m_normal.x ) > fabs( m_normal.z ) )
        {
            m_binormal.x = -m_normal.y;
            m_binormal.y = m_normal.x;
            m_binormal.z = 0;
        }
        else
        {
            m_binormal.x = 0;
            m_binormal.y = -m_normal.z;
            m_binormal.z = m_normal.y;
        }

        m_binormal = normalize( m_binormal );
        m_tangent  = cross( m_binormal, m_normal );
    }

    // Transforms 'p' from this local frame (tangent/binormal/normal
    // coordinates) into world space, in place.
    __forceinline__ __device__ void inverse_transform( float3& p ) const
    {
        p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal;
    }

    float3 m_tangent;
    float3 m_binormal;
    float3 m_normal;
};
// Reassembles a 64-bit pointer that was split across two 32-bit payload
// registers: i0 holds the high word, i1 the low word.
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
    unsigned long long uptr = static_cast<unsigned long long>( i0 );
    uptr = ( uptr << 32 ) | static_cast<unsigned long long>( i1 );
    return reinterpret_cast<void*>( uptr );
}
// Splits a 64-bit pointer into two 32-bit halves suitable for payload
// registers: i0 receives the high word, i1 the low word.
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
    const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr );
    i0 = static_cast<unsigned int>( uptr >> 32 );
    i1 = static_cast<unsigned int>( uptr );
}
// Reconstructs the RadiancePRD pointer from payload registers 0/1, the
// inverse of the packPointer() call done in traceRadiance().
static __forceinline__ __device__ RadiancePRD* getPRD()
{
    const unsigned int u0 = optixGetPayload_0();
    const unsigned int u1 = optixGetPayload_1();
    return reinterpret_cast<RadiancePRD*>(unpackPointer( u0, u1 ));
}
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixModuleCreateAbort/optixModuleCreateAbort_ch.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixModuleCreateAbort.h"
#include "random.h"
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Draws a cosine-weighted direction on the +z hemisphere from two uniform
// random numbers u1, u2 in [0,1): sample the unit disk uniformly in polar
// coordinates, then project the point up onto the hemisphere.
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
    // Uniformly sample disk.
    const float r   = sqrtf( u1 );
    const float phi = 2.0f*M_PIf * u2;
    p.x = r * cosf( phi );
    p.y = r * sinf( phi );

    // Project up to hemisphere.
    p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) );
}
// Traces a shadow ray towards a light sample and returns true when any
// geometry blocks it.  OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT is used because
// only the binary hit/no-hit answer matters; the occlusion closest-hit
// program sets payload register 0 on a hit.
static __forceinline__ __device__ bool traceOcclusion(
        OptixTraversableHandle handle,
        float3                 ray_origin,
        float3                 ray_direction,
        float                  tmin,
        float                  tmax
        )
{
    unsigned int occluded = 0u;
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f,                                 // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
            RAY_TYPE_OCCLUSION,                   // SBT offset
            RAY_TYPE_COUNT,                       // SBT stride
            RAY_TYPE_OCCLUSION,                   // missSBTIndex
            occluded );
    return occluded;
}
// Closest-hit program for radiance rays on a Lambertian surface: records any
// emission, samples a cosine-weighted bounce direction for the next path
// segment, and estimates direct lighting from the parallelogram light with
// params.light_samples shadow rays (next-event estimation).
extern "C" __global__ void __closesthit__radiance()
{
    HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();

    const int    prim_idx        = optixGetPrimitiveIndex();
    const float3 ray_dir         = optixGetWorldRayDirection();
    const int    vert_idx_offset = prim_idx*3;

    // Geometric normal from the triangle's vertices, flipped to face the ray.
    const float3 v0   = make_float3( rt_data->vertices[ vert_idx_offset+0 ] );
    const float3 v1   = make_float3( rt_data->vertices[ vert_idx_offset+1 ] );
    const float3 v2   = make_float3( rt_data->vertices[ vert_idx_offset+2 ] );
    const float3 N_0  = normalize( cross( v1-v0, v2-v0 ) );

    const float3 N    = faceforward( N_0, -ray_dir, N_0 );
    const float3 P    = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir;

    RadiancePRD* prd = getPRD();

    // Only count emission directly after a camera/scatter event to avoid
    // double-counting light that NEE already accounted for.
    if( prd->countEmitted )
        prd->emitted = rt_data->emission_color;
    else
        prd->emitted = make_float3( 0.0f );

    unsigned int seed = prd->seed;

    // Sample the diffuse BRDF: cosine-weighted direction about N.
    {
        const float z1 = rnd(seed);
        const float z2 = rnd(seed);

        float3 w_in;
        cosine_sample_hemisphere( z1, z2, w_in );
        Onb onb( N );
        onb.inverse_transform( w_in );
        prd->direction = w_in;
        prd->origin    = P;

        prd->attenuation *= rt_data->diffuse_color;
        prd->countEmitted = false;
    }

    // Next-event estimation: sample points on the area light.
    float3 result = make_float3( 0.0f );
    for( int i = 0; i < params.light_samples; ++i )
    {
        const float z1 = rnd( seed );
        const float z2 = rnd( seed );
        prd->seed = seed;

        ParallelogramLight light     = params.light;
        const float3       light_pos = light.corner + light.v1 * z1 + light.v2 * z2;

        // Calculate properties of light sample (for area based pdf)
        const float  Ldist = length( light_pos - P );
        const float3 L     = normalize( light_pos - P );
        const float  nDl   = dot( N, L );
        const float  LnDl  = -dot( light.normal, L );

        float weight = 0.0f;
        // Only front-facing configurations contribute; test visibility.
        if( nDl > 0.0f && LnDl > 0.0f )
        {
            const bool occluded = traceOcclusion(
                params.handle,
                P,
                L,
                0.01f,         // tmin
                Ldist - 0.01f  // tmax
                );

            if( !occluded )
            {
                // Geometry term over the area-measure pdf (1/A).
                const float A = length( cross( light.v1, light.v2 ) );
                weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist);
                result += (light.emission * weight);
            }
        }
    }

    // NOTE(review): assumes params.light_samples >= 1; a value of 0 would
    // divide by zero here — confirm against the host-side defaults.
    prd->radiance += ( result / params.light_samples );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixModuleCreateAbort/optixModuleCreateProcess.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stubs.h>
#include <sutil/Exception.h>
#include <vector>
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
// Helper executable launched by the 'optixModuleCreateAbort' sample: compiles
// a single PTX file to an OptiX module in a separate process so the parent
// can abort compilation simply by killing this process.  The compile result
// is persisted in the OptiX disk cache, where the parent picks it up
// afterwards — hence the hard requirement below that the cache is enabled.
// Returns 0 on success, 1 on any argument or compilation error.
int main( int argc, char* argv[] )
{
    // Parse command line options
    int device = 0;
    std::string filename;

    OptixModuleCompileOptions module_compile_options = {};
    OptixPipelineCompileOptions pipeline_compile_options = {};

    // Bound-value entries point into the buffers held by 'bound_values_data';
    // both vectors must stay alive until optixModuleCreateFromPTX() returns.
    std::vector<OptixModuleCompileBoundValueEntry> bound_values;
    std::vector<std::vector<char>> bound_values_data;

    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        // Every option takes at least one value argument.
        if( i + 1 >= argc )
        {
            std::cerr << "Incorrect number of arguments to option '" << arg << "'\n";
            return 1;
        }

        // Context options
        if( arg == "--file" )
        {
            filename = argv[++i];
        }
        else if( arg == "--device" )
        {
            device = atoi( argv[++i] );
        }
        // Module compile options
        else if( arg == "--maxRegisterCount" )
        {
            module_compile_options.maxRegisterCount = atoi( argv[++i] );
        }
        else if( arg == "--optLevel" )
        {
            module_compile_options.optLevel = static_cast<OptixCompileOptimizationLevel>(atoi( argv[++i] ));
        }
        else if( arg == "--debugLevel" )
        {
            module_compile_options.debugLevel = static_cast<OptixCompileDebugLevel>(atoi( argv[++i] ));
        }
        else if( arg == "--boundValue" )
        {
            // --boundValue <pipelineParamOffsetInBytes> <sizeInBytes> <annotation> <data>
            if( i + 4 >= argc )
            {
                std::cerr << "Incorrect number of arguments to option '--boundValue'\n";
                return 1;
            }
            OptixModuleCompileBoundValueEntry bound_value = {};
            bound_value.pipelineParamOffsetInBytes = atoi( argv[++i] );
            bound_value.sizeInBytes = atoi( argv[++i] );
            bound_value.annotation = argv[++i];
            const std::string byte_data = argv[++i];
            if( byte_data.size() != bound_value.sizeInBytes * 2 )
            {
                std::cerr << "Incorrect size of encoded bound value data (expected " << (bound_value.sizeInBytes * 2) << " characters, but got " << byte_data.size() << ")\n";
                return 1;
            }

            // Allocate space for the value data and decode it from the command-line argument
            // NOTE(review): this is not standard hex — each byte appears to be
            // encoded as two characters, '0' + low nibble followed by
            // '0' + high nibble, matching the parent process's encoder; verify
            // against the encoding side in optixModuleCreateAbort.cpp.
            std::vector<char> bound_value_data( bound_value.sizeInBytes );
            for( size_t byte = 0; byte < bound_value.sizeInBytes; ++byte )
            {
                const char byte_low = (byte_data[byte * 2] - '0');
                const char byte_high = (byte_data[byte * 2 + 1] - '0') << 4;
                bound_value_data[byte] = byte_low | byte_high;
            }
            bound_value.boundValuePtr = bound_value_data.data();

            bound_values.push_back( bound_value );
            // Moving a std::vector preserves its data() pointer, so
            // 'boundValuePtr' captured above stays valid after this move.
            bound_values_data.push_back( std::move(bound_value_data) ); // Move vector here to data pointer stays valid
            continue;
        }
        // Pipeline compile options
        else if( arg == "--usesMotionBlur" )
        {
            pipeline_compile_options.usesMotionBlur = atoi( argv[++i] ) != 0;
        }
        else if( arg == "--traversableGraphFlags" )
        {
            pipeline_compile_options.traversableGraphFlags = atoi( argv[++i] );
        }
        else if( arg == "--numPayloadValues" )
        {
            pipeline_compile_options.numPayloadValues = atoi( argv[++i] );
        }
        else if( arg == "--numAttributeValues" )
        {
            pipeline_compile_options.numAttributeValues = atoi( argv[++i] );
        }
        else if( arg == "--exceptionFlags" )
        {
            pipeline_compile_options.exceptionFlags = atoi( argv[++i] );
        }
        else if( arg == "--pipelineLaunchParamsVariableName" )
        {
            pipeline_compile_options.pipelineLaunchParamsVariableName = argv[++i];
        }
        else if( arg == "--usesPrimitiveTypeFlags" )
        {
            pipeline_compile_options.usesPrimitiveTypeFlags = atoi( argv[++i] );
        }
        else
        {
            std::cerr << "Unknown option '" << arg << "'\n";
            return 1;
        }
    }

    // Print usage and bail out when invoked without the mandatory --file.
    if (argc <= 1 || filename.empty())
    {
        std::cerr << "This executable is used by the 'optixModuleCreateAbort' sample to compile OptiX modules.\nIt is not meant to be called directly.\n";
        std::cerr << "Usage  : " << argv[0] << " [options]\n";
        std::cerr << "Options: --file <path>                                    PTX file to compile\n";
        std::cerr << "         --device <index>                                 CUDA device index of the GPU to target\n";
        std::cerr << "         --maxRegisterCount <value>                       OptixModuleCompileOptions::maxRegisterCount\n";
        std::cerr << "         --optLevel <value>                               OptixModuleCompileOptions::optLevel\n";
        std::cerr << "         --debugLevel <value>                             OptixModuleCompileOptions::debugLevel\n";
        std::cerr << "         --usesMotionBlur <value>                         OptixModuleCompileOptions::usesMotionBlur\n";
        std::cerr << "         --traversableGraphFlags <value>                  OptixModuleCompileOptions::traversableGraphFlags\n";
        std::cerr << "         --numPayloadValues <value>                       OptixModuleCompileOptions::numPayloadValues\n";
        std::cerr << "         --numAttributeValues <value>                     OptixModuleCompileOptions::numAttributeValues\n";
        std::cerr << "         --exceptionFlags <value>                         OptixModuleCompileOptions::exceptionFlags\n";
        std::cerr << "         --pipelineLaunchParamsVariableName <value>       OptixModuleCompileOptions::pipelineLaunchParamsVariableName\n";
        std::cerr << "         --usesPrimitiveTypeFlags <value>                 OptixModuleCompileOptions::usesPrimitiveTypeFlags\n";
        std::cerr << "         --boundValue <pipelineParamOffsetInBytes> <sizeInBytes> <annotation> <data>\n";
        std::cerr << "                                                          Add entry to OptixModuleCompileOptions::boundValues\n";
        return 1;
    }

    // Now that all bound values where parsed from the command-line, assign them to the compile options
    if( !bound_values.empty() )
    {
        module_compile_options.boundValues = bound_values.data();
        module_compile_options.numBoundValues = static_cast<unsigned int>(bound_values.size());
    }

    try
    {
        // Initialize CUDA
        CUDA_CHECK( cudaSetDevice(device) );
        CUDA_CHECK( cudaFree(0) );

        // Initialize OptiX (using the CUDA context that was just initialized)
        OptixDeviceContext context = nullptr;
        OPTIX_CHECK( optixInit() );
        OptixDeviceContextOptions options = {};
        OPTIX_CHECK( optixDeviceContextCreate( nullptr, &options, &context ) );

        // Ensure disk cache is enabled, since this relies on the compile result being cached
        int cache_enabled = false;
        OPTIX_CHECK( optixDeviceContextGetCacheEnabled( context, &cache_enabled ) );
        SUTIL_ASSERT( cache_enabled );

        // Read the temporary input file
        std::ifstream file( filename.c_str(), std::ios::binary );
        SUTIL_ASSERT( file.is_open() );
        std::vector<unsigned char> buffer = std::vector<unsigned char>( std::istreambuf_iterator<char>( file ), {} );
        const std::string ptx_string( buffer.begin(), buffer.end() );

        // Actually compile the module and store the result in the OptiX disk cache
        char   log[2048];
        size_t sizeof_log = sizeof(log);
        OptixModule module = nullptr;
        OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
            context,
            &module_compile_options,
            &pipeline_compile_options,
            ptx_string.c_str(), ptx_string.size(),
            log, &sizeof_log,
            &module ) );

        // Clean up
        optixModuleDestroy( module );
        optixDeviceContextDestroy( context );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }

    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMotionGeometry/motionHelper.hpp | C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <memory>
namespace {
// Affine 3x4 transform stored row-major (3 rows, 4 columns; the fourth column
// is the translation).  All operations treat it as a 4x4 matrix with an
// implicit last row (0, 0, 0, 1), which is why the determinant/inverse
// formulas below contain literal 0 and 1 factors folded in.
struct Matrix3x4
{
    // row-major matrix with 4 cols, 3 rows
    float m[12];

    // The identity transform (shared static instance).
    inline static const Matrix3x4& Identity()
    {
        static Matrix3x4 m = { { 1.0, 0.0, 0.0, 0, 0.0, 1.0, 0.0, 0, 0.0, 0.0, 1.0, 0 } };
        return m;
    }

    // Non-uniform scale by s.
    inline static Matrix3x4 Scale( const float3& s )
    {
        Matrix3x4 m = { { s.x, 0.0, 0.0, 0, 0.0, s.y, 0.0, 0, 0.0, 0.0, s.z, 0 } };
        return m;
    }

    // Pure translation by t.
    inline static Matrix3x4 Translation( const float3& t )
    {
        Matrix3x4 m = { { 1, 0.0, 0.0, t.x, 0.0, 1, 0.0, t.y, 0.0, 0.0, 1, t.z } };
        return m;
    }

    // Affine composition: returns this * b (apply b first, then this).
    Matrix3x4 operator*( const Matrix3x4& b ) const
    {
        Matrix3x4 result;
        for( unsigned int r = 0; r < 3; ++r )
        {
            for( unsigned int c = 0; c < 4; ++c )
            {
                float sum = 0.0f;
                for( unsigned int k = 0; k < 3; ++k )
                {
                    float rk = this->m[r * 4 + k];
                    float kc = b.m[k * 4 + c];
                    sum += rk * kc;
                }
                // Translation column: add this->translation (b's implicit
                // last row is 0,0,0,1).
                if( c == 3 )
                    sum += this->m[r * 4 + c];
                result.m[r * 4 + c] = sum;
            }
        }
        return result;
    }

    // Transforms point v (rotation/scale plus translation).
    float3 operator*( const float3& v ) const
    {
        float3 res;
        res.x = v.x * m[0] + v.y * m[1] + v.z * m[2] + m[3];
        res.y = v.x * m[4] + v.y * m[5] + v.z * m[6] + m[7];
        res.z = v.x * m[8] + v.y * m[9] + v.z * m[10] + m[11];
        return res;
    }

    // Returns the determinant of the matrix.
    // (4x4 cofactor expansion with the implicit last row 0,0,0,1 written out.)
    float determinant() const
    {
        float d =
            m[0]*m[5]*m[10]*1-
            m[0]*m[5]*m[11]*0+m[0]*m[9]*0*m[7]-
            m[0]*m[9]*m[6]*1+m[0]*0*m[6]*m[11]-
            m[0]*0*m[10]*m[7]-m[4]*m[1]*m[10]*1+m[4]*m[1]*m[11]*0-
            m[4]*m[9]*0*m[3]+m[4]*m[9]*m[2]*1-
            m[4]*0*m[2]*m[11]+m[4]*0*m[10]*m[3]+m[8]*m[1]*m[6]*1-
            m[8]*m[1]*0*m[7]+m[8]*m[5]*0*m[3]-
            m[8]*m[5]*m[2]*1+m[8]*0*m[2]*m[7]-
            m[8]*0*m[6]*m[3]-
            0*m[1]*m[6]*m[11]+0*m[1]*m[10]*m[7]-
            0*m[5]*m[10]*m[3]+0*m[5]*m[2]*m[11]-
            0*m[9]*m[2]*m[7]+0*m[9]*m[6]*m[3];
        return d;
    }

    // Returns the inverse of the matrix.
    // (Adjugate divided by determinant; assumes the matrix is invertible,
    // i.e. determinant() != 0.)
    Matrix3x4 inverse() const
    {
        Matrix3x4 result;

        const float d = 1.0f / determinant();

        result.m[0]  = d * (m[5] * (m[10] * 1 - 0 * m[11]) + m[9] * (0 * m[7] - m[6] * 1) + 0 * (m[6] * m[11] - m[10] * m[7]));
        result.m[4]  = d * (m[6] * (m[8] * 1 - 0 * m[11]) + m[10] * (0 * m[7] - m[4] * 1) + 0 * (m[4] * m[11] - m[8] * m[7]));
        result.m[8]  = d * (m[7] * (m[8] * 0 - 0 * m[9]) + m[11] * (0 * m[5] - m[4] * 0) + 1 * (m[4] * m[9] - m[8] * m[5]));
        result.m[1]  = d * (m[9] * (m[2] * 1 - 0 * m[3]) + 0 * (m[10] * m[3] - m[2] * m[11]) + m[1] * (0 * m[11] - m[10] * 1));
        result.m[5]  = d * (m[10] * (m[0] * 1 - 0 * m[3]) + 0 * (m[8] * m[3] - m[0] * m[11]) + m[2] * (0 * m[11] - m[8] * 1));
        result.m[9]  = d * (m[11] * (m[0] * 0 - 0 * m[1]) + 1 * (m[8] * m[1] - m[0] * m[9]) + m[3] * (0 * m[9] - m[8] * 0));
        result.m[2]  = d * (0 * (m[2] * m[7] - m[6] * m[3]) + m[1] * (m[6] * 1 - 0 * m[7]) + m[5] * (0 * m[3] - m[2] * 1));
        result.m[6]  = d * (0 * (m[0] * m[7] - m[4] * m[3]) + m[2] * (m[4] * 1 - 0 * m[7]) + m[6] * (0 * m[3] - m[0] * 1));
        result.m[10] = d * (1 * (m[0] * m[5] - m[4] * m[1]) + m[3] * (m[4] * 0 - 0 * m[5]) + m[7] * (0 * m[1] - m[0] * 0));
        result.m[3]  = d * (m[1] * (m[10] * m[7] - m[6] * m[11]) + m[5] * (m[2] * m[11] - m[10] * m[3]) + m[9] * (m[6] * m[3] - m[2] * m[7]));
        result.m[7]  = d * (m[2] * (m[8] * m[7] - m[4] * m[11]) + m[6] * (m[0] * m[11] - m[8] * m[3]) + m[10] * (m[4] * m[3] - m[0] * m[7]));
        result.m[11] = d * (m[3] * (m[8] * m[5] - m[4] * m[9]) + m[7] * (m[0] * m[9] - m[8] * m[1]) + m[11] * (m[4] * m[1] - m[0] * m[5]));

        return result;
    }
};
// Minimal quaternion type used for motion-transform rotations.
class Quaternion
{
  public:
    Quaternion();
    Quaternion( float x, float y, float z, float w );
    // Rotation of 'angle' degrees about 'axis' (axis need not be normalized).
    Quaternion( const float3& axis, double angle );

    Quaternion& operator*=( const Quaternion& q1 );

    /** quaternion x, y, z, w */
    float4 m_q;
};
// Default constructor: the identity rotation (x = y = z = 0, w = 1).
Quaternion::Quaternion()
{
    m_q = make_float4( 0.0f, 0.0f, 0.0f, 1.0f );
}
/*
Quaternion::Quaternion( float x, float y, float z, float w )
{
m_q = make_float4( x, y, z, w );
}
*/
// Axis/angle constructor: rotation of 'angle' DEGREES about 'axis'.
// Standard half-angle form: (sin(a/2)*axis, cos(a/2)).
Quaternion::Quaternion( const float3& axis, double angle )
{
    const float3 naxis  = normalize( axis );
    const double radian = angle * ( M_PI / 180 );
    const float  s      = (float)sin( radian / 2 );
    m_q.x = naxis.x * s;
    m_q.y = naxis.y * s;
    m_q.z = naxis.z * s;
    m_q.w = (float)cos( radian / 2 );
}
/*
Quaternion& Quaternion::operator*=( const Quaternion& q1 )
{
m_q = make_float4( m_q.w * q1.m_q.x + m_q.x * q1.m_q.w + m_q.y * q1.m_q.z - m_q.z * q1.m_q.y,
m_q.w * q1.m_q.y + m_q.y * q1.m_q.w + m_q.z * q1.m_q.x - m_q.x * q1.m_q.z,
m_q.w * q1.m_q.z + m_q.z * q1.m_q.w + m_q.x * q1.m_q.y - m_q.y * q1.m_q.x,
m_q.w * q1.m_q.w - m_q.x * q1.m_q.x - m_q.y * q1.m_q.y - m_q.z * q1.m_q.z );
return *this;
}
*/
// Normalized linear interpolation between two quaternions: componentwise
// lerp followed by renormalization (cheaper than slerp, non-constant speed).
static inline Quaternion nlerp( const Quaternion& quat0, const Quaternion& quat1, float t )
{
    Quaternion q;
    q.m_q = lerp( quat0.m_q, quat1.m_q, t );
    q.m_q = normalize( q.m_q );
    return q;
}
// Componentwise linear interpolation of two SRT keys, with the quaternion
// part renormalized afterwards (nlerp), matching how OptiX interpolates SRT
// motion keys between time samples.
OptixSRTData lerp( const OptixSRTData& a, const OptixSRTData& b, float t )
{
    OptixSRTData r;
    // Scale/shear and pivot components.
    r.sx  = ::lerp( a.sx, b.sx, t );
    r.a   = ::lerp( a.a, b.a, t );
    r.b   = ::lerp( a.b, b.b, t );
    r.pvx = ::lerp( a.pvx, b.pvx, t );
    r.sy  = ::lerp( a.sy, b.sy, t );
    r.c   = ::lerp( a.c, b.c, t );
    r.pvy = ::lerp( a.pvy, b.pvy, t );
    r.sz  = ::lerp( a.sz, b.sz, t );
    r.pvz = ::lerp( a.pvz, b.pvz, t );
    // Rotation quaternion.
    r.qx  = ::lerp( a.qx, b.qx, t );
    r.qy  = ::lerp( a.qy, b.qy, t );
    r.qz  = ::lerp( a.qz, b.qz, t );
    r.qw  = ::lerp( a.qw, b.qw, t );
    // Translation.
    r.tx  = ::lerp( a.tx, b.tx, t );
    r.ty  = ::lerp( a.ty, b.ty, t );
    r.tz  = ::lerp( a.tz, b.tz, t );

    // Renormalize the interpolated quaternion so it remains a valid rotation.
    const float inv_qLength = 1.f / sqrtf( r.qx * r.qx + r.qy * r.qy + r.qz * r.qz + r.qw * r.qw );
    r.qx *= inv_qLength;
    r.qy *= inv_qLength;
    r.qz *= inv_qLength;
    r.qw *= inv_qLength;

    return r;
}
// Converts an SRT key into the equivalent row-major 3x4 matrix
// m = R * [shear/scale | pivot+translation], written into m[0..11].
// The composition is done in place: first the rotation, then the pivot/
// translation column, then the shear/scale columns folded in from right to
// left (z column, y column, x column) so earlier columns are still the pure
// rotation when they are read.
void srtToMatrix( const OptixSRTData& srt, float* m )
{
    const float4 q = make_float4( srt.qx, srt.qy, srt.qz, srt.qw );

    // q is assumed to be normalized, but to be sure, normalize again
    // Note: dividing by the SQUARED length once is intentional — each matrix
    // term below is a product q.* * nq.*, which then equals the product of
    // two components of the unit quaternion.
    const float  inv_sql = 1.f / ( srt.qx * srt.qx + srt.qy * srt.qy + srt.qz * srt.qz + srt.qw * srt.qw );
    const float4 nq      = make_float4( q.x * inv_sql, q.y * inv_sql, q.z * inv_sql, q.w * inv_sql );

    const float sqw = q.w * nq.w;
    const float sqx = q.x * nq.x;
    const float sqy = q.y * nq.y;
    const float sqz = q.z * nq.z;

    const float xy = q.x * nq.y;
    const float zw = q.z * nq.w;
    const float xz = q.x * nq.z;
    const float yw = q.y * nq.w;
    const float yz = q.y * nq.z;
    const float xw = q.x * nq.w;

    // Rotation matrix from the (normalized) quaternion.
    m[0]  = ( sqx - sqy - sqz + sqw );
    m[1]  = 2.0f * ( xy - zw );
    m[2]  = 2.0f * ( xz + yw );

    m[4]  = 2.0f * ( xy + zw );
    m[5]  = ( -sqx + sqy - sqz + sqw );
    m[6]  = 2.0f * ( yz - xw );

    m[8]  = 2.0f * ( xz - yw );
    m[9]  = 2.0f * ( yz + xw );
    m[10] = ( -sqx - sqy + sqz + sqw );

    // Translation column: rotate the pivot and add the translation.
    m[3]  = m[0] * srt.pvx + m[1] * srt.pvy + m[2] * srt.pvz + srt.tx;
    m[7]  = m[4] * srt.pvx + m[5] * srt.pvy + m[6] * srt.pvz + srt.ty;
    m[11] = m[8] * srt.pvx + m[9] * srt.pvy + m[10] * srt.pvz + srt.tz;

    // Fold shear (a, b, c) and scale (sx, sy, sz) into the rotation columns,
    // right-to-left so untouched columns still hold pure rotation values.
    m[2]  = m[0] * srt.b + m[1] * srt.c + m[2] * srt.sz;
    m[6]  = m[4] * srt.b + m[5] * srt.c + m[6] * srt.sz;
    m[10] = m[8] * srt.b + m[9] * srt.c + m[10] * srt.sz;

    m[1]  = m[0] * srt.a + m[1] * srt.sy;
    m[5]  = m[4] * srt.a + m[5] * srt.sy;
    m[9]  = m[8] * srt.a + m[9] * srt.sy;

    m[0]  = m[0] * srt.sx;
    m[4]  = m[4] * srt.sx;
    m[8]  = m[8] * srt.sx;
}
// OptixMatrixMotionTransform with storage for 'motionKeys' matrix keys.
// The base struct holds the first two keys; OptiX expects any additional
// keys to follow immediately in memory, which 'additionalTransforms'
// provides.  64-byte alignment matches OptiX's transform requirements.
template <unsigned int motionKeys>
struct alignas( 64 ) MatrixMotionTransformFixedSize : OptixMatrixMotionTransform
{
    // must be strictly after OptixMatrixMotionTransform::transform
    float additionalTransforms[motionKeys - 2][12];

    MatrixMotionTransformFixedSize()
    {
        //static_assert(sizeof(MatrixMotionTransform<motionKeys>) == sizeof(OptixMatrixMotionTransform)+(motionKeys-2)*12*sizeof(float), "size/alignment error");
        motionOptions.numKeys = motionKeys;
    }

    // Returns a pointer to the 12 floats of motion key 'key'.
    // NOTE(review): indexes transform[key] for all keys — presumably valid
    // because additionalTransforms sits contiguously after transform.
    float* motionKey( unsigned int key ) { return transform[key]; }
};
// Two-key specialization: OptixMatrixMotionTransform already holds both keys,
// so no extra storage is needed (a zero-length array member would be invalid).
template <>
struct alignas( 64 ) MatrixMotionTransformFixedSize<2> : OptixMatrixMotionTransform
{
    MatrixMotionTransformFixedSize() { motionOptions.numKeys = 2; }

    // Returns the 12-float 3x4 matrix for motion key 'key' (0 or 1).
    float* motionKey( unsigned int key ) { return transform[key]; }
};
// CRTP base for host-side arrays of OptiX motion transforms.
// Derived must provide: static size_t byteSizePerTransform( unsigned int numKeys ).
// Storage is a single raw byte buffer holding 'numTransforms' transforms, each
// padded by Derived so consecutive transforms keep 64-byte alignment on upload.
template <class Derived, typename OptixDataType>
class MotionTransformArrayBase
{
  public:
    // Allocates storage for 'numTransforms' transforms with 'numKeys' motion
    // keys each. OptiX motion transforms need at least two keys, so numKeys is
    // clamped to 2 *before* it is stored: m_numKeys must match the key count
    // used for allocation, otherwise transform()/byteSize() would compute
    // offsets with a different (and, for numKeys < 2, underflowing) stride.
    MotionTransformArrayBase( size_t numTransforms, unsigned int numKeys )
        : m_numTransforms( numTransforms )
        , m_numKeys( numKeys < 2 ? 2 : numKeys )
    {
        if( numTransforms )
            m_data = std::unique_ptr<char[]>( new char[numTransforms * Derived::byteSizePerTransform( m_numKeys )] );
    }

    // Deep copy: allocates a fresh buffer and copies the raw transform bytes.
    MotionTransformArrayBase( const MotionTransformArrayBase& other )
        : MotionTransformArrayBase( other.m_numTransforms, other.m_numKeys )
    {
        memcpy( data(), other.data(), byteSize() );
    }

    // Move: start empty, then steal 'other's state via swap.
    MotionTransformArrayBase( MotionTransformArrayBase&& other ) noexcept
        : MotionTransformArrayBase( 0, 0 )
    {
        swap( *this, other );
    }

    friend void swap( MotionTransformArrayBase& a, MotionTransformArrayBase& b )
    {
        using std::swap;
        swap( a.m_numTransforms, b.m_numTransforms );
        swap( a.m_numKeys, b.m_numKeys );
        swap( a.m_data, b.m_data );
    }

    // Unified copy-and-swap assignment. The by-value parameter is copy- or
    // move-constructed depending on the argument, so this single overload
    // serves both copy and move assignment. (A separate rvalue-reference
    // overload would make assignment from an rvalue ambiguous, since both
    // overloads are exact matches for an rvalue argument.)
    MotionTransformArrayBase& operator=( MotionTransformArrayBase other )
    {
        swap( *this, other );
        return *this;
    }

    // Reference to transform 'transformIdx'; transforms are laid out at a
    // fixed per-transform stride of Derived::byteSizePerTransform(m_numKeys).
    OptixDataType& transform( size_t transformIdx )
    {
        return *(OptixDataType*)( (char*)m_data.get() + Derived::byteSizePerTransform( m_numKeys ) * transformIdx );
    }
    const OptixDataType& transform( size_t transformIdx ) const
    {
        return *(OptixDataType*)( (char*)m_data.get() + Derived::byteSizePerTransform( m_numKeys ) * transformIdx );
    }

    void*       data() { return m_data.get(); }
    const void* data() const { return m_data.get(); }

    unsigned int numKeys() const { return m_numKeys; }
    size_t       numTransforms() const { return m_numTransforms; }

    // Per-transform byte stride, and total buffer size in bytes.
    size_t byteSizePerTransform() const { return Derived::byteSizePerTransform( m_numKeys ); }
    size_t byteSize() const { return m_numTransforms * Derived::byteSizePerTransform( m_numKeys ); }

  protected:
    // Rounds 'i' up to the next multiple of 64 (device alignment requirement).
    static size_t roundUp64( size_t i ) { return ( ( i + 64 - 1 ) / 64 ) * 64; }

  private:
    size_t                  m_numTransforms = 0;
    unsigned int            m_numKeys       = 2;  // always >= 2 (clamped in the constructor)
    std::unique_ptr<char[]> m_data;
};
// Array of OptixMatrixMotionTransform with a runtime number of motion keys.
// Each transform is padded to a multiple of 64 bytes so that every transform
// in the packed array satisfies the device-side 64-byte alignment requirement.
class MatrixMotionTransformArray : public MotionTransformArrayBase<MatrixMotionTransformArray, OptixMatrixMotionTransform>
{
  public:
    typedef MotionTransformArrayBase<MatrixMotionTransformArray, OptixMatrixMotionTransform> Base;

    MatrixMotionTransformArray( size_t numTransforms = 0, unsigned int numKeys = 2 )
        : Base( numTransforms, numKeys )
    {
    }

    // Pointer to the first 3x4 matrix (key 0) of transform 'transformIdx'.
    float*       motionData( size_t transformIdx ) { return &transform( transformIdx ).transform[0][0]; }
    const float* motionData( size_t transformIdx ) const { return &transform( transformIdx ).transform[0][0]; }

    // Pointer to the 3x4 matrix of motion key 'key' of transform 'transformIdx'.
    // Keys >= 2 index into the padding area directly behind the two embedded keys.
    float*       motionData( size_t transformIdx, unsigned int key ) { return &transform( transformIdx ).transform[key][0]; }
    const float* motionData( size_t transformIdx, unsigned int key ) const
    {
        return &transform( transformIdx ).transform[key][0];
    }

    using Base::byteSizePerTransform;
    static size_t byteSizePerTransform( unsigned int numKeys )
    {
        // OptixMatrixMotionTransform embeds two keys; only keys beyond the
        // second need extra 3x4 matrices. Guard numKeys < 2: the unsigned
        // subtraction ( numKeys - 2 ) would otherwise wrap to a huge value.
        const unsigned int extraKeys = numKeys > 2 ? numKeys - 2 : 0;
        // pad to 64 bytes to ensure 64 byte alignment when using byteSize() to compute size for array of motion transforms with N keys
        return roundUp64( sizeof( OptixMatrixMotionTransform ) + sizeof( float ) * 12 * extraKeys );
    }
};
// Array of OptixSRTMotionTransform with a runtime number of motion keys.
// Each transform is padded to a multiple of 64 bytes so that every transform
// in the packed array satisfies the device-side 64-byte alignment requirement.
class SRTMotionTransformArray : public MotionTransformArrayBase<SRTMotionTransformArray, OptixSRTMotionTransform>
{
  public:
    typedef MotionTransformArrayBase<SRTMotionTransformArray, OptixSRTMotionTransform> Base;

    SRTMotionTransformArray( size_t numTransforms = 0, unsigned int numKeys = 2 )
        : Base( numTransforms, numKeys )
    {
    }

    // Pointer to the SRT key array (key 0) of transform 'transformIdx'.
    OptixSRTData*       motionData( size_t transformIdx ) { return transform( transformIdx ).srtData; }
    const OptixSRTData* motionData( size_t transformIdx ) const { return transform( transformIdx ).srtData; }

    // SRT data of motion key 'key' of transform 'transformIdx'.
    // Keys >= 2 index into the padding area directly behind the two embedded keys.
    OptixSRTData&       motionData( size_t transformIdx, unsigned int key ) { return transform( transformIdx ).srtData[key]; }
    const OptixSRTData& motionData( size_t transformIdx, unsigned int key ) const
    {
        return transform( transformIdx ).srtData[key];
    }

    using Base::byteSizePerTransform;
    static size_t byteSizePerTransform( unsigned int numKeys )
    {
        // OptixSRTMotionTransform embeds two keys; only keys beyond the second
        // need extra OptixSRTData entries. Guard numKeys < 2: the unsigned
        // subtraction ( numKeys - 2 ) would otherwise wrap to a huge value.
        const unsigned int extraKeys = numKeys > 2 ? numKeys - 2 : 0;
        // pad to 64 bytes to ensure 64 byte alignment when using byteSize() to compute size for array of motion transforms with N keys
        return roundUp64( sizeof( OptixSRTMotionTransform ) + sizeof( OptixSRTData ) * extraKeys );
    }
};
// Convenience wrapper: a matrix motion transform array holding exactly one
// transform, so motionData() only needs the key index.
class MatrixMotionTransform : public MatrixMotionTransformArray
{
  public:
    typedef MatrixMotionTransformArray Base;

    MatrixMotionTransform( unsigned int numKeys = 2 )
        : MatrixMotionTransformArray( 1, numKeys )
    {
    }

    // Pointer to the 3x4 matrix of motion key 'key' of the single transform.
    float*       motionData( unsigned int key ) { return Base::motionData( 0, key ); }
    const float* motionData( unsigned int key ) const { return Base::motionData( 0, key ); }

  private:
    using Base::numTransforms;  // hidden: always 1 for this wrapper
};
// Builds an OptixSRTData from separate scale, shear, rotation (quaternion) and
// translation components, with independent pivot points for the scale/shear
// stage and the rotation stage.
// The scale/shear pivot is baked into the SRT's pv* fields; the rotation pivot
// is split between pv* (applied before rotation) and t* (applied after).
OptixSRTData buildSRT( const float3& scale,
                       const float3& shear,
                       const float3& scaleShearPivot,
                       const Quaternion& q,
                       const float3& rotationPivot,
                       const float3& translation )
{
    // Note that a pivot is a point and to do a transformation wrt. a pivot point, we need to apply an inverse translation (hence -pivot) before the transformation
    // to go back to 'world' space, the inverse of the pivot translation is applied (+pivot)
    // multiply scale and shear with the scaleShearPivot to bake the pivot into the S transformation, like: S * (p - p') = S*p - S*p'
    // (the upper-triangular S has scale on the diagonal and shear in a/b/c,
    // matching the field order of OptixSRTData below)
    float3 rotationPivotScalePivot = {
        scale.x * -scaleShearPivot.x + shear.x * -scaleShearPivot.y + shear.y * -scaleShearPivot.z,
        scale.y * -scaleShearPivot.y + shear.z * -scaleShearPivot.z, scale.z * -scaleShearPivot.z };
    // undo scale pivot after applying scale transformation
    rotationPivotScalePivot += scaleShearPivot;
    // apply pivot for rotation
    // SRT definition actually wants the pivot point, instead of the transformation for the pivot point
    // hence, we need to add the pivot point instead of subtracting it
    // NOTE(review): the code below subtracts rotationPivot here and adds it to
    // the translation afterwards; this matches the sign convention of the pv*/t*
    // fields in srtToMatrix() above — verify against the OptiX SRT definition.
    rotationPivotScalePivot -= rotationPivot;
    // apply translation and undo rotation pivot
    float3 translationM1RotationPivot = translation + rotationPivot;
    // Field order: sx, a, b, pvx, sy, c, pvy, sz, pvz, qx, qy, qz, qw, tx, ty, tz.
    return { scale.x,
             shear.x,
             shear.y,
             rotationPivotScalePivot.x,
             scale.y,
             shear.z,
             rotationPivotScalePivot.y,
             scale.z,
             rotationPivotScalePivot.z,
             q.m_q.x,
             q.m_q.y,
             q.m_q.z,
             q.m_q.w,
             translationM1RotationPivot.x,
             translationM1RotationPivot.y,
             translationM1RotationPivot.z };
}
// Convenience overload: SRT with no shear and no pivot points.
OptixSRTData buildSRT( const float3& scale, const Quaternion& q, const float3& translation )
{
    const float3 zero = make_float3( 0.0f );
    return buildSRT( scale, zero, zero, q, zero, translation );
}
} // namespace | yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMotionGeometry/optixMotionGeometry.cpp | C++ | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <glad/glad.h> // Needs to be included before gl_interop
#define TINYOBJLOADER_IMPLEMENTATION
#include "tiny_obj_loader.h" // Needs to be included before gl_interop
#undef TINYOBJLOADER_IMPLEMENTATION
#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/Aabb.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Camera.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/Trackball.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <sutil/Scene.h>
#include <optix_stack_size.h>
#include <GLFW/glfw3.h>
#include "optixMotionGeometry.h"
#include "vertices.h"
#include "motionHelper.hpp"
#include <cstdlib>
#include <array>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
// ---- UI state shared between the GLFW callbacks and the render loop ----
bool resize_dirty = false;  // set on window resize; consumed by handleResize()
bool minimized    = false;  // true while the window is iconified

// Camera state
bool             camera_changed = true;  // forces params.eye/U/V/W refresh in handleCameraUpdate()
sutil::Camera    camera;
sutil::Trackball trackball;

// Mouse state
int32_t mouse_button = -1;  // currently pressed button, or -1 if none
//------------------------------------------------------------------------------
//
// Local types
//
//------------------------------------------------------------------------------
// Shader binding table record: the OPTIX-mandated opaque header followed by
// the user payload T for the corresponding program group.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};

// SBT record types for the three program kinds used by this sample.
typedef Record<RayGenData>   RayGenRecord;
typedef Record<MissData>     MissRecord;
typedef Record<HitGroupData> HitGroupRecord;
// One ejected exhaust-fume sphere instance.
// Owns two raw CUDA allocations — the fume's GAS output buffer and the device
// copy of its SRT motion transform — which the destructor releases.
// The type is move-only: a default (member-wise) copy would duplicate the raw
// device pointers and cause a double cudaFree when both copies are destroyed.
class ExhaustFume
{
  public:
    ExhaustFume()
    {}

    // Non-copyable (see class comment); all uses in this file move.
    ExhaustFume( const ExhaustFume& ) = delete;
    ExhaustFume& operator=( const ExhaustFume& ) = delete;

    ExhaustFume( ExhaustFume&& other )
    {
        // Members start at their defaults (see initializers below), then take
        // over 'other's resources; 'other' is left in the empty default state.
        swap( other );
    }
    ExhaustFume& operator=( ExhaustFume&& other )
    {
        // Swap-based move assignment: 'other' ends up owning our previous
        // resources and frees them when it is destroyed.
        swap( other );
        return *this;
    }
    void swap( ExhaustFume& other ) // nothrow
    {
        // enable ADL (not necessary in our case, but good practice)
        using std::swap;
        // by swapping the members of two objects,
        // the two objects are effectively swapped
        swap( d_exploding_gas_output_buffer, other.d_exploding_gas_output_buffer );
        swap( exploding_gas_handle, other.exploding_gas_handle );
        swap( srt_animation, other.srt_animation );
        swap( d_srt, other.d_srt );
        swap( timeLastASRebuild, other.timeLastASRebuild );
        swap( localTime, other.localTime );
        swap( rotationSpeed, other.rotationSpeed );
        swap( lastRotationDegree, other.lastRotationDegree );
        swap( relativeEjectionSpeed, other.relativeEjectionSpeed );
        swap( remove, other.remove );
        swap( baseP, other.baseP );
    }
    ~ExhaustFume()
    {
        // cudaFree( nullptr ) is a no-op, so destroying a moved-from or
        // default-constructed object is safe.
        cudaFree( (void*)d_exploding_gas_output_buffer );
        cudaFree( (void*)d_srt );
    }

    CUdeviceptr             d_exploding_gas_output_buffer = 0;  // device GAS memory for this fume
    OptixTraversableHandle  exploding_gas_handle          = 0;
    SRTMotionTransformArray srt_animation;                      // host-side SRT motion keys
    CUdeviceptr             d_srt = 0;                          // device copy of srt_animation

    float timeLastASRebuild = 0.f;  // localTime of the last full GAS rebuild
    float localTime         = 0.f;  // seconds since ejection
    float rotationSpeed     = 0.f;  // rotation speed of plane at time of ejection
    float lastRotationDegree = 0.f;
    float relativeEjectionSpeed = 1;  // relative ejection speed to give the different fumes some variation
    bool  remove = false;             // marked for removal on the next update
    float3 baseP;                     // plane world-space position at ejection time
};
// All mutable application state for the sample: OptiX context/pipeline objects,
// acceleration-structure buffers, launch parameters and animation bookkeeping.
struct MotionGeometryState
{
    OptixDeviceContext context = 0;

    // Scratch buffers shared by all AS builds/updates this frame.
    size_t      temp_buffer_size   = 0;
    CUdeviceptr d_temp_buffer      = 0;
    CUdeviceptr d_temp_vertices[2] = {};  // vertex buffers for motion keys 0 and 1
    CUdeviceptr d_instances        = 0;   // device array of OptixInstance for the IAS
    size_t      d_instances_size   = 0;   // current byte capacity of d_instances

    unsigned int triangle_flags = OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT;

    // Build inputs reused across rebuilds/refits.
    OptixBuildInput ias_instance_input = {};
    OptixBuildInput triangle_input = {};
    OptixBuildInput triangle_input_fume = {};

    OptixAccelBuildOptions ias_accel_options ={};

    // Traversable handles for the IAS and the per-object GASes.
    OptixTraversableHandle ias_handle;
    OptixTraversableHandle static_gas_handle;
    OptixTraversableHandle deforming_gas_handle;
    OptixTraversableHandle exploding_gas_handle;
    OptixTraversableHandle plane_gas_handle;
    OptixTraversableHandle planePropeller_gas_handle;

    // Output buffers backing the traversables above.
    CUdeviceptr d_ias_output_buffer = 0;
    CUdeviceptr d_static_gas_output_buffer = 0;
    CUdeviceptr d_deforming_gas_output_buffer = 0;
    CUdeviceptr d_plane_gas_output_buffer = 0;
    CUdeviceptr d_planePropeller_gas_output_buffer = 0;

    sutil::Aabb planeAabb;  // model-space bounds of the plane mesh

    size_t ias_output_buffer_size = 0;
    size_t static_gas_output_buffer_size = 0;
    size_t deforming_gas_output_buffer_size = 0;
    size_t exploding_gas_output_buffer_size = 0;

    // Pipeline objects (rebuilt when the AO toggle changes, see keyCallback).
    OptixModule                 ptx_module = 0;
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixPipeline               pipeline = 0;
    OptixProgramGroup           raygen_prog_group = 0;
    OptixProgramGroup           miss_group = 0;
    OptixProgramGroup           miss_group_occlusion = 0;
    OptixProgramGroup           hit_group = 0;

    CUstream stream = 0;
    Params   params;     // host copy of the launch parameters
    Params*  d_params;   // device copy, uploaded each subframe

    // Animation clock (seconds).
    float time = 0.f;
    float time_last_frame = 0.f;
    float time_last_fume = 0.f;
    float targetFrameTime = 0.0333f;  // simulated-time step per frame (UP/DOWN keys)

    OptixShaderBindingTable sbt = {};

    std::vector<OptixInstance> instances;  // static part of the IAS instance list
    std::vector<ExhaustFume>   fume;       // live exhaust fumes (appended per-frame)

    bool followPlane = false;  // camera follows the plane ('V' key)
    bool renderAO    = true;   // ambient occlusion toggle ('A' key)
};
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
// Sphere tessellation: full resolution for the static/deforming spheres,
// reduced resolution for the small exhaust-fume spheres.
const int32_t g_tessellation_resolution = 128;
const int32_t g_tessellation_resolution_fume = g_tessellation_resolution / 8;

// How often (per second of fume-local time) the exploding-fume GAS is fully
// rebuilt instead of refitted, to maintain acceleration-structure quality.
const float g_exploding_gas_rebuild_frequency = 10.f;
// Animation state for the toy plane (body + propeller).
struct PlaneAnimation {
    SRTMotionTransformArray srt_animation;           // host-side SRT keys for the plane body
    SRTMotionTransformArray srt_animationPropeller;  // host-side SRT keys for the propeller
    CUdeviceptr d_srts;            // device copy of srt_animation (allocated outside this chunk — presumably in scene setup; verify)
    CUdeviceptr d_srtsPropeller;   // device copy of srt_animationPropeller
    float planeSpeed = 0.4f;       // rotations per second around the flight circle ('M'/'N' keys)
    float lastRotationDegree = 120;  // rotation angle at the last frame, in degrees
} plane;
// Animation state for the deforming sphere orbiting the scene.
struct DeformSphereAnimation {
    MatrixMotionTransformArray matrix_animation;  // host-side matrix motion keys
    CUdeviceptr d_matrices;        // device copy of matrix_animation (allocated outside this chunk — verify)
    float rotationSpeed = 0.1f;    // rotations per second ('J'/'H' keys)
    float lastRotationDegree = 0;  // rotation angle at the last frame, in degrees
} deformSphere;
// Forward declarations (defined later in this file); keyCallback below needs
// them to rebuild the pipeline and spawn fumes.
void addFume( MotionGeometryState& state );
void createModule( MotionGeometryState& state );
void createProgramGroups( MotionGeometryState& state );
void createPipeline( MotionGeometryState& state );
void buildMeshAccel( MotionGeometryState& state );
void createSBT( MotionGeometryState& state );
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// GLFW mouse-button callback: starts trackball tracking at the current cursor
// position on press, and clears the active-button state otherwise.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    double cursor_x = 0.0;
    double cursor_y = 0.0;
    glfwGetCursorPos( window, &cursor_x, &cursor_y );

    if( action != GLFW_PRESS )
    {
        mouse_button = -1;
        return;
    }

    mouse_button = button;
    trackball.startTracking( static_cast<int>( cursor_x ), static_cast<int>( cursor_y ) );
}
// GLFW cursor-move callback: while a mouse button is held, updates the
// trackball (left button orbits around the look-at point, right button pans
// with a fixed eye) and flags the camera as dirty.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    Params& params = static_cast<MotionGeometryState*>( glfwGetWindowUserPointer( window ) )->params;

    const bool left_held  = ( mouse_button == GLFW_MOUSE_BUTTON_LEFT );
    const bool right_held = ( mouse_button == GLFW_MOUSE_BUTTON_RIGHT );
    if( !left_held && !right_held )
        return;

    trackball.setViewMode( left_held ? sutil::Trackball::LookAtFixed : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), params.width, params.height );
    camera_changed = true;
}
// GLFW resize callback: records the new output resolution and marks both the
// camera and the output buffer as needing a refresh.
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    // Keep rendering at the current resolution when the window is minimized.
    if( minimized )
        return;

    // Output dimensions must be at least 1 in both x and y.
    sutil::ensureMinimumSize( res_x, res_y );

    MotionGeometryState& state = *static_cast<MotionGeometryState*>( glfwGetWindowUserPointer( window ) );
    state.params.width  = res_x;
    state.params.height = res_y;

    camera_changed = true;
    resize_dirty   = true;
}
// GLFW iconify callback: remembers whether the window is currently minimized
// so windowSizeCallback can ignore the bogus resize events GLFW emits then.
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = iconified > 0;
}
// GLFW keyboard callback.
// NOTE(review): only the Q/ESC handling is gated on GLFW_PRESS; every other
// branch belongs to the outer if/else chain and therefore fires only when
// action != GLFW_PRESS (i.e. on key release or repeat) — confirm this matches
// the intended UX before changing it.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    MotionGeometryState& state = *static_cast<MotionGeometryState*>( glfwGetWindowUserPointer( window ) );
    if( action == GLFW_PRESS )
    {
        if( key == GLFW_KEY_Q || key == GLFW_KEY_ESCAPE )
        {
            glfwSetWindowShouldClose( window, true );
        }
    }
    else if( key == GLFW_KEY_G )
    {
        // toggle UI draw
    }
    else if( key == GLFW_KEY_DOWN )
    {
        // larger simulated-time step per frame
        state.targetFrameTime *= 2;
    }
    else if( key == GLFW_KEY_UP )
    {
        // smaller simulated-time step per frame
        state.targetFrameTime /= 2;
    }
    else if( key == GLFW_KEY_M )
    {
        // speed up the plane
        plane.planeSpeed *= 1.5f;
    }
    else if( key == GLFW_KEY_N )
    {
        // slow down the plane
        plane.planeSpeed /= 1.5f;
    }
    else if( key == GLFW_KEY_J )
    {
        // speed up the deforming sphere's orbit
        deformSphere.rotationSpeed *= 1.5f;
    }
    else if( key == GLFW_KEY_H )
    {
        // slow down the deforming sphere's orbit
        deformSphere.rotationSpeed /= 1.5f;
    }
    else if( key == GLFW_KEY_V )
    {
        // toggle camera-follows-plane mode
        state.followPlane = !state.followPlane;
    }
    else if( key == GLFW_KEY_B )
    {
        // eject an additional exhaust fume
        addFume( state );
    }
    else if( key == GLFW_KEY_A )
    {
        // toggle ambient occlusion; the module/program groups/pipeline/SBT are
        // rebuilt because the shader configuration changes with this setting
        state.renderAO = !state.renderAO;
        createModule( state );
        createProgramGroups( state );
        createPipeline( state );
        createSBT( state );
        state.params.ao = state.renderAO;
    }
}
// GLFW scroll-wheel callback: forwards the vertical scroll amount to the
// trackball (zoom); marks the camera dirty if the trackball consumed it.
static void scrollCallback( GLFWwindow* window, double xscroll, double yscroll )
{
    const bool consumed = trackball.wheelEvent( static_cast<int>( yscroll ) );
    if( consumed )
        camera_changed = true;
}
//------------------------------------------------------------------------------
//
// Helper functions
// TODO: some of these should move to sutil or optix util header
//
//------------------------------------------------------------------------------
// Prints the command-line usage for this sample to stderr and terminates the
// process with exit code 0. Never returns.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n"
              << "Options: --file | -f <filename> File for image output\n"
              << " --time | -t Animation time for image output (default 1)\n"
              << " --frames | -n Number of animation frames for image output (default 16)\n"
              << " --no-gl-interop Disable GL interop for display\n"
              << " --dim=<width>x<height> Set image dimensions; defaults to 1024x768\n"
              << " --help | -h Print this usage message\n";
    exit( 0 );
}
// One-time launch-parameter setup: default sampling settings, the CUDA stream
// used for all async work, and the device-side Params copy that
// launchSubframe() refreshes every frame.
void initLaunchParams( MotionGeometryState& state )
{
    state.params.frame_buffer = nullptr;  // Will be set when output buffer is mapped
    state.params.subframe_index = 0u;
    state.params.spp = 1u;   // samples per pixel
    state.params.ao = true;  // ambient occlusion on by default (toggled via the 'A' key)

    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params ), sizeof( Params ) ) );
}
// World-space position of the plane: evaluates the plane's SRT animation
// halfway between its two motion keys and transforms the model-space AABB
// center by the resulting matrix.
float3 getPlaneWSPos( const MotionGeometryState& state )
{
    const OptixSRTData mid_key = lerp( plane.srt_animation.motionData( 0, 0 ), plane.srt_animation.motionData( 0, 1 ), 0.5f );
    Matrix3x4 xform;
    srtToMatrix( mid_key, xform.m );
    return xform * state.planeAabb.center();
}
// Per-frame camera maintenance: optionally re-centers the view on the plane
// (follow mode), then refreshes the launch-parameter camera basis whenever the
// camera changed.
void handleCameraUpdate( MotionGeometryState& state )
{
    if( state.followPlane )
    {
        // Shift both the look-at point and the eye by the plane's displacement
        // so the viewing direction is preserved while tracking the plane.
        const float3 plane_pos = getPlaneWSPos( state );
        const float3 offset    = plane_pos - camera.lookat();
        camera.setLookat( plane_pos );
        camera.setEye( camera.eye() + offset );
        trackball.reinitOrientationFromCamera();
        camera_changed = true;
    }

    if( !camera_changed )
        return;
    camera_changed = false;

    Params& params = state.params;
    camera.setAspectRatio( static_cast<float>( params.width ) / static_cast<float>( params.height ) );
    params.eye = camera.eye();
    camera.UVWFrame( params.U, params.V, params.W );
}
// Applies a pending window resize to the output buffer (no-op when the
// resize_dirty flag is not set).
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, Params& params )
{
    if( resize_dirty )
    {
        resize_dirty = false;
        output_buffer.resize( params.width, params.height );
    }
}
// Renders one subframe: maps the output buffer, uploads Params asynchronously
// on the state's stream (ordered before the launch on the same stream),
// launches the pipeline over the full image, then unmaps and synchronizes.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, MotionGeometryState& state )
{
    // Launch
    uchar4* result_buffer_data = output_buffer.map();
    state.params.frame_buffer = result_buffer_data;
    CUDA_CHECK( cudaMemcpyAsync(
        reinterpret_cast<void*>( state.d_params ),
        &state.params, sizeof( Params ),
        cudaMemcpyHostToDevice, state.stream
    ) );

    OPTIX_CHECK( optixLaunch(
        state.pipeline,
        state.stream,
        reinterpret_cast<CUdeviceptr>( state.d_params ),
        sizeof( Params ),
        &state.sbt,
        state.params.width,   // launch width
        state.params.height,  // launch height
        1                     // launch depth
    ) );
    output_buffer.unmap();
    CUDA_SYNC_CHECK();
}
// Blits the rendered subframe (via its PBO) to the window, scaling to the
// actual framebuffer resolution (which can differ from the window size on
// HiDPI displays).
void displaySubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, sutil::GLDisplay& gl_display, GLFWwindow* window )
{
    int framebuf_res_x = 0;
    int framebuf_res_y = 0;
    glfwGetFramebufferSize( window, &framebuf_res_x, &framebuf_res_y );

    gl_display.display( output_buffer.width(), output_buffer.height(),
                        framebuf_res_x, framebuf_res_y,
                        output_buffer.getPBO() );
}
// OptiX log callback: mirrors context messages to stderr as
// "[level][tag]: message". Builds the line in one string so concurrent
// messages are written with a single stream insertion.
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */ )
{
    std::ostringstream line;
    line << "[" << std::setw( 2 ) << level << "][" << std::setw( 12 ) << tag << "]: " << message << "\n";
    std::cerr << line.str();
}
void initCameraState()
{
camera.setEye( make_float3( -7.f, 3.f, -5.f ) );
camera.setLookat( make_float3( 0 ) );
camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
camera.setFovY( 35.0f );
camera_changed = true;
trackball.setCamera( &camera );
trackball.setMoveSpeed( 10.0f );
trackball.setReferenceFrame(
make_float3( 1.0f, 0.0f, 0.0f ),
make_float3( 0.0f, 0.0f, 1.0f ),
make_float3( 0.0f, 1.0f, 0.0f )
);
trackball.setGimbalLock( true );
}
// Creates the OptiX device context on the current CUDA context, with log
// messages routed to context_log_cb at verbosity level 4.
void createContext( MotionGeometryState& state )
{
    // Initialize CUDA (cudaFree(0) forces lazy context creation)
    CUDA_CHECK( cudaFree( 0 ) );

    OptixDeviceContext context;
    CUcontext cu_ctx = 0;  // zero means take the current context
    OPTIX_CHECK( optixInit() );  // must precede any other OptiX call
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &context_log_cb;
    options.logCallbackLevel = 4;
    OPTIX_CHECK( optixDeviceContextCreate( cu_ctx, &options, &context ) );

    state.context = context;
}
// Fills both motion-key vertex buffers for a tessellated sphere:
// key 0 sampled at the previous frame's time, key 1 at the current time.
void launchGenerateAnimatedVertices( MotionGeometryState& state, AnimationMode animation_mode, float time_last_frame, float time_now, int tessellation_resolution )
{
    float3* key0_vertices = (float3*)state.d_temp_vertices[0];
    float3* key1_vertices = (float3*)state.d_temp_vertices[1];
    generateAnimatedVetrices( key0_vertices, animation_mode, time_last_frame, tessellation_resolution, tessellation_resolution );
    generateAnimatedVetrices( key1_vertices, animation_mode, time_now, tessellation_resolution, tessellation_resolution );
}
// Pseudo-random float in [0, 1], driven by the C library rand()
// (deterministic sequence; rand() is never seeded in this sample).
float randf()
{
    const float raw = static_cast<float>( rand() );
    return raw / static_cast<float>( RAND_MAX );
}
// Spawns a new exhaust fume at the plane's current position: sets up its
// two-key SRT motion transform (both keys initialized to the plane's current
// pose; animated later in updateMeshAccel), builds its motion GAS, and appends
// it to state.fume.
void addFume( MotionGeometryState& state )
{
    ExhaustFume fume ={};
    fume.baseP = getPlaneWSPos( state );
    // Randomize per-fume speeds slightly so the fumes don't move in lockstep.
    fume.rotationSpeed = plane.planeSpeed * (randf() * 0.1f + 0.8f);
    fume.lastRotationDegree = plane.lastRotationDegree;
    fume.relativeEjectionSpeed = randf() * 0.4f + 0.6f;

    // using an array is overkill here, but
    fume.srt_animation = SRTMotionTransformArray( 1, 2 );
    CUDA_CHECK( cudaMalloc( (void**)&fume.d_srt, fume.srt_animation.byteSize() ) );

    OptixSRTMotionTransform& t = fume.srt_animation.transform( 0 );
    t.motionOptions.flags = 0;
    t.motionOptions.numKeys = fume.srt_animation.numKeys();
    t.motionOptions.timeBegin = 0;
    t.motionOptions.timeEnd = 1;

    // Both keys start at the plane's end-of-frame pose (key 1 of the plane).
    fume.srt_animation.motionData( 0, 0 ) = plane.srt_animation.motionData( 0, 1 );
    fume.srt_animation.motionData( 0, 1 ) = plane.srt_animation.motionData( 0, 1 );

    //// Generate exploding sphere vertices
    launchGenerateAnimatedVertices( state, AnimationMode_None, 0, 0, g_tessellation_resolution_fume );

    OptixAccelBuildOptions gas_accel_options = {};
    gas_accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_UPDATE
        | OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS | OPTIX_BUILD_FLAG_PREFER_FAST_TRACE;
    gas_accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
    gas_accel_options.motionOptions.numKeys = 2;
    gas_accel_options.motionOptions.timeBegin = 0;
    gas_accel_options.motionOptions.timeEnd = 1;

    // NOTE(review): the queried sizes 's' are not used below — the buffers are
    // sized with state.temp_buffer_size / state.exploding_gas_output_buffer_size
    // (presumably computed in buildMeshAccel for the same build input), and the
    // return code of optixAccelComputeMemoryUsage is not OPTIX_CHECK'ed — verify.
    OptixAccelBufferSizes s;
    optixAccelComputeMemoryUsage( state.context, &gas_accel_options, &state.triangle_input_fume, 1, &s );

    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &fume.d_exploding_gas_output_buffer ), state.exploding_gas_output_buffer_size ) );

    OPTIX_CHECK( optixAccelBuild( state.context,
                                  state.stream,  // CUDA stream
                                  &gas_accel_options, &state.triangle_input_fume,
                                  1,  // num build inputs
                                  state.d_temp_buffer, state.temp_buffer_size,
                                  fume.d_exploding_gas_output_buffer, state.exploding_gas_output_buffer_size,
                                  &fume.exploding_gas_handle,
                                  nullptr, 0  // emitted property list
                                  ) );

    state.fume.emplace_back( std::move( fume ) );
}
void updateMeshAccel( MotionGeometryState& state )
{
// Generate deformed sphere vertices
launchGenerateAnimatedVertices( state, AnimationMode_Deform, state.time_last_frame, state.time, g_tessellation_resolution );
// Update deforming GAS
OptixAccelBuildOptions gas_accel_options = {};
gas_accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_UPDATE | OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS | OPTIX_BUILD_FLAG_PREFER_FAST_TRACE;
gas_accel_options.operation = OPTIX_BUILD_OPERATION_UPDATE;
gas_accel_options.motionOptions.numKeys = 2;
gas_accel_options.motionOptions.timeBegin = 0;
gas_accel_options.motionOptions.timeEnd = 1;
OPTIX_CHECK( optixAccelBuild(
state.context,
state.stream, // CUDA stream
&gas_accel_options,
&state.triangle_input,
1, // num build inputs
state.d_temp_buffer,
state.temp_buffer_size,
state.d_deforming_gas_output_buffer,
state.deforming_gas_output_buffer_size,
&state.deforming_gas_handle,
nullptr, // emitted property list
0 // num emitted properties
) );
#if 1
{
float timePassed = state.time - state.time_last_frame;
for( size_t i=0; i<state.fume.size(); ++i )
{
ExhaustFume& fume = state.fume[i];
if( fume.remove )
{
state.fume.erase( state.fume.begin() + i );
i--;
continue;
}
OptixAccelBuildOptions fume_gas_accel_options = gas_accel_options;
fume_gas_accel_options.operation = OPTIX_BUILD_OPERATION_UPDATE;
// Generate exploding sphere vertices
// and update the movement (instance) animation
const float animationLength = M_PIf * 0.5f;
const float timeOffset = M_PIf;
const float maxTime = animationLength + timeOffset;
const float localTime = fume.localTime + timeOffset;
if( localTime + timePassed >= maxTime )
{
launchGenerateAnimatedVertices( state, AnimationMode_Explode, localTime, maxTime, g_tessellation_resolution_fume );
fume_gas_accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
fume_gas_accel_options.motionOptions.timeEnd = ( maxTime - localTime ) / timePassed;
fume_gas_accel_options.buildFlags |= OPTIX_MOTION_FLAG_END_VANISH;
OptixSRTMotionTransform& t = fume.srt_animation.transform( 0 );
t.motionOptions.timeEnd = ( maxTime - localTime ) / timePassed;
t.motionOptions.flags |= OPTIX_MOTION_FLAG_END_VANISH;
fume.remove = true;
}
else
{
launchGenerateAnimatedVertices( state, AnimationMode_Explode, localTime, localTime + timePassed, g_tessellation_resolution_fume );
}
{
float3 scale = make_float3( 0.05f );
float3 shear = make_float3( 0 );
float3 scaleShearPivot = make_float3( 0 );
float3 rotationPivot = make_float3( 0, -1.2f, 0.0f );
float3 translation = make_float3( 0, 1.2f, 0.0f );
float3 ejectTrans = fume.relativeEjectionSpeed * make_float3( 0, 0.2f, 0 );
float rotations = fume.rotationSpeed * timePassed * 360;
float oldRot = fume.lastRotationDegree;
float newRot = oldRot + rotations;
if( newRot >= 360 )
{
oldRot -= 360;
newRot -= 360;
}
fume.lastRotationDegree = newRot;
fume.srt_animation.motionData( 0, 0 ) =
buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 1, 0, 0 ), oldRot ), rotationPivot - fume.localTime * ejectTrans,
translation + fume.localTime * ejectTrans );
fume.srt_animation.motionData( 0, 1 ) =
buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 1, 0, 0 ), newRot ), rotationPivot - ( fume.localTime + timePassed ) * ejectTrans,
translation + ( fume.localTime + timePassed ) * ejectTrans );
}
// Occasionally rebuild to maintain AS quality
if( fume.localTime + timePassed - fume.timeLastASRebuild > 1 / g_exploding_gas_rebuild_frequency )
{
fume_gas_accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
fume.timeLastASRebuild = fume.localTime + timePassed;
}
fume.localTime += timePassed;
OptixAccelBufferSizes s;
optixAccelComputeMemoryUsage( state.context, &fume_gas_accel_options, &state.triangle_input_fume, 1, &s );
OPTIX_CHECK( optixAccelBuild( state.context, state.stream,
&fume_gas_accel_options, &state.triangle_input_fume,
1, // num build inputs
state.d_temp_buffer, state.temp_buffer_size,
fume.d_exploding_gas_output_buffer, state.exploding_gas_output_buffer_size,
&fume.exploding_gas_handle,
nullptr, 0 // emitted property list
) );
}
}
#endif
// Update the IAS
// We refit the IAS as the relative positions of the spheres don't change much so AS quality after update is fine.
#if 1
{
{
float3 scale = make_float3( 0.2f );
float3 shear = make_float3( 0 );
float3 scaleShearPivot = make_float3( 0 );
float3 rotationPivot = make_float3( 3, 0, 0 );
float3 translation = -rotationPivot;
float rotationsPerSecond = deformSphere.rotationSpeed;
float timePassed = state.time - state.time_last_frame;
float rotations = rotationsPerSecond * timePassed * 360;
float oldRot = deformSphere.lastRotationDegree;
float newRot = oldRot + rotations;
if( newRot >= 360 )
{
oldRot -= 360;
newRot -= 360;
}
deformSphere.lastRotationDegree = fmodf(newRot, 360);
for(unsigned int i=0; i<deformSphere.matrix_animation.numKeys(); ++i)
{
srtToMatrix( buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 0, 0, 1 ), lerp(oldRot, newRot, i / (deformSphere.matrix_animation.numKeys()-1.f)) ),
rotationPivot, translation ),
deformSphere.matrix_animation.motionData( 0, i ) );
}
}
{
float3 scale = make_float3( 0.02f );
float3 shear = make_float3( 0 );
float3 scaleShearPivot = make_float3( 0 );
float3 rotationPivot = make_float3( 0, -1.2f, 0.0f );
float3 translation = make_float3( 0, 1.2f, 0.0f );
float rotationsPerSecond = plane.planeSpeed;
float timePassed = state.time - state.time_last_frame;
float rotations = rotationsPerSecond * timePassed * 360;
plane.srt_animation.motionData( 0, 0 ) = plane.srt_animation.motionData( 0, 1 );
float oldRot = plane.lastRotationDegree;
float newRot = oldRot + rotations;
if( newRot >= 360 )
{
oldRot -= 360;
newRot -= 360;
}
plane.lastRotationDegree = fmodf( newRot, 360 );
plane.srt_animation.motionData( 0, 0 ) =
buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 1, 0, 0 ), oldRot ), rotationPivot, translation );
plane.srt_animation.motionData( 0, 1 ) =
buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 1, 0, 0 ), newRot ), rotationPivot, translation );
plane.srt_animation.motionData( 1, 0 ) = plane.srt_animation.motionData( 0, 0 );
plane.srt_animation.motionData( 1, 1 ) = plane.srt_animation.motionData( 0, 1 );
}
{
float3 scale = make_float3( 1 );
float3 shear = make_float3( 0 );
float3 scaleShearPivot = make_float3( 0 );
float3 rotationPivot = make_float3( 0.0003f, -0.4179f, 0.0f );
float3 translation = make_float3( 0 );
const float rotationsPerSecond = 5;
float lastLocalt = fmodf( state.time_last_frame, 1.f / rotationsPerSecond );
float nowlocalt = fmodf( state.time, 1.f / rotationsPerSecond );
if( lastLocalt > nowlocalt )
lastLocalt -= 1.f / rotationsPerSecond;
for( unsigned int i = 0; i < plane.srt_animationPropeller.numKeys(); ++i )
{
plane.srt_animationPropeller.motionData( 0, i ) =
buildSRT( scale, shear, scaleShearPivot,
Quaternion( make_float3( 0, 0, 1 ), lerp(lastLocalt, nowlocalt, i / (plane.srt_animationPropeller.numKeys() - 1.f)) * 360.f * rotationsPerSecond ),
rotationPivot, translation);
}
}
CUDA_CHECK( cudaMemcpy( (char*)deformSphere.d_matrices, deformSphere.matrix_animation.data(), deformSphere.matrix_animation.byteSize(), cudaMemcpyHostToDevice ) );
CUDA_CHECK( cudaMemcpy( (char*)plane.d_srts, plane.srt_animation.data(), plane.srt_animation.byteSize(), cudaMemcpyHostToDevice ) );
CUDA_CHECK( cudaMemcpy( (char*)plane.d_srtsPropeller, plane.srt_animationPropeller.data(), plane.srt_animationPropeller.byteSize(), cudaMemcpyHostToDevice ) );
}
#endif
#if 1
{
std::vector<OptixInstance> instances = state.instances;
for( size_t i = 0; i < state.fume.size(); ++i )
{
ExhaustFume& fume = state.fume[i];
OptixInstance oi = {};
memcpy( oi.transform, &Matrix3x4::Identity(), sizeof( float ) * 12 );
oi.visibilityMask = 255;
OptixSRTMotionTransform& srt = fume.srt_animation.transform( 0 );
OptixTraversableHandle handle = fume.exploding_gas_handle;
srt.child = handle;
optixConvertPointerToTraversableHandle( state.context, fume.d_srt,
OPTIX_TRAVERSABLE_TYPE_SRT_MOTION_TRANSFORM, &handle );
oi.traversableHandle = handle;
CUDA_CHECK( cudaMemcpy( (char*)fume.d_srt, fume.srt_animation.data(), fume.srt_animation.byteSize(), cudaMemcpyHostToDevice ) );
instances.emplace_back( oi );
}
size_t instances_size_in_bytes = sizeof( OptixInstance ) * instances.size();
if( state.d_instances_size < instances_size_in_bytes )
{
if( state.d_instances )
CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_instances ) ) );
CUDA_CHECK( cudaMalloc( (void**)&state.d_instances, instances_size_in_bytes ) );
state.d_instances_size = instances_size_in_bytes;
}
CUDA_CHECK( cudaMemcpy( (void*)state.d_instances, instances.data(), instances_size_in_bytes, cudaMemcpyHostToDevice ) );
state.ias_instance_input.type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
state.ias_instance_input.instanceArray.instances = state.d_instances;
state.ias_instance_input.instanceArray.numInstances = static_cast<int>( instances.size() );
OptixAccelBufferSizes ias_buffer_sizes;
OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &state.ias_accel_options, &state.ias_instance_input, 1, &ias_buffer_sizes ) );
// grow in size if required
if( state.ias_output_buffer_size < ias_buffer_sizes.outputSizeInBytes )
{
CUDA_CHECK( cudaFree( (void*)state.d_ias_output_buffer ) );
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_ias_output_buffer ), ias_buffer_sizes.outputSizeInBytes ) );
state.ias_output_buffer_size = ias_buffer_sizes.outputSizeInBytes;
}
}
#endif
OPTIX_CHECK( optixAccelBuild( state.context, state.stream,
&state.ias_accel_options, &state.ias_instance_input, 1,
state.d_temp_buffer, state.temp_buffer_size,
state.d_ias_output_buffer, state.ias_output_buffer_size, &state.ias_handle, nullptr, 0 ) );
state.params.handle = state.ias_handle;
}
// Merges all meshes of a loaded scene into a single world-space GAS.
// Each mesh's object-to-world transform is applied at build time through the
// per-build-input pre-transform, the build itself emits the world-space AABB,
// and the result is compacted when that saves memory.
//
// Outputs:
//   gasData   - device buffer owning the (possibly compacted) GAS; caller frees.
//   gasHandle - traversable handle of the built GAS.
//   aabb      - world-space bounds emitted by the build (6 floats).
void buildMergedGAS( MotionGeometryState& state, const sutil::Scene& scene, CUdeviceptr& gasData, OptixTraversableHandle& gasHandle, sutil::Aabb& aabb )
{
auto& meshes = scene.meshes();
// unify all meshes into a single GAS
std::vector<OptixBuildInput> buildInputs;
// since we bake all meshes into a single GAS, we need to apply the transforms
// we do so by using the build input pre-transform property of AS builds
std::vector<Matrix3x4> meshTransforms( meshes.size() );
CUdeviceptr d_preTransforms = 0;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_preTransforms ), sizeof( Matrix3x4 ) * meshTransforms.size() ) );
// One build input per sub-mesh; all sub-meshes of mesh i reference the i-th
// pre-transform slot in d_preTransforms.
for( size_t i = 0; i < meshes.size(); ++i )
{
auto& mesh = meshes[i];
const size_t num_subMeshes = mesh->indices.size();
size_t buildInputOffset = buildInputs.size();
buildInputs.resize( buildInputOffset + num_subMeshes );
memcpy( &meshTransforms[i], mesh->transform.getData(), sizeof( float ) * 12 ); // mesh->transform is a 4x4 matrix, but also row-major
assert( mesh->positions.size() == num_subMeshes && mesh->normals.size() == num_subMeshes
&& mesh->texcoords.size() == num_subMeshes );
for( size_t j = 0; j < num_subMeshes; ++j )
{
OptixBuildInput& triangle_input = buildInputs[j + buildInputOffset];
memset( &triangle_input, 0, sizeof( OptixBuildInput ) );
triangle_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
triangle_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
// A byte_stride of 0 means tightly packed; substitute the element size.
// NOTE(review): the trailing ',' below is the comma operator, so the next
// assignment still executes — it works, but was likely meant to be ';'.
triangle_input.triangleArray.vertexStrideInBytes =
mesh->positions[j].byte_stride ? mesh->positions[j].byte_stride : sizeof( float3 ),
triangle_input.triangleArray.numVertices = mesh->positions[j].count;
triangle_input.triangleArray.vertexBuffers = &( mesh->positions[j].data );
triangle_input.triangleArray.indexFormat =
mesh->indices[j].elmt_byte_size == 2 ? OPTIX_INDICES_FORMAT_UNSIGNED_SHORT3 : OPTIX_INDICES_FORMAT_UNSIGNED_INT3;
triangle_input.triangleArray.indexStrideInBytes =
mesh->indices[j].byte_stride ? mesh->indices[j].byte_stride : mesh->indices[j].elmt_byte_size * 3;
triangle_input.triangleArray.numIndexTriplets = mesh->indices[j].count / 3;
triangle_input.triangleArray.indexBuffer = mesh->indices[j].data;
triangle_input.triangleArray.flags = &state.triangle_flags;
triangle_input.triangleArray.numSbtRecords = 1;
// Point into the pre-transform array uploaded below.
triangle_input.triangleArray.preTransform = ( CUdeviceptr )( (char*)d_preTransforms + sizeof( Matrix3x4 ) * i );
triangle_input.triangleArray.transformFormat = OPTIX_TRANSFORM_FORMAT_MATRIX_FLOAT12;
}
}
// Upload transforms before the build consumes them.
CUDA_CHECK( cudaMemcpy( (void*)d_preTransforms, meshTransforms.data(), sizeof( Matrix3x4 ) * meshTransforms.size(), cudaMemcpyHostToDevice ) );
OptixAccelBuildOptions accelOptions = {};
accelOptions.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS;
accelOptions.operation = OPTIX_BUILD_OPERATION_BUILD;
OptixAccelBufferSizes gasBufferSizes;
OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &accelOptions, buildInputs.data(),
static_cast<unsigned int>( buildInputs.size() ), &gasBufferSizes ) );
// allocate tmp memory
CUdeviceptr d_tempBuffer = 0, d_accelBuffer = 0;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_tempBuffer ), gasBufferSizes.tempSizeInBytes ) );
// allocate non-compacted output + compacted size
// Layout of d_accelBuffer: [ GAS | compacted size (size_t) | AABB (6 floats) ].
size_t compactedSizeOffset = roundUp<size_t>( gasBufferSizes.outputSizeInBytes, sizeof( size_t ) );
size_t aabbOffset = compactedSizeOffset + sizeof( size_t );
size_t totalSize = aabbOffset + 6 * sizeof( float );
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_accelBuffer ), totalSize ) );
// Ask the build to emit the compacted size and the AABB into the tail of the buffer.
OptixAccelEmitDesc emitProperties[2] = {};
emitProperties[0].type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
emitProperties[0].result = ( CUdeviceptr )( (char*)d_accelBuffer + compactedSizeOffset );
emitProperties[1].type = OPTIX_PROPERTY_TYPE_AABBS;
emitProperties[1].result = ( CUdeviceptr )( (char*)d_accelBuffer + aabbOffset );
OPTIX_CHECK( optixAccelBuild( state.context, state.stream,
&accelOptions, buildInputs.data(),
static_cast<unsigned int>( buildInputs.size() ),
d_tempBuffer, gasBufferSizes.tempSizeInBytes,
d_accelBuffer, gasBufferSizes.outputSizeInBytes,
&gasHandle,
emitProperties, 2
) );
// NOTE(review): these default-stream copies read results of the async build on
// state.stream — relies on legacy default-stream synchronization; confirm the
// stream is not created with cudaStreamNonBlocking.
CUDA_CHECK( cudaMemcpy( aabb.data(), (const char*)d_accelBuffer + aabbOffset, 6 * sizeof( float ), cudaMemcpyDeviceToHost ) );
CUDA_CHECK( cudaFree( (void*)d_tempBuffer ) );
CUDA_CHECK( cudaFree( (void*)d_preTransforms ) );
// Compact GAS
size_t compactedGasSize;
CUDA_CHECK( cudaMemcpy( &compactedGasSize, (const char*)d_accelBuffer + compactedSizeOffset, sizeof( size_t ), cudaMemcpyDeviceToHost ) );
// Only compact when it actually shrinks the allocation.
if( compactedGasSize < gasBufferSizes.outputSizeInBytes )
{
CUdeviceptr uncompactedAccel = d_accelBuffer;
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_accelBuffer ), compactedGasSize ) );
// use handle as input and output
OPTIX_CHECK( optixAccelCompact( state.context, state.stream, gasHandle, d_accelBuffer, compactedGasSize, &gasHandle ) );
CUDA_CHECK( cudaFree( (void*)uncompactedAccel ) );
}
gasData = d_accelBuffer;
}
// Builds all acceleration structures used by the sample:
//  - a motion GAS (2 vertex keys) over a tessellated, un-indexed sphere,
//    compacted after build (static sphere),
//  - a relocated copy of that GAS for the deforming sphere (later refit),
//  - merged GASes for the plane and its propeller (via buildMergedGAS),
//  - motion transforms (matrix / SRT) chaining the instances,
//  - the top-level IAS over all of it.
// Also sizes state.d_temp_buffer so it can serve both IAS builds and GAS updates.
// NOTE(review): relies on file-scope globals 'deformSphere' and 'plane' not
// visible in this chunk — their types are inferred from usage here.
void buildMeshAccel( MotionGeometryState& state )
{
// Allocate temporary space for vertex generation.
// The same memory space is reused for generating the deformed and exploding vertices before updates.
uint32_t numVertices = g_tessellation_resolution * g_tessellation_resolution * 6;
const size_t vertices_size_in_bytes = numVertices * sizeof( float3 );
// Two buffers: one per motion key of the GAS (see motionOptions.numKeys = 2 below).
CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_temp_vertices[0] ), vertices_size_in_bytes ) );
CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_temp_vertices[1] ), vertices_size_in_bytes ) );
// Build static triangulated sphere.
launchGenerateAnimatedVertices( state, AnimationMode_None, 0, 0, g_tessellation_resolution );
// Build an AS over the triangles.
// We use un-indexed triangles so we can explode the sphere per triangle.
state.triangle_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
state.triangle_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
state.triangle_input.triangleArray.vertexStrideInBytes = sizeof( float3 );
state.triangle_input.triangleArray.numVertices = numVertices;
// d_temp_vertices is an array of per-motion-key vertex buffer pointers.
state.triangle_input.triangleArray.vertexBuffers = state.d_temp_vertices;
state.triangle_input.triangleArray.flags = &state.triangle_flags;
state.triangle_input.triangleArray.numSbtRecords = 1;
state.triangle_input.triangleArray.sbtIndexOffsetBuffer = 0;
state.triangle_input.triangleArray.sbtIndexOffsetSizeInBytes = 0;
state.triangle_input.triangleArray.sbtIndexOffsetStrideInBytes = 0;
// The exhaust-fume build input reuses the sphere setup at a lower tessellation.
state.triangle_input_fume = state.triangle_input;
state.triangle_input_fume.triangleArray.numVertices = g_tessellation_resolution_fume * g_tessellation_resolution_fume * 6;
// Motion GAS with two keys over [0,1]; UPDATE allows later refits,
// RANDOM_VERTEX_ACCESS allows vertex fetch in shaders.
OptixAccelBuildOptions gas_accel_options = {};
gas_accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION | OPTIX_BUILD_FLAG_ALLOW_UPDATE | OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS | OPTIX_BUILD_FLAG_PREFER_FAST_TRACE;
gas_accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
gas_accel_options.motionOptions.numKeys = 2;
gas_accel_options.motionOptions.timeBegin = 0;
gas_accel_options.motionOptions.timeEnd = 1;
OptixAccelBufferSizes gas_buffer_sizes;
OPTIX_CHECK( optixAccelComputeMemoryUsage(
state.context,
&gas_accel_options,
&state.triangle_input,
1, // num_build_inputs
&gas_buffer_sizes
) );
state.temp_buffer_size = gas_buffer_sizes.tempSizeInBytes;
CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_temp_buffer ), gas_buffer_sizes.tempSizeInBytes ) );
// non-compacted output
// Buffer layout: [ GAS | compacted size (8 bytes, 8-byte aligned) ].
CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
CUDA_CHECK( cudaMalloc(
reinterpret_cast< void** >( &d_buffer_temp_output_gas_and_compacted_size ),
compactedSizeOffset + 8
) );
OptixAccelEmitDesc emitProperty = {};
emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
emitProperty.result = ( CUdeviceptr )( ( char* )d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );
OPTIX_CHECK( optixAccelBuild(
state.context,
state.stream, // CUDA stream
&gas_accel_options,
&state.triangle_input,
1, // num build inputs
state.d_temp_buffer,
gas_buffer_sizes.tempSizeInBytes,
d_buffer_temp_output_gas_and_compacted_size,
gas_buffer_sizes.outputSizeInBytes,
&state.static_gas_handle,
&emitProperty, // emitted property list
1 // num emitted properties
) );
// The memory requirements for the uncompressed exploding GAS (fume) won't change so we can rebuild in-place.
state.exploding_gas_output_buffer_size = gas_buffer_sizes.outputSizeInBytes;
// Relocation info lets us clone the compacted GAS for the deforming sphere below.
OptixAccelRelocationInfo relocationInfo;
OPTIX_CHECK( optixAccelGetRelocationInfo( state.context, state.static_gas_handle, &relocationInfo ) );
// Compress sphere GAS
size_t compacted_gas_size;
CUDA_CHECK( cudaMemcpy( &compacted_gas_size, ( void* )emitProperty.result, sizeof( size_t ), cudaMemcpyDeviceToHost ) );
if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
{
CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_static_gas_output_buffer ), compacted_gas_size ) );
// use handle as input and output
OPTIX_CHECK( optixAccelCompact( state.context, state.stream, state.static_gas_handle, state.d_static_gas_output_buffer, compacted_gas_size, &state.static_gas_handle ) );
CUDA_CHECK( cudaFree( ( void* )d_buffer_temp_output_gas_and_compacted_size ) );
state.static_gas_output_buffer_size = compacted_gas_size;
}
else
{
// Compaction would not shrink the GAS; keep the original buffer.
state.d_static_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
state.static_gas_output_buffer_size = gas_buffer_sizes.outputSizeInBytes;
}
// Replicate the compressed GAS for the deforming sphere.
// The deforming sphere is never rebuild so we refit the compressed GAS without requiring recompression.
state.deforming_gas_output_buffer_size = state.static_gas_output_buffer_size;
CUDA_CHECK( cudaMalloc( reinterpret_cast< void** >( &state.d_deforming_gas_output_buffer ), state.deforming_gas_output_buffer_size ) );
CUDA_CHECK( cudaMemcpy( ( void* )state.d_deforming_gas_output_buffer, ( const void* )state.d_static_gas_output_buffer, state.deforming_gas_output_buffer_size, cudaMemcpyDeviceToDevice ) );
OPTIX_CHECK( optixAccelRelocate( state.context, state.stream, &relocationInfo, 0, 0, state.d_deforming_gas_output_buffer, state.deforming_gas_output_buffer_size, &state.deforming_gas_handle ) );
// Initialize the deform sphere's matrix motion transform with identity keys;
// the animation loop overwrites these each frame.
{
deformSphere.matrix_animation = MatrixMotionTransformArray( 1, 10 );
for(unsigned int i=0; i<deformSphere.matrix_animation.numKeys(); ++i)
{
*(Matrix3x4*)deformSphere.matrix_animation.motionData( 0, i ) = Matrix3x4::Identity();
}
}
//////////////////////////////////////////////////////////////////////////
// load plane and propeller
{
sutil::Scene s;
std::string fileName = sutil::sampleFilePath( "data/Plane", "biplane.gltf" );
loadScene( fileName, s );
buildMergedGAS( state, s, state.d_plane_gas_output_buffer, state.plane_gas_handle, state.planeAabb );
}
{
sutil::Scene s;
sutil::Aabb dummyAabb;
std::string fileName = sutil::sampleFilePath( "data/Plane", "biplane_propeller.gltf" );
loadScene( fileName, s );
buildMergedGAS( state, s, state.d_planePropeller_gas_output_buffer, state.planePropeller_gas_handle, dummyAabb );
}
// init animation of plane and propeller
{
// Two SRT transforms (plane body, propeller parent), two keys each; start at rest.
plane.srt_animation = SRTMotionTransformArray( 2, 2 );
float3 scale = make_float3( 1 );
float3 shear = make_float3( 0 );
float3 scaleShearPivot = make_float3( 0 );
float3 rotationPivot = make_float3( 0 );
float3 translation = make_float3( 0 );
plane.srt_animation.motionData( 0, 0 ) = buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 0, 0, 1 ), 0 ), rotationPivot, translation );
plane.srt_animation.motionData( 0, 1 ) = buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 0, 0, 1 ), 0 ), rotationPivot, translation );
plane.srt_animation.motionData( 1, 0 ) = buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 0, 0, 1 ), 0 ), rotationPivot, translation );
plane.srt_animation.motionData( 1, 1 ) = buildSRT( scale, shear, scaleShearPivot, Quaternion( make_float3( 0, 0, 1 ), 0 ), rotationPivot, translation );
}
{
// Propeller gets many keys (100) for smooth fast rotation; all start identical.
plane.srt_animationPropeller = SRTMotionTransformArray( 1, 100 );
plane.srt_animationPropeller.motionData( 0, 0 ) = buildSRT( make_float3( 1 ), Quaternion( make_float3( 0, 0, 1 ), 0 ), make_float3( 0 ) );
for( unsigned int i = 1; i < plane.srt_animationPropeller.numKeys(); ++i )
{
plane.srt_animationPropeller.motionData( 0, i ) = plane.srt_animationPropeller.motionData( 0, 0 );
}
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Build the IAS
// alloc memory to be able to generate handles
CUDA_CHECK( cudaMalloc( (void**)&deformSphere.d_matrices, deformSphere.matrix_animation.byteSize() ) );
CUDA_CHECK( cudaMalloc( (void**)&plane.d_srts, plane.srt_animation.byteSize() ) );
CUDA_CHECK( cudaMalloc( (void**)&plane.d_srtsPropeller, plane.srt_animationPropeller.byteSize() ) );
// static sphere, orbiting sphere, plane, plane propeller
// 'exhaust fume' instances are added on demand
const int32_t INST_COUNT = 4;
std::vector<OptixInstance>& instances = state.instances;
instances.resize( INST_COUNT );
for( size_t i = 0; i < instances.size(); ++i )
{
memcpy( instances[i].transform, &Matrix3x4::Identity(), sizeof( float ) * 12 );
instances[i].sbtOffset = 0;
instances[i].visibilityMask = 255;
}
unsigned int iIdx = 0;
// Instance 0: static sphere points straight at its GAS.
instances[iIdx++].traversableHandle = state.static_gas_handle;
// Instance 1: deforming sphere behind a matrix motion transform.
{
OptixTraversableHandle handle = state.deforming_gas_handle;
OptixMatrixMotionTransform& t = deformSphere.matrix_animation.transform( 0 );
t.child = handle;
t.motionOptions.flags = 0;
t.motionOptions.numKeys = deformSphere.matrix_animation.numKeys();
t.motionOptions.timeBegin = 0;
t.motionOptions.timeEnd = 1;
// Convert the device pointer of the (not yet uploaded) transform into a handle;
// the actual data is copied to deformSphere.d_matrices further below.
optixConvertPointerToTraversableHandle( state.context, ( CUdeviceptr )( (char*)deformSphere.d_matrices ),
OPTIX_TRAVERSABLE_TYPE_MATRIX_MOTION_TRANSFORM, &handle );
instances[iIdx++].traversableHandle = handle;
}
// Instance 2: plane body behind SRT transform 0.
{
OptixTraversableHandle handle = state.plane_gas_handle;
unsigned int tIdx = 0;
OptixSRTMotionTransform& t = plane.srt_animation.transform( tIdx );
t.child = handle;
t.motionOptions.flags = 0;
t.motionOptions.numKeys = plane.srt_animation.numKeys();
t.motionOptions.timeBegin = 0;
t.motionOptions.timeEnd = 1;
optixConvertPointerToTraversableHandle( state.context,
(CUdeviceptr)((char*)plane.d_srts + plane.srt_animation.byteSizePerTransform() * tIdx),
OPTIX_TRAVERSABLE_TYPE_SRT_MOTION_TRANSFORM, &handle );
instances[iIdx++].traversableHandle = handle;
}
// Instance 3: propeller — chained transforms: IAS -> plane SRT (tIdx 1) ->
// propeller SRT -> propeller GAS (the depth-4 case the pipeline is sized for).
{
OptixTraversableHandle handle = state.planePropeller_gas_handle;
{
unsigned int tIdx = 0;
OptixSRTMotionTransform& t = plane.srt_animationPropeller.transform( tIdx );
t.child = handle;
t.motionOptions.flags = 0;
t.motionOptions.numKeys = plane.srt_animationPropeller.numKeys();
t.motionOptions.timeBegin = 0;
t.motionOptions.timeEnd = 1;
optixConvertPointerToTraversableHandle( state.context,
(CUdeviceptr)((char*)plane.d_srtsPropeller + plane.srt_animationPropeller.byteSizePerTransform() * tIdx),
OPTIX_TRAVERSABLE_TYPE_SRT_MOTION_TRANSFORM, &handle );
}
{
unsigned int tIdx = 1;
OptixSRTMotionTransform& t = plane.srt_animation.transform( tIdx );
t.child = handle;
t.motionOptions.flags = 0;
t.motionOptions.numKeys = plane.srt_animation.numKeys();
t.motionOptions.timeBegin = 0;
t.motionOptions.timeEnd = 1;
optixConvertPointerToTraversableHandle( state.context,
(CUdeviceptr)((char*)plane.d_srts + plane.srt_animation.byteSizePerTransform() * tIdx),
OPTIX_TRAVERSABLE_TYPE_SRT_MOTION_TRANSFORM, &handle );
}
instances[iIdx++].traversableHandle = handle;
}
// Upload the transform arrays the handles above point at.
CUDA_CHECK( cudaMemcpy( (char*)deformSphere.d_matrices, deformSphere.matrix_animation.data(), deformSphere.matrix_animation.byteSize(), cudaMemcpyHostToDevice ) );
CUDA_CHECK( cudaMemcpy( (char*)plane.d_srts, plane.srt_animation.data(), plane.srt_animation.byteSize(), cudaMemcpyHostToDevice ) );
CUDA_CHECK( cudaMemcpy( (char*)plane.d_srtsPropeller, plane.srt_animationPropeller.data(), plane.srt_animationPropeller.byteSize(), cudaMemcpyHostToDevice ) );
size_t instances_size_in_bytes = sizeof( OptixInstance ) * instances.size();
CUDA_CHECK( cudaMalloc( ( void** )&state.d_instances, instances_size_in_bytes ) );
state.d_instances_size = instances_size_in_bytes;
CUDA_CHECK( cudaMemcpy( ( void* )state.d_instances, instances.data(), instances_size_in_bytes, cudaMemcpyHostToDevice ) );
state.ias_instance_input.type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
state.ias_instance_input.instanceArray.instances = state.d_instances;
state.ias_instance_input.instanceArray.numInstances = static_cast<int>( instances.size() );
// we choose FAST_BUILD here as we need to rebuild every frame, no update or compaction needed
state.ias_accel_options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_BUILD;
// In this interactive sample, build times can govern render times.
// Hence, we build a static IAS with faster build times despite slower traversal times.
#if 1
state.ias_accel_options.motionOptions.numKeys = 1;
#else
state.ias_accel_options.motionOptions.numKeys = 2;
state.ias_accel_options.motionOptions.timeBegin = 0;
state.ias_accel_options.motionOptions.timeEnd = 1;
#endif
state.ias_accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
OptixAccelBufferSizes ias_buffer_sizes;
OPTIX_CHECK( optixAccelComputeMemoryUsage( state.context, &state.ias_accel_options, &state.ias_instance_input, 1, &ias_buffer_sizes ) );
// non-compacted output
CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_ias_output_buffer ), ias_buffer_sizes.outputSizeInBytes ) );
// One shared temp buffer must cover both the per-frame IAS build and GAS updates.
size_t maxUpdateTempSize = std::max( ias_buffer_sizes.tempSizeInBytes, gas_buffer_sizes.tempUpdateSizeInBytes );
if( maxUpdateTempSize > state.temp_buffer_size )
{
CUDA_CHECK( cudaFree( (void*)state.d_temp_buffer ) );
state.temp_buffer_size = maxUpdateTempSize;
CUDA_CHECK( cudaMalloc( (void**)&state.d_temp_buffer, state.temp_buffer_size ) );
}
OPTIX_CHECK( optixAccelBuild( state.context, state.stream,
&state.ias_accel_options,
&state.ias_instance_input, 1,
state.d_temp_buffer, ias_buffer_sizes.tempSizeInBytes,
state.d_ias_output_buffer, ias_buffer_sizes.outputSizeInBytes,
&state.ias_handle,
nullptr, 0 ) );
state.params.handle = state.ias_handle;
}
// Compiles the sample's single CUDA module (optixMotionGeometry.cu) and fills
// in the pipeline compile options shared by all program groups.
// The launch parameter 'params.ao' is bound at compile time (specialization),
// so toggling AO requires recompiling the module with a new state.renderAO.
void createModule( MotionGeometryState& state )
{
OptixModuleCompileOptions module_compile_options = {};
module_compile_options.maxRegisterCount = OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
module_compile_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;
// Motion blur and the full traversal graph (IAS -> motion transforms -> GAS)
// are required by the scene built in buildMeshAccel().
state.pipeline_compile_options.usesMotionBlur = true;
state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_ANY;
state.pipeline_compile_options.numPayloadValues = 5;
state.pipeline_compile_options.numAttributeValues = 2;
#ifdef DEBUG // Enables debug exceptions during optix launches. This may incur significant performance cost and should only be done during development.
state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_DEBUG | OPTIX_EXCEPTION_FLAG_TRACE_DEPTH | OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
#else
state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
#endif
state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";
state.pipeline_compile_options.usesPrimitiveTypeFlags = OPTIX_PRIMITIVE_TYPE_FLAGS_TRIANGLE;
// Specialize the module on the current AO setting: the compiler folds
// 'params.ao' to the constant in state.renderAO.
OptixModuleCompileBoundValueEntry boundValue = {};
{
boundValue.pipelineParamOffsetInBytes = offsetof( Params, ao );
boundValue.sizeInBytes = sizeof( Params::ao );
boundValue.boundValuePtr = &state.renderAO;
boundValue.annotation = "ao";
module_compile_options.numBoundValues = 1;
module_compile_options.boundValues = &boundValue;
}
size_t inputSize = 0;
const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixMotionGeometry.cu", inputSize );
// OPTIX_CHECK_LOG expects locals named 'log' and 'sizeof_log' in scope.
char log[2048];
size_t sizeof_log = sizeof( log );
OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
state.context,
&module_compile_options,
&state.pipeline_compile_options,
input,
inputSize,
log,
&sizeof_log,
&state.ptx_module
) );
}
// Creates the four program groups referenced by the SBT: ray generation,
// radiance miss, closest-hit, and occlusion miss. All entry points live in
// the single module compiled by createModule().
void createProgramGroups( MotionGeometryState& state )
{
    OptixProgramGroupOptions pg_options = {};

    // OPTIX_CHECK_LOG requires locals named 'log' and 'sizeof_log' in scope;
    // sizeof_log is reset before each call since it is written back.
    char   log[2048];
    size_t sizeof_log = sizeof( log );

    {
        // Ray generation program.
        OptixProgramGroupDesc desc    = {};
        desc.kind                     = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
        desc.raygen.module            = state.ptx_module;
        desc.raygen.entryFunctionName = "__raygen__rg";
        OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &desc, 1 /*num program groups*/, &pg_options,
                                                  log, &sizeof_log, &state.raygen_prog_group ) );
    }

    {
        // Miss program for radiance rays.
        OptixProgramGroupDesc desc  = {};
        desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
        desc.miss.module            = state.ptx_module;
        desc.miss.entryFunctionName = "__miss__ms";
        sizeof_log = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &desc, 1 /*num program groups*/, &pg_options,
                                                  log, &sizeof_log, &state.miss_group ) );
    }

    {
        // Closest-hit program; no any-hit or intersection programs are used.
        OptixProgramGroupDesc desc        = {};
        desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
        desc.hitgroup.moduleCH            = state.ptx_module;
        desc.hitgroup.entryFunctionNameCH = "__closesthit__ch";
        sizeof_log = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &desc, 1 /*num program groups*/, &pg_options,
                                                  log, &sizeof_log, &state.hit_group ) );
    }

    {
        // Miss program for occlusion (shadow/AO) rays.
        OptixProgramGroupDesc desc  = {};
        desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
        desc.miss.module            = state.ptx_module;
        desc.miss.entryFunctionName = "__miss__occlusion";
        sizeof_log = sizeof( log );
        OPTIX_CHECK_LOG( optixProgramGroupCreate( state.context, &desc, 1 /*num program groups*/, &pg_options,
                                                  log, &sizeof_log, &state.miss_group_occlusion ) );
    }
}
// Links all program groups into the pipeline and configures its stack sizes
// for the scene's maximum traversal depth.
void createPipeline( MotionGeometryState& state )
{
    OptixProgramGroup groups[] = {
        state.raygen_prog_group,
        state.miss_group,
        state.miss_group_occlusion,
        state.hit_group,
    };
    const unsigned int num_groups = sizeof( groups ) / sizeof( groups[0] );

    OptixPipelineLinkOptions link_options = {};
    link_options.maxTraceDepth = 20;
    link_options.debugLevel    = OPTIX_COMPILE_DEBUG_LEVEL_FULL;

    // OPTIX_CHECK_LOG requires locals named 'log' and 'sizeof_log' in scope.
    char   log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate( state.context, &state.pipeline_compile_options, &link_options, groups,
                                          num_groups, log, &sizeof_log, &state.pipeline ) );

    // We need to specify the max traversal depth. Calculate the stack sizes, so we can specify all
    // parameters to optixPipelineSetStackSize.
    OptixStackSizes stack_sizes = {};
    for( unsigned int i = 0; i < num_groups; ++i )
        OPTIX_CHECK( optixUtilAccumulateStackSizes( groups[i], &stack_sizes ) );

    const uint32_t max_trace_depth = link_options.maxTraceDepth;
    const uint32_t max_cc_depth    = 0;  // no continuation callables
    const uint32_t max_dc_depth    = 0;  // no direct callables

    uint32_t dc_stack_size_from_traversal;
    uint32_t dc_stack_size_from_state;
    uint32_t cc_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth, max_cc_depth, max_dc_depth,
                                             &dc_stack_size_from_traversal, &dc_stack_size_from_state,
                                             &cc_stack_size ) );

    // This is 4 since the largest depth is IAS->MT->MT->GAS
    const uint32_t max_traversable_graph_depth = 4;
    OPTIX_CHECK( optixPipelineSetStackSize( state.pipeline, dc_stack_size_from_traversal, dc_stack_size_from_state,
                                            cc_stack_size, max_traversable_graph_depth ) );
}
// Fills the shader binding table: one raygen record, two miss records
// (index 0: radiance, index 1: occlusion — matching the ray types traced),
// and a single hit group record shared by all instances (sbtOffset == 0).
void createSBT( MotionGeometryState& state )
{
    // --- ray generation record ---
    RayGenRecord raygen_record = {};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &raygen_record ) );

    CUdeviceptr d_raygen = 0;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_raygen ), sizeof( RayGenRecord ) ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_raygen ), &raygen_record, sizeof( RayGenRecord ),
                            cudaMemcpyHostToDevice ) );

    // --- miss records ---
    const unsigned int numMissPrograms = 2;
    MissRecord         miss_records[numMissPrograms];
    OPTIX_CHECK( optixSbtRecordPackHeader( state.miss_group, &miss_records[0] ) );
    OPTIX_CHECK( optixSbtRecordPackHeader( state.miss_group_occlusion, &miss_records[1] ) );
    miss_records[0].data.bg_color = make_float4( 0.0f );
    miss_records[1].data.bg_color = make_float4( 0.0f );

    CUdeviceptr d_miss = 0;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_miss ), sizeof( MissRecord ) * numMissPrograms ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_miss ), miss_records, sizeof( MissRecord ) * numMissPrograms,
                            cudaMemcpyHostToDevice ) );

    // --- hit group record ---
    HitGroupRecord hit_record = {};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.hit_group, &hit_record ) );
    hit_record.data.color = make_float3( 1 );

    CUdeviceptr d_hitgroup = 0;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_hitgroup ), sizeof( HitGroupRecord ) ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( d_hitgroup ), &hit_record, sizeof( HitGroupRecord ),
                            cudaMemcpyHostToDevice ) );

    // --- wire everything into the SBT ---
    state.sbt.raygenRecord                = d_raygen;
    state.sbt.missRecordBase              = d_miss;
    state.sbt.missRecordStrideInBytes     = static_cast<uint32_t>( sizeof( MissRecord ) );
    state.sbt.missRecordCount             = numMissPrograms;
    state.sbt.hitgroupRecordBase          = d_hitgroup;
    state.sbt.hitgroupRecordStrideInBytes = static_cast<uint32_t>( sizeof( HitGroupRecord ) );
    state.sbt.hitgroupRecordCount         = 1;
}
// Releases the OptiX objects and device allocations owned by the state.
// The pipeline is destroyed before the program groups and module it links,
// and the device context last among the OptiX objects.
// NOTE(review): device buffers owned by file-scope animation data
// (deformSphere.d_matrices, plane.d_srts, plane.d_srtsPropeller, fume buffers)
// are not freed here — confirm they are released elsewhere.
void cleanupState( MotionGeometryState& state )
{
    OPTIX_CHECK( optixPipelineDestroy( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.miss_group ) );
    // Fix: the occlusion miss group created in createProgramGroups() was never
    // destroyed, leaking it on shutdown.
    OPTIX_CHECK( optixProgramGroupDestroy( state.miss_group_occlusion ) );
    OPTIX_CHECK( optixProgramGroupDestroy( state.hit_group ) );
    OPTIX_CHECK( optixModuleDestroy( state.ptx_module ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );

    // SBT records.
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.sbt.hitgroupRecordBase ) ) );

    // Geometry, acceleration structure, and scratch buffers.
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_temp_vertices[0] ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_temp_vertices[1] ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_static_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_deforming_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_plane_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_planePropeller_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_instances ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_ias_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_temp_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast< void* >( state.d_params ) ) );
}
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
int main( int argc, char* argv[] )
{
MotionGeometryState state;
state.params.width = 1024;
state.params.height = 768;
state.time = 0.f;
sutil::CUDAOutputBufferType output_buffer_type = sutil::CUDAOutputBufferType::GL_INTEROP;
int num_frames = 18;
float animation_time = 10.0f;
//
// Parse command line options
//
std::string outfile;
for( int i = 1; i < argc; ++i )
{
const std::string arg = argv[i];
if( arg == "--help" || arg == "-h" )
{
printUsageAndExit( argv[0] );
}
else if( arg == "--no-gl-interop" )
{
output_buffer_type = sutil::CUDAOutputBufferType::CUDA_DEVICE;
}
else if( arg == "--file" || arg == "-f" )
{
if( i >= argc - 1 )
printUsageAndExit( argv[0] );
outfile = argv[++i];
}
else if( arg.substr( 0, 6 ) == "--dim=" )
{
const std::string dims_arg = arg.substr( 6 );
int w, h;
sutil::parseDimensions( dims_arg.c_str(), w, h );
state.params.width = w;
state.params.height = h;
}
else if( arg == "--time" || arg == "-t" )
{
if( i >= argc - 1 )
printUsageAndExit( argv[0] );
animation_time = (float)atof( argv[++i] );
}
else if( arg == "--frames" || arg == "-n" )
{
if( i >= argc - 1 )
printUsageAndExit( argv[0] );
num_frames = atoi( argv[++i] );
}
else
{
std::cerr << "Unknown option '" << argv[i] << "'\n";
printUsageAndExit( argv[0] );
}
}
try
{
initCameraState();
//
// Set up OptiX state
//
createContext( state );
createModule( state );
createProgramGroups( state );
createPipeline( state );
buildMeshAccel( state );
createSBT( state );
initLaunchParams( state );
if( outfile.empty() )
{
std::cout << "Keys: Up/Down Double/half target frame rate\n";
std::cout << " M/N Increase/reduce plane speed\n";
std::cout << " J/H Increase/reduce deform sphere orbit speed\n";
std::cout << " V Toggle: camera follow plane\n";
std::cout << " B Add exhaust fume\n";
std::cout << " A Toggle: AO rendering\n";
GLFWwindow* window = sutil::initUI( "optixMotionGeometry", state.params.width, state.params.height );
glfwSetMouseButtonCallback( window, mouseButtonCallback );
glfwSetCursorPosCallback( window, cursorPosCallback );
glfwSetWindowSizeCallback( window, windowSizeCallback );
glfwSetWindowIconifyCallback( window, windowIconifyCallback );
glfwSetKeyCallback( window, keyCallback );
glfwSetScrollCallback( window, scrollCallback );
glfwSetWindowUserPointer( window, &state );
//
// Render loop
//
{
sutil::CUDAOutputBuffer<uchar4> output_buffer(
output_buffer_type,
state.params.width,
state.params.height
);
output_buffer.setStream( state.stream );
sutil::GLDisplay gl_display;
std::chrono::duration<double> state_update_time( 0.0 );
std::chrono::duration<double> render_time( 0.0 );
std::chrono::duration<double> display_time( 0.0 );
std::chrono::duration<double> full_frame_time( 1/60.0 ); // init with 60.0 fps
auto tstart = std::chrono::system_clock::now();
state.targetFrameTime = 1 / 30.0f;
do
{
cudaDeviceSynchronize();
auto t0 = std::chrono::steady_clock::now();
glfwPollEvents();
//////////////////////////////////////////////////////////////////////////
auto tnow = std::chrono::system_clock::now();
std::chrono::duration<double> time = tnow - tstart;
state.time_last_frame = state.time;
state.time = (float)time.count();
float timePassed = state.time - state.time_last_frame;
unsigned int targetSpp = max( 1u, (unsigned int)(state.targetFrameTime / timePassed * state.params.spp) );
if( abs( (float)targetSpp / state.params.spp - 1u ) > 0.2 )
{
state.params.spp = ( state.params.spp + targetSpp ) / 2;
}
else
{
if( state.time - state.time_last_frame < state.targetFrameTime )
state.params.spp++;
else
state.params.spp = max( 1u, state.params.spp - 1 );
}
//////////////////////////////////////////////////////////////////////////
if( state.time - state.time_last_fume > 0.4f )
{
addFume( state );
state.time_last_fume = state.time;
}
//////////////////////////////////////////////////////////////////////////
updateMeshAccel( state );
handleCameraUpdate( state );
handleResize( output_buffer, state.params );
// sync to correctly attribute where time is spent
cudaDeviceSynchronize();
auto t1 = std::chrono::steady_clock::now();
state_update_time += t1 - t0;
t0 = t1;
launchSubframe( output_buffer, state );
t1 = std::chrono::steady_clock::now();
render_time += t1 - t0;
t0 = t1;
displaySubframe( output_buffer, gl_display, window );
t1 = std::chrono::steady_clock::now();
display_time += t1 - t0;
full_frame_time = state_update_time + render_time + display_time;
sutil::displayStats( state_update_time, render_time, display_time );
sutil::beginFrameImGui();
static char display_text[256];
sprintf( display_text,
"ambient occlusion: %s\n"
"samples per pixel: %d\n",
( state.renderAO ? "on" : "off" ), state.params.spp );
sutil::displayText( display_text, 10.0f, 100.0f );
sutil::endFrameImGui();
glfwSwapBuffers( window );
++state.params.subframe_index;
} while( !glfwWindowShouldClose( window ) );
CUDA_SYNC_CHECK();
}
sutil::cleanupUI( window );
}
else
{
if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
{
sutil::initGLFW(); // For GL context
sutil::initGL();
}
sutil::CUDAOutputBuffer<uchar4> output_buffer(
output_buffer_type,
state.params.width,
state.params.height
);
handleCameraUpdate( state );
handleResize( output_buffer, state.params );
// run animation frames
for( unsigned int i = 0; i < static_cast<unsigned int>( num_frames ); ++i )
{
state.time_last_frame = state.time;
state.time = i * ( animation_time / ( num_frames - 1 ) );
if( state.time - state.time_last_fume > 0.4f )
{
addFume( state );
state.time_last_fume = state.time;
}
updateMeshAccel( state );
launchSubframe( output_buffer, state );
}
sutil::ImageBuffer buffer;
buffer.data = output_buffer.getHostPointer();
buffer.width = output_buffer.width();
buffer.height = output_buffer.height();
buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
sutil::saveImage( outfile.c_str(), buffer, false );
if( output_buffer_type == sutil::CUDAOutputBufferType::GL_INTEROP )
{
glfwTerminate();
}
}
cleanupState( state );
}
catch( std::exception& e )
{
std::cerr << "Caught exception: " << e.what() << "\n";
return 1;
}
return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMotionGeometry/optixMotionGeometry.cu | CUDA | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixMotionGeometry.h"
#include "random.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
extern "C" {
__constant__ Params params;
}
// Orthonormal basis (tangent, binormal, normal) built around a given normal.
// Used to rotate local hemisphere samples into the normal's frame.
struct Onb
{
    // Construct the basis from 'normal'. The branch chooses which coordinate
    // axis to swap against so the seed binormal is never (near-)zero, i.e. it
    // avoids a degenerate cross product when the normal is axis-aligned.
    __forceinline__ __device__ Onb( const float3& normal )
    {
        m_normal = normal;
        if( fabs( m_normal.x ) > fabs( m_normal.z ) )
        {
            m_binormal.x = -m_normal.y;
            m_binormal.y = m_normal.x;
            m_binormal.z = 0;
        }
        else
        {
            m_binormal.x = 0;
            m_binormal.y = -m_normal.z;
            m_binormal.z = m_normal.y;
        }
        m_binormal = normalize( m_binormal );
        m_tangent = cross( m_binormal, m_normal );
    }
    // Transform a vector expressed in this local frame (x = tangent,
    // y = binormal, z = normal) back into the frame the basis was built in.
    __forceinline__ __device__ void inverse_transform( float3& p ) const
    {
        p = p.x * m_tangent + p.y * m_binormal + p.z * m_normal;
    }
    float3 m_tangent;   // completes the right-handed frame
    float3 m_binormal;  // normalized axis perpendicular to the normal
    float3 m_normal;    // the input normal (not re-normalized here)
};
// Map two uniform random numbers in [0,1) to a direction on the +z unit
// hemisphere with cosine-weighted density (Malley's method: sample the unit
// disk uniformly, then project the point up onto the hemisphere).
static __forceinline__ __device__ void cosine_sample_hemisphere( const float u1, const float u2, float3& p )
{
    // Polar coordinates of a uniformly distributed point on the unit disk.
    const float radius = sqrtf( u1 );
    const float theta  = 2.0f * M_PIf * u2;
    p.x = radius * cosf( theta );
    p.y = radius * sinf( theta );
    // Lift the disk sample onto the hemisphere; the clamp guards against a
    // tiny negative argument caused by floating-point round-off.
    p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x * p.x - p.y * p.y ) );
}
// Use named types for compatibility with nvrtc
// Otherwise these structs can be defined as unnamed structs directly in 'Payload'
// to avoid access via p0123.px and directly access px.
// Raw view: the four 32-bit OptiX ray-payload registers.
struct t_p0123 {
    unsigned int p0, p1, p2, p3;
};
// Typed view: a color (occupying registers 0..2) plus an RNG seed (register 3).
struct t_cseed {
    float3 c;
    unsigned int seed;
};
// Union of the two views over the same bits, so the same payload can be
// written/read with optixGet/SetPayload_* (raw) and shaded as color + seed.
struct Payload {
    union {
        t_p0123 p0123;
        t_cseed cseed;
    };
    // Write all four payload registers (color + seed).
    __forceinline__ __device__ void setAll()
    {
        optixSetPayload_0( p0123.p0 );
        optixSetPayload_1( p0123.p1 );
        optixSetPayload_2( p0123.p2 );
        optixSetPayload_3( p0123.p3 );
    }
    // Read all four payload registers (color + seed).
    __forceinline__ __device__ void getAll()
    {
        p0123.p0 = optixGetPayload_0();
        p0123.p1 = optixGetPayload_1();
        p0123.p2 = optixGetPayload_2();
        p0123.p3 = optixGetPayload_3();
    }
    // Write only the color registers 0..2 (leaves the seed register alone).
    __forceinline__ __device__ void setC()
    {
        optixSetPayload_0( p0123.p0 );
        optixSetPayload_1( p0123.p1 );
        optixSetPayload_2( p0123.p2 );
    }
    // Read only the color registers 0..2.
    __forceinline__ __device__ void getC()
    {
        p0123.p0 = optixGetPayload_0();
        p0123.p1 = optixGetPayload_1();
        p0123.p2 = optixGetPayload_2();
    }
    // Read only the seed register 3.
    __forceinline__ __device__ void getSeed()
    {
        p0123.p3 = optixGetPayload_3();
    }
};
// Trace a camera/radiance ray against 'handle', exchanging the four payload
// registers (color + RNG seed) with the hit/miss programs.
//   tmin/tmax - ray interval
//   time      - motion-blur time passed to motion-aware traversal
//   prd       - in/out payload; updated in place from the returned registers
static __forceinline__ __device__ void trace(
        OptixTraversableHandle handle,
        float3 ray_origin,
        float3 ray_direction,
        float tmin,
        float tmax,
        float time,
        Payload& prd
        )
{
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            time,
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_NONE,
            0, // SBT offset, first ray type (only one here)
            0, // SBT stride, forcing a single HitGroup in combination with an sbt offset set to zero for every instance!
            0, // missSBTIndex, used for camera rays
            prd.p0123.p0, prd.p0123.p1, prd.p0123.p2, prd.p0123.p3
            );
}
// Ray-generation program: one launch index per pixel. Shoots params.spp
// jittered camera rays (each at a random motion time), averages the returned
// payload colors, and writes the 8-bit pixel into params.frame_buffer.
extern "C" __global__ void __raygen__rg()
{
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();
    const float3 eye = params.eye;
    const float3 U = params.U;
    const float3 V = params.V;
    const float3 W = params.W;
    Payload payload;
    // Per-pixel, per-subframe seed so the jitter decorrelates across frames.
    payload.cseed.seed = tea<4>( idx.y * dim.x + idx.x, 12346789 + params.subframe_index );
    float3 final_c = make_float3( 0 );
#pragma unroll 1
    for( int x = 1; x <= params.spp; ++x )
    {
        // Jittered pixel position mapped to NDC [-1,1]^2 for anti-aliasing.
        const float2 d = 2.0f * make_float2(
            ( static_cast< float >( idx.x ) + rnd( payload.cseed.seed ) ) / static_cast< float >( dim.x ),
            ( static_cast< float >( idx.y ) + rnd( payload.cseed.seed ) ) / static_cast< float >( dim.y )
        ) - 1.0f;
        float3 direction = normalize( d.x * U + d.y * V + W );
        // Random time per sample produces the motion blur.
        float time = rnd( payload.cseed.seed );
        // Default color in case neither hit nor miss writes the payload.
        payload.cseed.c = make_float3( 0.5f, 0.5f, 0.5f );
        trace( params.handle,
            eye,
            direction,
            0.00f, // tmin
            1e16f, // tmax
            time,
            payload );
        final_c += payload.cseed.c;
    }
    // Average over the samples and store as RGBA8.
    final_c /= params.spp;
    params.frame_buffer[idx.y * params.width + idx.x] = make_color( final_c );
}
// Miss program for camera rays: the ray left the scene, so return the
// background color stored in this miss record. Only the color registers are
// written (setC), leaving the RNG seed register untouched.
extern "C" __global__ void __miss__ms()
{
    MissData* rt_data = reinterpret_cast< MissData* >( optixGetSbtDataPointer() );
    Payload p;
    p.cseed.c = make_float3( rt_data->bg_color.x, rt_data->bg_color.y, rt_data->bg_color.z );
    p.setC();
}
// Miss program for occlusion (shadow/AO) rays: reaching the miss program means
// nothing blocked the ray, so clear the 'occluded' flag carried in payload
// register 0 (the caller initializes it to 1 = occluded).
extern "C" __global__ void __miss__occlusion()
{
    optixSetPayload_0( 0 );
}
// Closest-hit program: shades the hit triangle by mapping its geometric normal
// to a color, optionally attenuated by a single ambient-occlusion sample.
extern "C" __global__ void __closesthit__ch()
{
    Payload p;
    p.getSeed();  // only the RNG seed (payload register 3) is needed as input
    // fetch current triangle vertices, interpolated at the ray's motion time
    float3 data[3];
    optixGetTriangleVertexData( optixGetGASTraversableHandle(), optixGetPrimitiveIndex(), optixGetSbtGASIndex(),
                                optixGetRayTime(), data );
    // compute (unnormalized) triangle normal from the two edge vectors
    data[1] -= data[0];
    data[2] -= data[0];
    float3 normal = make_float3(
        data[1].y*data[2].z - data[1].z*data[2].y,
        data[1].z*data[2].x - data[1].x*data[2].z,
        data[1].x*data[2].y - data[1].y*data[2].x );
    // s both normalizes 'normal' and folds in the 0.5 factor used by the
    // normal-to-color mapping below (n*s + 0.5 maps components [-1,1]->[0,1]).
    const float s = 0.5f / sqrtf( normal.x*normal.x + normal.y*normal.y + normal.z*normal.z );
    float shade = 1.0f;
    if( params.ao )
    {
        // One cosine-weighted ambient-occlusion sample per hit.
        const float z1 = rnd( p.cseed.seed );
        const float z2 = rnd( p.cseed.seed );
        unsigned int occluded = 1;  // assume occluded; __miss__occlusion clears it
        float3 w_in;
        cosine_sample_hemisphere( z1, z2, w_in );
        // World-space unit shading normal, flipped to face the incoming ray.
        float3 wn = normalize( optixTransformNormalFromObjectToWorldSpace( 2.f * s*normal ) );
        wn = faceforward( wn, -optixGetWorldRayDirection(), wn );
        Onb onb( wn );
        onb.inverse_transform( w_in );  // rotate local hemisphere sample into world space
        float3 pHit = optixGetWorldRayOrigin() + optixGetWorldRayDirection() * optixGetRayTmax();
        // NOTE(review): this shadow ray uses visibility mask 0xff while camera
        // rays in trace() use mask 1 — confirm the asymmetry is intended.
        optixTrace(
            params.handle,
            pHit + wn*0.001f, w_in,  // origin offset along the normal avoids self-intersection
            0.00f, 1e16f, optixGetRayTime(), // tmin, tmax, time
            0xff,
            OPTIX_RAY_FLAG_DISABLE_CLOSESTHIT | OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT | OPTIX_RAY_FLAG_DISABLE_ANYHIT,
            0, 0, // no hit group will even be executed (assuming no IS), hence, set stride and offset to 0
            1, // select MS program
            occluded ); // this is inout here! If MS is called, it will override the payload
        if( occluded )
            shade = 0.f;
    }
    HitGroupData* rt_data = reinterpret_cast<HitGroupData*>( optixGetSbtDataPointer() );
    // convert normal to color, attenuate by AO shade, tint by instance color
    p.cseed.c = shade * ( normal * s + make_float3( 0.5f ) ) * rt_data->color;
    p.setAll();
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMotionGeometry/optixMotionGeometry.h | C/C++ Header | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Launch parameters shared between host and device (bound to the __constant__
// 'params' in the .cu file).
struct Params
{
    uchar4* frame_buffer;           // output image, width*height RGBA8 pixels
    unsigned int width;             // framebuffer width in pixels
    unsigned int height;            // framebuffer height in pixels
    unsigned int spp;               // camera samples per pixel per launch
    float3 eye, U, V, W;            // camera origin and screen basis vectors
    OptixTraversableHandle handle;  // top-level traversable to trace against
    int subframe_index;             // frame counter, folded into the RNG seed
    bool ao;                        // true: shoot an ambient-occlusion ray in closest-hit
};
// SBT record data for the ray-generation program.
// NOTE(review): unused by the device code visible in this sample's .cu file;
// the camera is passed via Params instead — confirm before relying on it.
struct RayGenData
{
    float3 cam_eye;
    float3 camera_u, camera_v, camera_w;
};
// SBT record data for the camera-ray miss program.
struct MissData
{
    float4 bg_color;  // background color returned for rays that miss the scene
};
// SBT record data for the hit group.
struct HitGroupData
{
    float3 color;  // per-instance tint multiplied into the shaded normal color
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMotionGeometry/tiny_obj_loader.h | C/C++ Header | /*
The MIT License (MIT)
Copyright (c) 2012-2018 Syoyo Fujita and many contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//
// version 2.0.0 : Add new object oriented API. 1.x API is still provided.
// * Support line primitive.
// * Support points primitive.
// * Support multiple search path for .mtl(v1 API).
// version 1.4.0 : Modifed ParseTextureNameAndOption API
// version 1.3.1 : Make ParseTextureNameAndOption API public
// version 1.3.0 : Separate warning and error message(breaking API of LoadObj)
// version 1.2.3 : Added color space extension('-colorspace') to tex opts.
// version 1.2.2 : Parse multiple group names.
// version 1.2.1 : Added initial support for line('l') primitive(PR #178)
// version 1.2.0 : Hardened implementation(#175)
// version 1.1.1 : Support smoothing groups(#162)
// version 1.1.0 : Support parsing vertex color(#144)
// version 1.0.8 : Fix parsing `g` tag just after `usemtl`(#138)
// version 1.0.7 : Support multiple tex options(#126)
// version 1.0.6 : Add TINYOBJLOADER_USE_DOUBLE option(#124)
// version 1.0.5 : Ignore `Tr` when `d` exists in MTL(#43)
// version 1.0.4 : Support multiple filenames for 'mtllib'(#112)
// version 1.0.3 : Support parsing texture options(#85)
// version 1.0.2 : Improve parsing speed by about a factor of 2 for large
// files(#105)
// version 1.0.1 : Fixes a shape is lost if obj ends with a 'usemtl'(#104)
// version 1.0.0 : Change data structure. Change license from BSD to MIT.
//
//
// Use this in *one* .cc
// #define TINYOBJLOADER_IMPLEMENTATION
// #include "tiny_obj_loader.h"
//
#ifndef TINY_OBJ_LOADER_H_
#define TINY_OBJ_LOADER_H_
#include <map>
#include <string>
#include <vector>
namespace tinyobj {
// TODO(syoyo): Better C++11 detection for older compiler
#if __cplusplus > 199711L
#define TINYOBJ_OVERRIDE override
#else
#define TINYOBJ_OVERRIDE
#endif
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#pragma clang diagnostic ignored "-Wpadded"
#endif
// https://en.wikipedia.org/wiki/Wavefront_.obj_file says ...
//
// -blendu on | off # set horizontal texture blending
// (default on)
// -blendv on | off # set vertical texture blending
// (default on)
// -boost real_value # boost mip-map sharpness
// -mm base_value gain_value # modify texture map values (default
// 0 1)
// # base_value = brightness,
// gain_value = contrast
// -o u [v [w]] # Origin offset (default
// 0 0 0)
// -s u [v [w]] # Scale (default
// 1 1 1)
// -t u [v [w]] # Turbulence (default
// 0 0 0)
// -texres resolution # texture resolution to create
// -clamp on | off # only render texels in the clamped
// 0-1 range (default off)
// # When unclamped, textures are
// repeated across a surface,
// # when clamped, only texels which
// fall within the 0-1
// # range are rendered.
// -bm mult_value # bump multiplier (for bump maps
// only)
//
// -imfchan r | g | b | m | l | z # specifies which channel of the file
// is used to
// # create a scalar or bump texture.
// r:red, g:green,
// # b:blue, m:matte, l:luminance,
// z:z-depth..
// # (the default for bump is 'l' and
// for decal is 'm')
// bump -imfchan r bumpmap.tga # says to use the red channel of
// bumpmap.tga as the bumpmap
//
// For reflection maps...
//
// -type sphere # specifies a sphere for a "refl"
// reflection map
// -type cube_top | cube_bottom | # when using a cube map, the texture
// file for each
// cube_front | cube_back | # side of the cube is specified
// separately
// cube_left | cube_right
//
// TinyObjLoader extension.
//
// -colorspace SPACE # Color space of the texture. e.g.
// 'sRGB` or 'linear'
//
#ifdef TINYOBJLOADER_USE_DOUBLE
//#pragma message "using double"
typedef double real_t;
#else
//#pragma message "using float"
typedef float real_t;
#endif
// Reflection-map geometry selected with the '-type' texture option.
typedef enum {
  TEXTURE_TYPE_NONE,  // default
  TEXTURE_TYPE_SPHERE,
  TEXTURE_TYPE_CUBE_TOP,
  TEXTURE_TYPE_CUBE_BOTTOM,
  TEXTURE_TYPE_CUBE_FRONT,
  TEXTURE_TYPE_CUBE_BACK,
  TEXTURE_TYPE_CUBE_LEFT,
  TEXTURE_TYPE_CUBE_RIGHT
} texture_type_t;
// Options parsed from the '-xxx' flags of a texture map line in a .mtl file
// (see the option reference in the comment block above).
struct texture_option_t {
  texture_type_t type;      // -type (default TEXTURE_TYPE_NONE)
  real_t sharpness;         // -boost (default 1.0?)
  real_t brightness;        // base_value in -mm option (default 0)
  real_t contrast;          // gain_value in -mm option (default 1)
  real_t origin_offset[3];  // -o u [v [w]] (default 0 0 0)
  real_t scale[3];          // -s u [v [w]] (default 1 1 1)
  real_t turbulence[3];     // -t u [v [w]] (default 0 0 0)
  int texture_resolution;   // -texres resolution (No default value in the spec. We'll use -1)
  bool clamp;               // -clamp (default false)
  char imfchan;             // -imfchan (the default for bump is 'l' and for decal is 'm')
  bool blendu;              // -blendu (default on)
  bool blendv;              // -blendv (default on)
  real_t bump_multiplier;   // -bm (for bump maps only, default 1.0)
  // extension
  std::string colorspace;   // Explicitly specify color space of stored texel
                            // value. Usually `sRGB` or `linear` (default empty).
};
// One parsed .mtl material: classic Phong-style parameters, texture map names
// with their per-map options, the PBR extension fields, and any unrecognized
// key/value lines collected in 'unknown_parameter'.
struct material_t {
  std::string name;
  real_t ambient[3];
  real_t diffuse[3];
  real_t specular[3];
  real_t transmittance[3];
  real_t emission[3];
  real_t shininess;
  real_t ior;       // index of refraction
  real_t dissolve;  // 1 == opaque; 0 == fully transparent
  // illumination model (see http://www.fileformat.info/format/material/)
  int illum;
  int dummy;  // Suppress padding warning.
  std::string ambient_texname;             // map_Ka
  std::string diffuse_texname;             // map_Kd
  std::string specular_texname;            // map_Ks
  std::string specular_highlight_texname;  // map_Ns
  std::string bump_texname;                // map_bump, map_Bump, bump
  std::string displacement_texname;        // disp
  std::string alpha_texname;               // map_d
  std::string reflection_texname;          // refl
  texture_option_t ambient_texopt;
  texture_option_t diffuse_texopt;
  texture_option_t specular_texopt;
  texture_option_t specular_highlight_texopt;
  texture_option_t bump_texopt;
  texture_option_t displacement_texopt;
  texture_option_t alpha_texopt;
  texture_option_t reflection_texopt;
  // PBR extension
  // http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr
  real_t roughness;            // [0, 1] default 0
  real_t metallic;             // [0, 1] default 0
  real_t sheen;                // [0, 1] default 0
  real_t clearcoat_thickness;  // [0, 1] default 0
  real_t clearcoat_roughness;  // [0, 1] default 0
  real_t anisotropy;           // aniso. [0, 1] default 0
  real_t anisotropy_rotation;  // anisor. [0, 1] default 0
  real_t pad0;
  std::string roughness_texname;  // map_Pr
  std::string metallic_texname;   // map_Pm
  std::string sheen_texname;      // map_Ps
  std::string emissive_texname;   // map_Ke
  std::string normal_texname;     // norm. For normal mapping.
  texture_option_t roughness_texopt;
  texture_option_t metallic_texopt;
  texture_option_t sheen_texopt;
  texture_option_t emissive_texopt;
  texture_option_t normal_texopt;
  int pad2;
  std::map<std::string, std::string> unknown_parameter;
#ifdef TINY_OBJ_LOADER_PYTHON_BINDING
  // For pybind11: accessors converting the real_t triples to double arrays.
  std::array<double, 3> GetDiffuse() {
    std::array<double, 3> values;
    values[0] = double(diffuse[0]);
    values[1] = double(diffuse[1]);
    values[2] = double(diffuse[2]);
    return values;
  }
  std::array<double, 3> GetSpecular() {
    std::array<double, 3> values;
    values[0] = double(specular[0]);
    values[1] = double(specular[1]);
    values[2] = double(specular[2]);
    return values;
  }
  std::array<double, 3> GetTransmittance() {
    std::array<double, 3> values;
    values[0] = double(transmittance[0]);
    values[1] = double(transmittance[1]);
    values[2] = double(transmittance[2]);
    return values;
  }
  std::array<double, 3> GetEmission() {
    std::array<double, 3> values;
    values[0] = double(emission[0]);
    values[1] = double(emission[1]);
    values[2] = double(emission[2]);
    return values;
  }
  std::array<double, 3> GetAmbient() {
    std::array<double, 3> values;
    values[0] = double(ambient[0]);
    values[1] = double(ambient[1]);
    values[2] = double(ambient[2]);
    return values;
  }
  void SetDiffuse(std::array<double, 3> &a) {
    diffuse[0] = real_t(a[0]);
    diffuse[1] = real_t(a[1]);
    diffuse[2] = real_t(a[2]);
  }
  void SetAmbient(std::array<double, 3> &a) {
    ambient[0] = real_t(a[0]);
    ambient[1] = real_t(a[1]);
    ambient[2] = real_t(a[2]);
  }
  void SetSpecular(std::array<double, 3> &a) {
    specular[0] = real_t(a[0]);
    specular[1] = real_t(a[1]);
    specular[2] = real_t(a[2]);
  }
  void SetTransmittance(std::array<double, 3> &a) {
    transmittance[0] = real_t(a[0]);
    transmittance[1] = real_t(a[1]);
    transmittance[2] = real_t(a[2]);
  }
  // Returns the value stored for 'key' in unknown_parameter, or "" if absent.
  std::string GetCustomParameter(const std::string &key) {
    std::map<std::string, std::string>::const_iterator it =
        unknown_parameter.find(key);
    if (it != unknown_parameter.end()) {
      return it->second;
    }
    return std::string();
  }
#endif
};
// SubD tag: a named list of int/float/string values parsed from a 't' line.
struct tag_t {
  std::string name;
  std::vector<int> intValues;
  std::vector<real_t> floatValues;
  std::vector<std::string> stringValues;
};
// Index struct to support different indices for vtx/normal/texcoord.
// -1 means not used.
struct index_t {
  int vertex_index;
  int normal_index;
  int texcoord_index;
};
// Triangulated/polygonal face data of one shape (flat, per-face arrays).
struct mesh_t {
  std::vector<int> vertex_indices;
  std::vector<int> normal_indices;
  std::vector<int> texcoord_indices;
  std::vector<unsigned char>
      num_face_vertices;          // The number of vertices per
                                  // face. 3 = triangle, 4 = quad,
                                  // ... Up to 255 vertices per face.
  std::vector<int> material_ids;  // per-face material ID
  std::vector<unsigned int> smoothing_group_ids;  // per-face smoothing group
                                                  // ID(0 = off. positive value
                                                  // = group id)
  std::vector<tag_t> tags;        // SubD tag
};
// struct path_t {
//  std::vector<int> indices;  // pairs of indices for lines
//};
// Polyline ('l') primitive data.
struct lines_t {
  // Linear flattened indices.
  std::vector<index_t> indices;        // indices for vertices(poly lines)
  std::vector<int> num_line_vertices;  // The number of vertices per line.
};
// Point ('p') primitive data.
struct points_t {
  std::vector<index_t> indices;  // indices for points
};
// One named shape: faces, lines and points that reference attrib_t arrays.
struct shape_t {
  std::string name;
  mesh_t mesh;
  lines_t lines;
  points_t points;
};
// Vertex attributes shared by all shapes (indexed via index_t).
struct attrib_t {
  std::vector<real_t> vertices;  // 'v'(xyz)
  // For backward compatibility, we store vertex weight in separate array.
  std::vector<real_t> vertex_weights;  // 'v'(w)
  std::vector<real_t> normals;         // 'vn'
  std::vector<real_t> texcoords;       // 'vt'(uv)
  // For backward compatibility, we store texture coordinate 'w' in separate
  // array.
  std::vector<real_t> texcoord_ws;  // 'vt'(w)
  std::vector<real_t> colors;       // extension: vertex colors
  attrib_t() {}
  //
  // For pybind11
  //
  const std::vector<real_t> &GetVertices() const { return vertices; }
  const std::vector<real_t> &GetVertexWeights() const { return vertex_weights; }
};
// User callbacks for the streaming parser (LoadObjWithCallback). Any pointer
// may be left NULL to skip that event; all default to NULL.
struct callback_t {
  // W is optional and set to 1 if there is no `w` item in `v` line
  void (*vertex_cb)(void *user_data, real_t x, real_t y, real_t z, real_t w);
  void (*normal_cb)(void *user_data, real_t x, real_t y, real_t z);
  // y and z are optional and set to 0 if there is no `y` and/or `z` item(s) in
  // `vt` line.
  void (*texcoord_cb)(void *user_data, real_t x, real_t y, real_t z);
  // called per 'f' line. num_indices is the number of face indices(e.g. 3 for
  // triangle, 4 for quad)
  // 0 will be passed for undefined index in index_t members.
  void (*index_cb)(void *user_data, index_t *indices, int num_indices);
  // `name` material name, `material_id` = the array index of material_t[]. -1
  // if
  // a material not found in .mtl
  void (*usemtl_cb)(void *user_data, const char *name, int material_id);
  // `materials` = parsed material data.
  void (*mtllib_cb)(void *user_data, const material_t *materials,
                    int num_materials);
  // There may be multiple group names
  void (*group_cb)(void *user_data, const char **names, int num_names);
  void (*object_cb)(void *user_data, const char *name);
  callback_t()
      : vertex_cb(NULL),
        normal_cb(NULL),
        texcoord_cb(NULL),
        index_cb(NULL),
        usemtl_cb(NULL),
        mtllib_cb(NULL),
        group_cb(NULL),
        object_cb(NULL) {}
};
// Abstract source of .mtl data: given a material-library id, fills 'materials'
// and the name->index 'matMap', appending warnings/errors to 'warn'/'err'.
class MaterialReader {
 public:
  MaterialReader() {}
  virtual ~MaterialReader();
  virtual bool operator()(const std::string &matId,
                          std::vector<material_t> *materials,
                          std::map<std::string, int> *matMap, std::string *warn,
                          std::string *err) = 0;
};
///
/// Read .mtl from a file.
///
class MaterialFileReader : public MaterialReader {
 public:
  // Path could contain separator(';' in Windows, ':' in Posix)
  explicit MaterialFileReader(const std::string &mtl_basedir)
      : m_mtlBaseDir(mtl_basedir) {}
  virtual ~MaterialFileReader() TINYOBJ_OVERRIDE {}
  virtual bool operator()(const std::string &matId,
                          std::vector<material_t> *materials,
                          std::map<std::string, int> *matMap, std::string *warn,
                          std::string *err) TINYOBJ_OVERRIDE;
 private:
  std::string m_mtlBaseDir;  // search base directory (or directories) for .mtl files
};
///
/// Read .mtl from a stream.
///
class MaterialStreamReader : public MaterialReader {
 public:
  explicit MaterialStreamReader(std::istream &inStream)
      : m_inStream(inStream) {}
  virtual ~MaterialStreamReader() TINYOBJ_OVERRIDE {}
  virtual bool operator()(const std::string &matId,
                          std::vector<material_t> *materials,
                          std::map<std::string, int> *matMap, std::string *warn,
                          std::string *err) TINYOBJ_OVERRIDE;
 private:
  std::istream &m_inStream;  // caller-owned stream; must outlive this reader
};
// v2 API
struct ObjReaderConfig {
  bool triangulate;  // triangulate polygon?
  /// Parse vertex color.
  /// If vertex color is not present, its filled with default value.
  /// false = no vertex color
  /// This will increase memory of parsed .obj
  bool vertex_color;
  ///
  /// Search path to .mtl file.
  /// Default = "" = search from the same directory of .obj file.
  /// Valid only when loading .obj from a file.
  ///
  std::string mtl_search_path;
  ObjReaderConfig() : triangulate(true), vertex_color(true) {}
};
///
/// Wavefront .obj reader class (v2 API).
/// Owns the parsed attributes, shapes and materials; query them with the
/// accessors after a successful ParseFromFile/ParseFromString.
///
class ObjReader {
 public:
  ObjReader() : valid_(false) {}
  ~ObjReader() {}
  ///
  /// Load .obj and .mtl from a file.
  ///
  /// @param[in] filename wavefront .obj filename
  /// @param[in] config Reader configuration
  ///
  bool ParseFromFile(const std::string &filename,
                     const ObjReaderConfig &config = ObjReaderConfig());
  ///
  /// Parse .obj from a text string.
  /// Need to supply .mtl text string by `mtl_text`.
  /// This function ignores `mtllib` line in .obj text.
  ///
  /// @param[in] obj_text wavefront .obj text content
  /// @param[in] mtl_text wavefront .mtl text content
  /// @param[in] config Reader configuration
  ///
  bool ParseFromString(const std::string &obj_text, const std::string &mtl_text,
                       const ObjReaderConfig &config = ObjReaderConfig());
  ///
  /// Returns true if the .obj was loaded and parsed correctly.
  ///
  bool Valid() const { return valid_; }
  const attrib_t &GetAttrib() const { return attrib_; }
  const std::vector<shape_t> &GetShapes() const { return shapes_; }
  const std::vector<material_t> &GetMaterials() const { return materials_; }
  ///
  /// Warning message (may be filled after `Load` or `Parse`).
  ///
  const std::string &Warning() const { return warning_; }
  ///
  /// Error message (filled when `Load` or `Parse` failed).
  ///
  const std::string &Error() const { return error_; }
 private:
  bool valid_;                        // set true only on successful parse
  attrib_t attrib_;                   // shared vertex attributes
  std::vector<shape_t> shapes_;       // parsed shapes
  std::vector<material_t> materials_; // parsed materials
  std::string warning_;               // accumulated parser warnings
  std::string error_;                 // accumulated parser errors
};
/// ==>>========= Legacy v1 API =============================================
/// Loads .obj from a file.
/// 'attrib', 'shapes' and 'materials' will be filled with parsed shape data.
/// Returns true when the .obj was loaded successfully.
/// Returns warning message into `warn`, and error message into `err`.
/// 'mtl_basedir' is optional, and used as the base directory for the .mtl
/// file. By default (`NULL`), the .mtl file is searched from the
/// application's working directory.
/// 'triangulate' is optional, and controls whether polygon faces in the .obj
/// are triangulated or not.
/// Option 'default_vcols_fallback' specifies whether vertex colors should
/// always be defined, even if no colors are given (fallback to white).
bool LoadObj(attrib_t *attrib, std::vector<shape_t> *shapes,
             std::vector<material_t> *materials, std::string *warn,
             std::string *err, const char *filename,
             const char *mtl_basedir = NULL, bool triangulate = true,
             bool default_vcols_fallback = true);
/// Loads .obj from a file with custom user callbacks.
/// .mtl is loaded as usual and parsed material_t data will be passed to
/// `callback.mtllib_cb`.
/// Returns true when loading .obj/.mtl succeeded.
/// Returns warning message into `warn`, and error message into `err`.
/// See `examples/callback_api/` for how to use this function.
bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback,
                         void *user_data = NULL,
                         MaterialReader *readMatFn = NULL,
                         std::string *warn = NULL, std::string *err = NULL);
/// Loads object from a std::istream, uses `readMatFn` to retrieve
/// std::istream for materials.
/// Returns true when loading .obj succeeded.
/// Returns warning and error messages into `err`.
bool LoadObj(attrib_t *attrib, std::vector<shape_t> *shapes,
             std::vector<material_t> *materials, std::string *warn,
             std::string *err, std::istream *inStream,
             MaterialReader *readMatFn = NULL, bool triangulate = true,
             bool default_vcols_fallback = true);
/// Loads materials into std::map
void LoadMtl(std::map<std::string, int> *material_map,
             std::vector<material_t> *materials, std::istream *inStream,
             std::string *warning, std::string *err);
///
/// Parse texture name and texture option for custom texture parameter through
/// material::unknown_parameter
///
/// @param[out] texname Parsed texture name
/// @param[out] texopt Parsed texopt
/// @param[in] linebuf Input string
///
bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt,
                               const char *linebuf);
/// =<<========== Legacy v1 API =============================================
} // namespace tinyobj
#endif // TINY_OBJ_LOADER_H_
#ifdef TINYOBJLOADER_IMPLEMENTATION
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <utility>
#include <fstream>
#include <sstream>
namespace tinyobj {
// Out-of-line definition of the MaterialReader interface destructor.
MaterialReader::~MaterialReader() {}
// Index triple for one face corner: position (v), texcoord (vt) and
// normal (vn). -1 marks "not specified" (the default constructor).
struct vertex_index_t {
  int v_idx, vt_idx, vn_idx;
  vertex_index_t() : v_idx(-1), vt_idx(-1), vn_idx(-1) {}
  explicit vertex_index_t(int idx) : v_idx(idx), vt_idx(idx), vn_idx(idx) {}
  vertex_index_t(int vidx, int vtidx, int vnidx)
      : v_idx(vidx), vt_idx(vtidx), vn_idx(vnidx) {}
};
// Internal data structure for face representation:
// corner indices + smoothing group.
struct face_t {
  unsigned int
      smoothing_group_id;  // smoothing group id. 0 = smoothing group is off.
  int pad_;  // NOTE(review): presumably explicit padding; never read.
  std::vector<vertex_index_t> vertex_indices;  // face vertex indices.
  face_t() : smoothing_group_id(0), pad_(0) {}
};
// Internal data structure for line representation
struct __line_t {
  // l v1/vt1 v2/vt2 ...
  // In the specification, the line primitive does not have a normal index,
  // but TinyObjLoader allows it.
  std::vector<vertex_index_t> vertex_indices;
};
// Internal data structure for points representation
struct __points_t {
  // p v1 v2 ...
  // In the specification, the point primitive does not have a normal index
  // or a texture coord index, but TinyObjLoader allows both.
  std::vector<vertex_index_t> vertex_indices;
};
// Element counts of a tag's value arrays, parsed from an
// "ints/reals/strings" triple (see parseTagTriple).
struct tag_sizes {
  tag_sizes() : num_ints(0), num_reals(0), num_strings(0) {}
  int num_ints;
  int num_reals;
  int num_strings;
};
// Flat per-shape attribute arrays named after the OBJ statements that
// produce them (v = positions, vn = normals, vt = texcoords).
struct obj_shape {
  std::vector<real_t> v;
  std::vector<real_t> vn;
  std::vector<real_t> vt;
};
//
// Manages group of primitives(face, line, points, ...)
struct PrimGroup {
  std::vector<face_t> faceGroup;
  std::vector<__line_t> lineGroup;
  std::vector<__points_t> pointsGroup;
  // Drop every primitive collected so far.
  void clear() {
    faceGroup.clear();
    lineGroup.clear();
    pointsGroup.clear();
  }
  // True when no primitive of any kind has been collected.
  bool IsEmpty() const {
    return faceGroup.empty() && lineGroup.empty() && pointsGroup.empty();
  }
  // TODO(syoyo): bspline, surface, ...
};
// Line reader that accepts "\n", "\r\n" and bare "\r" terminators. See
// http://stackoverflow.com/questions/6089231/getting-std-ifstream-to-handle-lf-cr-and-crlf
static std::istream &safeGetline(std::istream &is, std::string &t) {
  t.clear();
  // Characters are pulled straight from the streambuf, which is faster than
  // extracting them one-by-one through the istream interface. Streambuf
  // access must be guarded by a sentry object, which performs tasks such as
  // thread synchronization and updating the stream state.
  std::istream::sentry guard(is, true);
  std::streambuf *buf = is.rdbuf();
  if (!guard) {
    return is;
  }
  while (true) {
    const int ch = buf->sbumpc();
    if (ch == '\n') {
      return is;
    } else if (ch == '\r') {
      // Swallow the '\n' of a CRLF pair so it counts as one terminator.
      if (buf->sgetc() == '\n') buf->sbumpc();
      return is;
    } else if (ch == EOF) {
      // A final line without a terminator is still delivered; only set
      // eofbit when nothing at all was read.
      if (t.empty()) is.setstate(std::ios::eofbit);
      return is;
    } else {
      t.push_back(static_cast<char>(ch));
    }
  }
}
// Character-class helpers used throughout the parser.
#define IS_SPACE(x) (((x) == ' ') || ((x) == '\t'))
// Digit test via a single unsigned comparison: maps '0'..'9' onto 0..9 and
// rejects everything else (including values below '0', which wrap around).
#define IS_DIGIT(x) \
  (static_cast<unsigned int>((x) - '0') < static_cast<unsigned int>(10))
// '\0' counts as a line terminator so scans over C strings stop at the end.
#define IS_NEW_LINE(x) (((x) == '\r') || ((x) == '\n') || ((x) == '\0'))
// Convert a 1-based OBJ index into a zero-based one, resolving negative
// (relative) indices against the current element count `n`. Returns false
// for a NULL out-pointer or the spec-invalid index 0.
static inline bool fixIndex(int idx, int n, int *ret) {
  if (!ret) {
    return false;
  }
  if (idx == 0) {
    // Zero is not allowed according to the spec.
    return false;
  }
  if (idx > 0) {
    (*ret) = idx - 1;  // 1-based -> 0-based
  } else {
    (*ret) = n + idx;  // negative value = relative to the current count
  }
  return true;
}
// Extract the next blank-delimited token as a std::string and advance
// *token just past it.
static inline std::string parseString(const char **token) {
  (*token) += strspn((*token), " \t");            // skip leading blanks
  const size_t len = strcspn((*token), " \t\r");  // token extent
  std::string result((*token), (*token) + len);
  (*token) += len;
  return result;
}
// Parse the next blank-delimited token as an int (atoi semantics: 0 on
// non-numeric input) and advance *token past it.
static inline int parseInt(const char **token) {
  (*token) += strspn((*token), " \t");     // skip leading blanks
  const int value = atoi((*token));
  (*token) += strcspn((*token), " \t\r");  // consume the token
  return value;
}
// Tries to parse a floating point number located at s.
//
// s_end should be a location in the string where reading should absolutely
// stop. For example at the end of the string, to prevent buffer overflows.
//
// Parses the following EBNF grammar:
//   sign    = "+" | "-" ;
//   END     = ? anything not in digit ?
//   digit   = "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" ;
//   integer = [sign] , digit , {digit} ;
//   decimal = integer , ["." , integer] ;
//   float   = ( decimal , END ) | ( decimal , ("E" | "e") , integer , END ) ;
//
//  Valid strings are for example:
//   -0  +3.1417e+2  -0.0E-3  1.0324  -1.41  11e2
//
// If the parsing is a success, result is set to the parsed value and true
// is returned.
//
// The function is greedy and will parse until any of the following happens:
//  - a non-conforming character is encountered.
//  - s_end is reached.
//
// The following situations triggers a failure:
//  - s >= s_end.
//  - parse failure.
//
static bool tryParseDouble(const char *s, const char *s_end, double *result) {
  if (s >= s_end) {
    return false;
  }
  double mantissa = 0.0;
  // This exponent is base 2 rather than 10.
  // However the exponent we parse is supposed to be one of ten,
  // thus we must take care to convert the exponent/and or the
  // mantissa to a * 2^E, where a is the mantissa and E is the
  // exponent.
  // To get the final double we will use ldexp, it requires the
  // exponent to be in base 2.
  int exponent = 0;
  // NOTE: THESE MUST BE DECLARED HERE SINCE WE ARE NOT ALLOWED
  // TO JUMP OVER DEFINITIONS.
  char sign = '+';
  char exp_sign = '+';
  char const *curr = s;
  // How many characters were read in a loop.
  int read = 0;
  // Tells whether a loop terminated due to reaching s_end.
  bool end_not_reached = false;
  bool leading_decimal_dots = false;
  /*
          BEGIN PARSING.
  */
  // Find out what sign we've got.
  if (*curr == '+' || *curr == '-') {
    sign = *curr;
    curr++;
    if ((curr != s_end) && (*curr == '.')) {
      // accept. Something like `.7e+2`, `-.5234`
      leading_decimal_dots = true;
    }
  } else if (IS_DIGIT(*curr)) { /* Pass through. */
  } else if (*curr == '.') {
    // accept. Something like `.7e+2`, `-.5234`
    leading_decimal_dots = true;
  } else {
    goto fail;
  }
  // Read the integer part.
  end_not_reached = (curr != s_end);
  if (!leading_decimal_dots) {
    while (end_not_reached && IS_DIGIT(*curr)) {
      mantissa *= 10;
      mantissa += static_cast<int>(*curr - 0x30);
      curr++;
      read++;
      end_not_reached = (curr != s_end);
    }
    // We must make sure we actually got something.
    if (read == 0) goto fail;
  }
  // We allow numbers of form "#", "###" etc.
  if (!end_not_reached) goto assemble;
  // Read the decimal part.
  if (*curr == '.') {
    curr++;
    read = 1;
    end_not_reached = (curr != s_end);
    while (end_not_reached && IS_DIGIT(*curr)) {
      static const double pow_lut[] = {
          1.0, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001,
      };
      const int lut_entries = sizeof pow_lut / sizeof pow_lut[0];
      // NOTE: Don't use powf here, it will absolutely murder precision.
      mantissa += static_cast<int>(*curr - 0x30) *
                  (read < lut_entries ? pow_lut[read] : std::pow(10.0, -read));
      read++;
      curr++;
      end_not_reached = (curr != s_end);
    }
  } else if (*curr == 'e' || *curr == 'E') {
  } else {
    goto assemble;
  }
  if (!end_not_reached) goto assemble;
  // Read the exponent part.
  if (*curr == 'e' || *curr == 'E') {
    curr++;
    // Figure out if a sign is present and if it is.
    end_not_reached = (curr != s_end);
    if (end_not_reached && (*curr == '+' || *curr == '-')) {
      exp_sign = *curr;
      curr++;
    } else if (end_not_reached && IS_DIGIT(*curr)) {
      // FIX: guard the dereference with `end_not_reached` — an input such
      // as "1e" ending exactly at s_end previously read *s_end, which the
      // contract above forbids. An empty exponent now falls through to
      // fail, consistent with the "Empty E is not allowed" rule below.
      /* Pass through. */
    } else {
      // Empty E is not allowed.
      goto fail;
    }
    read = 0;
    end_not_reached = (curr != s_end);
    while (end_not_reached && IS_DIGIT(*curr)) {
      exponent *= 10;
      exponent += static_cast<int>(*curr - 0x30);
      curr++;
      read++;
      end_not_reached = (curr != s_end);
    }
    exponent *= (exp_sign == '+' ? 1 : -1);
    if (read == 0) goto fail;
  }
assemble:
  // mantissa * 10^exponent == mantissa * 5^exponent * 2^exponent, and
  // ldexp applies the base-2 part exactly.
  *result = (sign == '+' ? 1 : -1) *
            (exponent ? std::ldexp(mantissa * std::pow(5.0, exponent), exponent)
                      : mantissa);
  return true;
fail:
  return false;
}
// Parse the next token as a real; yields default_value when the token is
// not a valid number. Advances *token past the token either way.
static inline real_t parseReal(const char **token, double default_value = 0.0) {
  (*token) += strspn((*token), " \t");
  const char *tok_end = (*token) + strcspn((*token), " \t\r");
  double value = default_value;
  tryParseDouble((*token), tok_end, &value);
  (*token) = tok_end;
  return static_cast<real_t>(value);
}
static inline bool parseReal(const char **token, real_t *out) {
(*token) += strspn((*token), " \t");
const char *end = (*token) + strcspn((*token), " \t\r");
double val;
bool ret = tryParseDouble((*token), end, &val);
if (ret) {
real_t f = static_cast<real_t>(val);
(*out) = f;
}
(*token) = end;
return ret;
}
// Parse two reals in sequence, each with its own fallback default.
static inline void parseReal2(real_t *x, real_t *y, const char **token,
                              const double default_x = 0.0,
                              const double default_y = 0.0) {
  *x = parseReal(token, default_x);
  *y = parseReal(token, default_y);
}
// Parse three reals in sequence, each with its own fallback default.
static inline void parseReal3(real_t *x, real_t *y, real_t *z,
                              const char **token, const double default_x = 0.0,
                              const double default_y = 0.0,
                              const double default_z = 0.0) {
  *x = parseReal(token, default_x);
  *y = parseReal(token, default_y);
  *z = parseReal(token, default_z);
}
// Parse an OBJ `v` record: x y z [w], with w defaulting to 1.0.
static inline void parseV(real_t *x, real_t *y, real_t *z, real_t *w,
                          const char **token, const double default_x = 0.0,
                          const double default_y = 0.0,
                          const double default_z = 0.0,
                          const double default_w = 1.0) {
  *x = parseReal(token, default_x);
  *y = parseReal(token, default_y);
  *z = parseReal(token, default_z);
  *w = parseReal(token, default_w);
}
// Extension: parse a vertex that may carry RGB color (6 items:
// "x y z r g b"). Returns true when a color was present; otherwise r/g/b
// fall back to white (1.0).
static inline bool parseVertexWithColor(real_t *x, real_t *y, real_t *z,
                                        real_t *r, real_t *g, real_t *b,
                                        const char **token,
                                        const double default_x = 0.0,
                                        const double default_y = 0.0,
                                        const double default_z = 0.0) {
  *x = parseReal(token, default_x);
  *y = parseReal(token, default_y);
  *z = parseReal(token, default_z);
  // && short-circuits left-to-right, so parsing stops at the first
  // missing color component.
  const bool found_color =
      parseReal(token, r) && parseReal(token, g) && parseReal(token, b);
  if (!found_color) {
    *r = *g = *b = static_cast<real_t>(1.0);
  }
  return found_color;
}
// Parse an "on"/"off" flag token; anything else yields default_value.
// Advances *token past the token in every case.
static inline bool parseOnOff(const char **token, bool default_value = true) {
  (*token) += strspn((*token), " \t");
  const char *tok_end = (*token) + strcspn((*token), " \t\r");
  bool value = default_value;
  if (strncmp((*token), "on", 2) == 0) {
    value = true;
  } else if (strncmp((*token), "off", 3) == 0) {
    value = false;
  }
  (*token) = tok_end;
  return value;
}
// Map a texture-type keyword ("cube_top", "sphere", ...) to its enum
// value; unrecognized tokens yield default_value. The match is a prefix
// comparison, mirroring the original strncmp chain. Advances *token.
static inline texture_type_t parseTextureType(
    const char **token, texture_type_t default_value = TEXTURE_TYPE_NONE) {
  (*token) += strspn((*token), " \t");
  const char *tok_end = (*token) + strcspn((*token), " \t\r");
  struct KeywordMapping {
    const char *keyword;
    texture_type_t type;
  };
  static const KeywordMapping kMappings[] = {
      {"cube_top", TEXTURE_TYPE_CUBE_TOP},
      {"cube_bottom", TEXTURE_TYPE_CUBE_BOTTOM},
      {"cube_left", TEXTURE_TYPE_CUBE_LEFT},
      {"cube_right", TEXTURE_TYPE_CUBE_RIGHT},
      {"cube_front", TEXTURE_TYPE_CUBE_FRONT},
      {"cube_back", TEXTURE_TYPE_CUBE_BACK},
      {"sphere", TEXTURE_TYPE_SPHERE},
  };
  texture_type_t ty = default_value;
  for (size_t i = 0; i < sizeof(kMappings) / sizeof(kMappings[0]); i++) {
    if (0 == strncmp((*token), kMappings[i].keyword,
                     strlen(kMappings[i].keyword))) {
      ty = kMappings[i].type;
      break;
    }
  }
  (*token) = tok_end;
  return ty;
}
// Parse a tag size triple "ints[/reals[/strings]]"; omitted parts keep
// their zero defaults.
static tag_sizes parseTagTriple(const char **token) {
  tag_sizes sizes;
  (*token) += strspn((*token), " \t");
  sizes.num_ints = atoi((*token));
  (*token) += strcspn((*token), "/ \t\r");
  if ((*token)[0] != '/') {
    return sizes;  // only the int count was given
  }
  (*token)++;  // Skip '/'
  (*token) += strspn((*token), " \t");
  sizes.num_reals = atoi((*token));
  (*token) += strcspn((*token), "/ \t\r");
  if ((*token)[0] != '/') {
    return sizes;  // no string count
  }
  (*token)++;  // Skip '/'
  sizes.num_strings = parseInt(token);
  return sizes;
}
// Parse triples with index offsets: i, i/j/k, i//k, i/j
static bool parseTriple(const char **token, int vsize, int vnsize, int vtsize,
vertex_index_t *ret) {
if (!ret) {
return false;
}
vertex_index_t vi(-1);
if (!fixIndex(atoi((*token)), vsize, &(vi.v_idx))) {
return false;
}
(*token) += strcspn((*token), "/ \t\r");
if ((*token)[0] != '/') {
(*ret) = vi;
return true;
}
(*token)++;
// i//k
if ((*token)[0] == '/') {
(*token)++;
if (!fixIndex(atoi((*token)), vnsize, &(vi.vn_idx))) {
return false;
}
(*token) += strcspn((*token), "/ \t\r");
(*ret) = vi;
return true;
}
// i/j/k or i/j
if (!fixIndex(atoi((*token)), vtsize, &(vi.vt_idx))) {
return false;
}
(*token) += strcspn((*token), "/ \t\r");
if ((*token)[0] != '/') {
(*ret) = vi;
return true;
}
// i/j/k
(*token)++; // skip '/'
if (!fixIndex(atoi((*token)), vnsize, &(vi.vn_idx))) {
return false;
}
(*token) += strcspn((*token), "/ \t\r");
(*ret) = vi;
return true;
}
// Parse one face corner keeping raw (unresolved) indices: "i", "i/j",
// "i//k" or "i/j/k". 0 marks "absent", since 0 is never a valid OBJ index.
static vertex_index_t parseRawTriple(const char **token) {
  vertex_index_t idx(static_cast<int>(0));  // 0 is an invalid index in OBJ
  idx.v_idx = atoi((*token));
  (*token) += strcspn((*token), "/ \t\r");
  if ((*token)[0] != '/') {
    return idx;  // "i"
  }
  (*token)++;
  if ((*token)[0] == '/') {
    // "i//k" — texcoord omitted.
    (*token)++;
    idx.vn_idx = atoi((*token));
    (*token) += strcspn((*token), "/ \t\r");
    return idx;
  }
  // "i/j" or "i/j/k".
  idx.vt_idx = atoi((*token));
  (*token) += strcspn((*token), "/ \t\r");
  if ((*token)[0] != '/') {
    return idx;
  }
  // "i/j/k".
  (*token)++;  // skip '/'
  idx.vn_idx = atoi((*token));
  (*token) += strcspn((*token), "/ \t\r");
  return idx;
}
// Parses the option flags and the trailing filename of a texture statement
// (map_Kd, bump, ...). Recognized options mirror the MTL texture options:
// -blendu/-blendv/-clamp/-boost/-bm/-o/-s/-t/-type/-texres/-imfchan/-mm/
// -colorspace. Returns true and stores the filename into *texname when one
// was found; option values are written into *texopt as they are parsed.
bool ParseTextureNameAndOption(std::string *texname, texture_option_t *texopt,
                               const char *linebuf) {
  // @todo { write more robust lexer and parser. }
  bool found_texname = false;
  std::string texture_name;
  const char *token = linebuf; // Assume line ends with NULL
  while (!IS_NEW_LINE((*token))) {
    token += strspn(token, " \t");  // skip space
    if ((0 == strncmp(token, "-blendu", 7)) && IS_SPACE((token[7]))) {
      token += 8;
      texopt->blendu = parseOnOff(&token, /* default */ true);
    } else if ((0 == strncmp(token, "-blendv", 7)) && IS_SPACE((token[7]))) {
      token += 8;
      texopt->blendv = parseOnOff(&token, /* default */ true);
    } else if ((0 == strncmp(token, "-clamp", 6)) && IS_SPACE((token[6]))) {
      token += 7;
      texopt->clamp = parseOnOff(&token, /* default */ true);
    } else if ((0 == strncmp(token, "-boost", 6)) && IS_SPACE((token[6]))) {
      token += 7;
      texopt->sharpness = parseReal(&token, 1.0);
    } else if ((0 == strncmp(token, "-bm", 3)) && IS_SPACE((token[3]))) {
      token += 4;
      texopt->bump_multiplier = parseReal(&token, 1.0);
    } else if ((0 == strncmp(token, "-o", 2)) && IS_SPACE((token[2]))) {
      token += 3;
      parseReal3(&(texopt->origin_offset[0]), &(texopt->origin_offset[1]),
                 &(texopt->origin_offset[2]), &token);
    } else if ((0 == strncmp(token, "-s", 2)) && IS_SPACE((token[2]))) {
      token += 3;
      // Scale defaults to 1 on every axis.
      parseReal3(&(texopt->scale[0]), &(texopt->scale[1]), &(texopt->scale[2]),
                 &token, 1.0, 1.0, 1.0);
    } else if ((0 == strncmp(token, "-t", 2)) && IS_SPACE((token[2]))) {
      token += 3;
      parseReal3(&(texopt->turbulence[0]), &(texopt->turbulence[1]),
                 &(texopt->turbulence[2]), &token);
    } else if ((0 == strncmp(token, "-type", 5)) && IS_SPACE((token[5]))) {
      // No need to skip the separator here: parseTextureType skips blanks.
      token += 5;
      texopt->type = parseTextureType((&token), TEXTURE_TYPE_NONE);
    } else if ((0 == strncmp(token, "-texres", 7)) && IS_SPACE((token[7]))) {
      token += 7;
      // TODO(syoyo): Check if arg is int type.
      texopt->texture_resolution = parseInt(&token);
    } else if ((0 == strncmp(token, "-imfchan", 8)) && IS_SPACE((token[8]))) {
      token += 9;
      token += strspn(token, " \t");
      const char *end = token + strcspn(token, " \t\r");
      if ((end - token) == 1) {  // Assume one char for -imfchan
        texopt->imfchan = (*token);
      }
      token = end;
    } else if ((0 == strncmp(token, "-mm", 3)) && IS_SPACE((token[3]))) {
      token += 4;
      // -mm base gain: brightness defaults to 0, contrast to 1.
      parseReal2(&(texopt->brightness), &(texopt->contrast), &token, 0.0, 1.0);
    } else if ((0 == strncmp(token, "-colorspace", 11)) &&
               IS_SPACE((token[11]))) {
      token += 12;
      texopt->colorspace = parseString(&token);
    } else {
// Assume texture filename
#if 0
      size_t len = strcspn(token, " \t\r");  // untile next space
      texture_name = std::string(token, token + len);
      token += len;

      token += strspn(token, " \t");  // skip space
#else
      // Read filename until line end to parse filename containing whitespace
      // TODO(syoyo): Support parsing texture option flag after the filename.
      texture_name = std::string(token);
      token += texture_name.length();
#endif
      found_texname = true;
    }
  }
  if (found_texname) {
    (*texname) = texture_name;
    return true;
  } else {
    return false;
  }
}
// Reset a texture_option_t to its default values. Bump maps sample the
// luminance channel ('l') by default; every other map uses 'm'.
static void InitTexOpt(texture_option_t *texopt, const bool is_bump) {
  texopt->imfchan = is_bump ? 'l' : 'm';
  texopt->bump_multiplier = static_cast<real_t>(1.0);
  texopt->clamp = false;
  texopt->blendu = true;
  texopt->blendv = true;
  texopt->sharpness = static_cast<real_t>(1.0);
  texopt->brightness = static_cast<real_t>(0.0);
  texopt->contrast = static_cast<real_t>(1.0);
  // Identity placement: no offset, unit scale, no turbulence.
  for (int i = 0; i < 3; i++) {
    texopt->origin_offset[i] = static_cast<real_t>(0.0);
    texopt->scale[i] = static_cast<real_t>(1.0);
    texopt->turbulence[i] = static_cast<real_t>(0.0);
  }
  texopt->texture_resolution = -1;
  texopt->type = TEXTURE_TYPE_NONE;
}
// Reset `material` to default values: black color components, dissolve 1,
// shininess 1, ior 1, zeroed PBR scalars, empty texture names and default
// texture options for every map.
static void InitMaterial(material_t *material) {
  // Texture options (only the bump map defaults to the luminance channel).
  InitTexOpt(&material->ambient_texopt, /* is_bump */ false);
  InitTexOpt(&material->diffuse_texopt, /* is_bump */ false);
  InitTexOpt(&material->specular_texopt, /* is_bump */ false);
  InitTexOpt(&material->specular_highlight_texopt, /* is_bump */ false);
  InitTexOpt(&material->bump_texopt, /* is_bump */ true);
  InitTexOpt(&material->displacement_texopt, /* is_bump */ false);
  InitTexOpt(&material->alpha_texopt, /* is_bump */ false);
  InitTexOpt(&material->reflection_texopt, /* is_bump */ false);
  InitTexOpt(&material->roughness_texopt, /* is_bump */ false);
  InitTexOpt(&material->metallic_texopt, /* is_bump */ false);
  InitTexOpt(&material->sheen_texopt, /* is_bump */ false);
  InitTexOpt(&material->emissive_texopt, /* is_bump */ false);
  InitTexOpt(&material->normal_texopt,
             /* is_bump */ false);  // @fixme { is_bump will be true? }
  // Name and texture paths.
  material->name.clear();
  material->ambient_texname.clear();
  material->diffuse_texname.clear();
  material->specular_texname.clear();
  material->specular_highlight_texname.clear();
  material->bump_texname.clear();
  material->displacement_texname.clear();
  material->reflection_texname.clear();
  material->alpha_texname.clear();
  material->roughness_texname.clear();
  material->metallic_texname.clear();
  material->sheen_texname.clear();
  material->emissive_texname.clear();
  material->normal_texname.clear();
  // All color components default to black.
  for (int i = 0; i < 3; i++) {
    material->ambient[i] = static_cast<real_t>(0.0);
    material->diffuse[i] = static_cast<real_t>(0.0);
    material->specular[i] = static_cast<real_t>(0.0);
    material->transmittance[i] = static_cast<real_t>(0.0);
    material->emission[i] = static_cast<real_t>(0.0);
  }
  // Scalar parameters.
  material->illum = 0;
  material->dissolve = static_cast<real_t>(1.0);
  material->shininess = static_cast<real_t>(1.0);
  material->ior = static_cast<real_t>(1.0);
  // PBR extension scalars.
  material->roughness = static_cast<real_t>(0.0);
  material->metallic = static_cast<real_t>(0.0);
  material->sheen = static_cast<real_t>(0.0);
  material->clearcoat_thickness = static_cast<real_t>(0.0);
  material->clearcoat_roughness = static_cast<real_t>(0.0);
  material->anisotropy_rotation = static_cast<real_t>(0.0);
  material->anisotropy = static_cast<real_t>(0.0);
  material->unknown_parameter.clear();
}
// Point-in-polygon test via ray crossing; code adapted from
// https://wrf.ecse.rpi.edu//Research/Short_Notes/pnpoly.html
// Returns 1 when (testx, testy) lies inside the nvert-gon, 0 otherwise.
template <typename T>
static int pnpoly(int nvert, T *vertx, T *verty, T testx, T testy) {
  int inside = 0;
  for (int i = 0, j = nvert - 1; i < nvert; j = i++) {
    // Edge (j, i) straddles the horizontal line through testy?
    const bool straddles = (verty[i] > testy) != (verty[j] > testy);
    if (straddles &&
        (testx < (vertx[j] - vertx[i]) * (testy - verty[i]) /
                         (verty[j] - verty[i]) +
                     vertx[i])) {
      inside = !inside;  // each crossing toggles inside/outside
    }
  }
  return inside;
}
// TODO(syoyo): refactor function.
// Flattens one group of parsed primitives (faces / lines / points) into
// `shape` under the given name/material/tags. When `triangulate` is set,
// polygon faces are ear-clip triangulated using the flat xyz position
// array `v`. Returns false when the group holds no primitives at all.
static bool exportGroupsToShape(shape_t *shape, const PrimGroup &prim_group,
                                const std::vector<tag_t> &tags,
                                const int material_id, const std::string &name,
                                bool triangulate,
                                const std::vector<real_t> &v) {
  if (prim_group.IsEmpty()) {
    return false;
  }
  shape->name = name;
  // polygon
  if (!prim_group.faceGroup.empty()) {
    // Flatten vertices and indices
    for (size_t i = 0; i < prim_group.faceGroup.size(); i++) {
      const face_t &face = prim_group.faceGroup[i];
      size_t npolys = face.vertex_indices.size();
      if (npolys < 3) {
        // Face must have 3+ vertices.
        continue;
      }
      vertex_index_t i0 = face.vertex_indices[0];
      vertex_index_t i1(-1);
      vertex_index_t i2 = face.vertex_indices[1];
      if (triangulate) {
        // find the two axes to work in
        // Scan for the first genuinely non-collinear corner; its dominant
        // cross-product component picks the 2D projection plane.
        size_t axes[2] = {1, 2};
        for (size_t k = 0; k < npolys; ++k) {
          i0 = face.vertex_indices[(k + 0) % npolys];
          i1 = face.vertex_indices[(k + 1) % npolys];
          i2 = face.vertex_indices[(k + 2) % npolys];
          size_t vi0 = size_t(i0.v_idx);
          size_t vi1 = size_t(i1.v_idx);
          size_t vi2 = size_t(i2.v_idx);
          if (((3 * vi0 + 2) >= v.size()) || ((3 * vi1 + 2) >= v.size()) ||
              ((3 * vi2 + 2) >= v.size())) {
            // Invalid triangle.
            // FIXME(syoyo): Is it ok to simply skip this invalid triangle?
            continue;
          }
          real_t v0x = v[vi0 * 3 + 0];
          real_t v0y = v[vi0 * 3 + 1];
          real_t v0z = v[vi0 * 3 + 2];
          real_t v1x = v[vi1 * 3 + 0];
          real_t v1y = v[vi1 * 3 + 1];
          real_t v1z = v[vi1 * 3 + 2];
          real_t v2x = v[vi2 * 3 + 0];
          real_t v2y = v[vi2 * 3 + 1];
          real_t v2z = v[vi2 * 3 + 2];
          real_t e0x = v1x - v0x;
          real_t e0y = v1y - v0y;
          real_t e0z = v1z - v0z;
          real_t e1x = v2x - v1x;
          real_t e1y = v2y - v1y;
          real_t e1z = v2z - v1z;
          real_t cx = std::fabs(e0y * e1z - e0z * e1y);
          real_t cy = std::fabs(e0z * e1x - e0x * e1z);
          real_t cz = std::fabs(e0x * e1y - e0y * e1x);
          const real_t epsilon = std::numeric_limits<real_t>::epsilon();
          if (cx > epsilon || cy > epsilon || cz > epsilon) {
            // found a corner
            if (cx > cy && cx > cz) {
              // Normal dominated by x: keep default projection axes {1, 2}.
            } else {
              axes[0] = 0;
              if (cz > cx && cz > cy) axes[1] = 1;
            }
            break;
          }
        }
        // Signed area of the polygon projected on the chosen axes; its sign
        // gives the winding used to classify convex corners below.
        real_t area = 0;
        for (size_t k = 0; k < npolys; ++k) {
          i0 = face.vertex_indices[(k + 0) % npolys];
          i1 = face.vertex_indices[(k + 1) % npolys];
          size_t vi0 = size_t(i0.v_idx);
          size_t vi1 = size_t(i1.v_idx);
          if (((vi0 * 3 + axes[0]) >= v.size()) ||
              ((vi0 * 3 + axes[1]) >= v.size()) ||
              ((vi1 * 3 + axes[0]) >= v.size()) ||
              ((vi1 * 3 + axes[1]) >= v.size())) {
            // Invalid index.
            continue;
          }
          real_t v0x = v[vi0 * 3 + axes[0]];
          real_t v0y = v[vi0 * 3 + axes[1]];
          real_t v1x = v[vi1 * 3 + axes[0]];
          real_t v1y = v[vi1 * 3 + axes[1]];
          area += (v0x * v1y - v0y * v1x) * static_cast<real_t>(0.5);
        }
        face_t remainingFace = face;  // copy
        size_t guess_vert = 0;
        vertex_index_t ind[3];
        real_t vx[3];
        real_t vy[3];
        // How many iterations can we do without decreasing the remaining
        // vertices.
        size_t remainingIterations = face.vertex_indices.size();
        size_t previousRemainingVertices = remainingFace.vertex_indices.size();
        // Ear clipping: repeatedly cut off convex corners that contain no
        // other polygon vertex, until only one triangle remains (or no
        // progress can be made within the iteration budget).
        while (remainingFace.vertex_indices.size() > 3 &&
               remainingIterations > 0) {
          npolys = remainingFace.vertex_indices.size();
          if (guess_vert >= npolys) {
            guess_vert -= npolys;
          }
          if (previousRemainingVertices != npolys) {
            // The number of remaining vertices decreased. Reset counters.
            previousRemainingVertices = npolys;
            remainingIterations = npolys;
          } else {
            // We didn't consume a vertex on previous iteration, reduce the
            // available iterations.
            remainingIterations--;
          }
          for (size_t k = 0; k < 3; k++) {
            ind[k] = remainingFace.vertex_indices[(guess_vert + k) % npolys];
            size_t vi = size_t(ind[k].v_idx);
            if (((vi * 3 + axes[0]) >= v.size()) ||
                ((vi * 3 + axes[1]) >= v.size())) {
              // ???
              vx[k] = static_cast<real_t>(0.0);
              vy[k] = static_cast<real_t>(0.0);
            } else {
              vx[k] = v[vi * 3 + axes[0]];
              vy[k] = v[vi * 3 + axes[1]];
            }
          }
          real_t e0x = vx[1] - vx[0];
          real_t e0y = vy[1] - vy[0];
          real_t e1x = vx[2] - vx[1];
          real_t e1y = vy[2] - vy[1];
          real_t cross = e0x * e1y - e0y * e1x;
          // if an internal angle
          if (cross * area < static_cast<real_t>(0.0)) {
            guess_vert += 1;
            continue;
          }
          // check all other verts in case they are inside this triangle
          bool overlap = false;
          for (size_t otherVert = 3; otherVert < npolys; ++otherVert) {
            size_t idx = (guess_vert + otherVert) % npolys;
            if (idx >= remainingFace.vertex_indices.size()) {
              // ???
              continue;
            }
            size_t ovi = size_t(remainingFace.vertex_indices[idx].v_idx);
            if (((ovi * 3 + axes[0]) >= v.size()) ||
                ((ovi * 3 + axes[1]) >= v.size())) {
              // ???
              continue;
            }
            real_t tx = v[ovi * 3 + axes[0]];
            real_t ty = v[ovi * 3 + axes[1]];
            if (pnpoly(3, vx, vy, tx, ty)) {
              overlap = true;
              break;
            }
          }
          if (overlap) {
            guess_vert += 1;
            continue;
          }
          // this triangle is an ear
          {
            index_t idx0, idx1, idx2;
            idx0.vertex_index = ind[0].v_idx;
            idx0.normal_index = ind[0].vn_idx;
            idx0.texcoord_index = ind[0].vt_idx;
            idx1.vertex_index = ind[1].v_idx;
            idx1.normal_index = ind[1].vn_idx;
            idx1.texcoord_index = ind[1].vt_idx;
            idx2.vertex_index = ind[2].v_idx;
            idx2.normal_index = ind[2].vn_idx;
            idx2.texcoord_index = ind[2].vt_idx;
            shape->mesh.vertex_indices.push_back( idx0.vertex_index );
            shape->mesh.vertex_indices.push_back( idx1.vertex_index );
            shape->mesh.vertex_indices.push_back( idx2.vertex_index );
            shape->mesh.normal_indices.push_back( idx0.normal_index );
            shape->mesh.normal_indices.push_back( idx1.normal_index );
            shape->mesh.normal_indices.push_back( idx2.normal_index );
            shape->mesh.texcoord_indices.push_back( idx0.texcoord_index );
            shape->mesh.texcoord_indices.push_back( idx1.texcoord_index );
            shape->mesh.texcoord_indices.push_back( idx2.texcoord_index );
            shape->mesh.num_face_vertices.push_back(3);
            shape->mesh.material_ids.push_back(material_id);
            shape->mesh.smoothing_group_ids.push_back(face.smoothing_group_id);
          }
          // remove v1 from the list
          size_t removed_vert_index = (guess_vert + 1) % npolys;
          while (removed_vert_index + 1 < npolys) {
            remainingFace.vertex_indices[removed_vert_index] =
                remainingFace.vertex_indices[removed_vert_index + 1];
            removed_vert_index += 1;
          }
          remainingFace.vertex_indices.pop_back();
        }
        // Emit the final triangle left after clipping.
        if (remainingFace.vertex_indices.size() == 3) {
          i0 = remainingFace.vertex_indices[0];
          i1 = remainingFace.vertex_indices[1];
          i2 = remainingFace.vertex_indices[2];
          {
            index_t idx0, idx1, idx2;
            idx0.vertex_index = i0.v_idx;
            idx0.normal_index = i0.vn_idx;
            idx0.texcoord_index = i0.vt_idx;
            idx1.vertex_index = i1.v_idx;
            idx1.normal_index = i1.vn_idx;
            idx1.texcoord_index = i1.vt_idx;
            idx2.vertex_index = i2.v_idx;
            idx2.normal_index = i2.vn_idx;
            idx2.texcoord_index = i2.vt_idx;
            shape->mesh.vertex_indices.push_back( idx0.vertex_index );
            shape->mesh.vertex_indices.push_back( idx1.vertex_index );
            shape->mesh.vertex_indices.push_back( idx2.vertex_index );
            shape->mesh.normal_indices.push_back( idx0.normal_index );
            shape->mesh.normal_indices.push_back( idx1.normal_index );
            shape->mesh.normal_indices.push_back( idx2.normal_index );
            shape->mesh.texcoord_indices.push_back( idx0.texcoord_index );
            shape->mesh.texcoord_indices.push_back( idx1.texcoord_index );
            shape->mesh.texcoord_indices.push_back( idx2.texcoord_index );
            shape->mesh.num_face_vertices.push_back(3);
            shape->mesh.material_ids.push_back(material_id);
            shape->mesh.smoothing_group_ids.push_back(face.smoothing_group_id);
          }
        }
      } else {
        // No triangulation: emit the polygon corners as-is.
        for (size_t k = 0; k < npolys; k++) {
          index_t idx;
          idx.vertex_index = face.vertex_indices[k].v_idx;
          idx.normal_index = face.vertex_indices[k].vn_idx;
          idx.texcoord_index = face.vertex_indices[k].vt_idx;
          shape->mesh.vertex_indices.push_back( idx.vertex_index );
          shape->mesh.normal_indices.push_back( idx.normal_index );
          shape->mesh.texcoord_indices.push_back( idx.texcoord_index );
        }
        shape->mesh.num_face_vertices.push_back(
            static_cast<unsigned char>(npolys));
        shape->mesh.material_ids.push_back(material_id);  // per face
        shape->mesh.smoothing_group_ids.push_back(
            face.smoothing_group_id);  // per face
      }
    }
    shape->mesh.tags = tags;
  }
  // line
  if (!prim_group.lineGroup.empty()) {
    // Flatten indices
    for (size_t i = 0; i < prim_group.lineGroup.size(); i++) {
      for (size_t j = 0; j < prim_group.lineGroup[i].vertex_indices.size();
           j++) {
        const vertex_index_t &vi = prim_group.lineGroup[i].vertex_indices[j];
        index_t idx;
        idx.vertex_index = vi.v_idx;
        idx.normal_index = vi.vn_idx;
        idx.texcoord_index = vi.vt_idx;
        shape->lines.indices.push_back(idx);
      }
      shape->lines.num_line_vertices.push_back(
          int(prim_group.lineGroup[i].vertex_indices.size()));
    }
  }
  // points
  if (!prim_group.pointsGroup.empty()) {
    // Flatten & convert indices
    for (size_t i = 0; i < prim_group.pointsGroup.size(); i++) {
      for (size_t j = 0; j < prim_group.pointsGroup[i].vertex_indices.size();
           j++) {
        const vertex_index_t &vi = prim_group.pointsGroup[i].vertex_indices[j];
        index_t idx;
        idx.vertex_index = vi.v_idx;
        idx.normal_index = vi.vn_idx;
        idx.texcoord_index = vi.vt_idx;
        shape->points.indices.push_back(idx);
      }
    }
  }
  return true;
}
// Split `s` on `delim`, appending every piece (including empty ones
// between adjacent delimiters) to `elems`.
// http://stackoverflow.com/questions/236129/split-a-string-in-c
static void SplitString(const std::string &s, char delim,
                        std::vector<std::string> &elems) {
  std::stringstream stream(s);
  std::string piece;
  while (std::getline(stream, piece, delim)) {
    elems.push_back(piece);
  }
}
// Join a directory and a filename with '/', avoiding a doubled separator
// when `dir` already ends in one. An empty `dir` returns `filename` as-is.
static std::string JoinPath(const std::string &dir,
                            const std::string &filename) {
  if (dir.empty()) {
    return filename;
  }
  const bool ends_with_sep = (*dir.rbegin() == '/');
  return ends_with_sep ? (dir + filename) : (dir + "/" + filename);
}
void LoadMtl(std::map<std::string, int> *material_map,
std::vector<material_t> *materials, std::istream *inStream,
std::string *warning, std::string *err) {
(void)err;
// Create a default material anyway.
material_t material;
InitMaterial(&material);
// Issue 43. `d` wins against `Tr` since `Tr` is not in the MTL specification.
bool has_d = false;
bool has_tr = false;
// has_kd is used to set a default diffuse value when map_Kd is present
// and Kd is not.
bool has_kd = false;
std::stringstream warn_ss;
size_t line_no = 0;
std::string linebuf;
while (inStream->peek() != -1) {
safeGetline(*inStream, linebuf);
line_no++;
// Trim trailing whitespace.
if (linebuf.size() > 0) {
linebuf = linebuf.substr(0, linebuf.find_last_not_of(" \t") + 1);
}
// Trim newline '\r\n' or '\n'
if (linebuf.size() > 0) {
if (linebuf[linebuf.size() - 1] == '\n')
linebuf.erase(linebuf.size() - 1);
}
if (linebuf.size() > 0) {
if (linebuf[linebuf.size() - 1] == '\r')
linebuf.erase(linebuf.size() - 1);
}
// Skip if empty line.
if (linebuf.empty()) {
continue;
}
// Skip leading space.
const char *token = linebuf.c_str();
token += strspn(token, " \t");
assert(token);
if (token[0] == '\0') continue; // empty line
if (token[0] == '#') continue; // comment line
// new mtl
if ((0 == strncmp(token, "newmtl", 6)) && IS_SPACE((token[6]))) {
// flush previous material.
if (!material.name.empty()) {
material_map->insert(std::pair<std::string, int>(
material.name, static_cast<int>(materials->size())));
materials->push_back(material);
}
// initial temporary material
InitMaterial(&material);
has_d = false;
has_tr = false;
// set new mtl name
token += 7;
{
std::stringstream sstr;
sstr << token;
material.name = sstr.str();
}
continue;
}
// ambient
if (token[0] == 'K' && token[1] == 'a' && IS_SPACE((token[2]))) {
token += 2;
real_t r, g, b;
parseReal3(&r, &g, &b, &token);
material.ambient[0] = r;
material.ambient[1] = g;
material.ambient[2] = b;
continue;
}
// diffuse
if (token[0] == 'K' && token[1] == 'd' && IS_SPACE((token[2]))) {
token += 2;
real_t r, g, b;
parseReal3(&r, &g, &b, &token);
material.diffuse[0] = r;
material.diffuse[1] = g;
material.diffuse[2] = b;
has_kd = true;
continue;
}
// specular
if (token[0] == 'K' && token[1] == 's' && IS_SPACE((token[2]))) {
token += 2;
real_t r, g, b;
parseReal3(&r, &g, &b, &token);
material.specular[0] = r;
material.specular[1] = g;
material.specular[2] = b;
continue;
}
// transmittance
if ((token[0] == 'K' && token[1] == 't' && IS_SPACE((token[2]))) ||
(token[0] == 'T' && token[1] == 'f' && IS_SPACE((token[2])))) {
token += 2;
real_t r, g, b;
parseReal3(&r, &g, &b, &token);
material.transmittance[0] = r;
material.transmittance[1] = g;
material.transmittance[2] = b;
continue;
}
// ior(index of refraction)
if (token[0] == 'N' && token[1] == 'i' && IS_SPACE((token[2]))) {
token += 2;
material.ior = parseReal(&token);
continue;
}
// emission
if (token[0] == 'K' && token[1] == 'e' && IS_SPACE(token[2])) {
token += 2;
real_t r, g, b;
parseReal3(&r, &g, &b, &token);
material.emission[0] = r;
material.emission[1] = g;
material.emission[2] = b;
continue;
}
// shininess
if (token[0] == 'N' && token[1] == 's' && IS_SPACE(token[2])) {
token += 2;
material.shininess = parseReal(&token);
continue;
}
// illum model
if (0 == strncmp(token, "illum", 5) && IS_SPACE(token[5])) {
token += 6;
material.illum = parseInt(&token);
continue;
}
// dissolve
if ((token[0] == 'd' && IS_SPACE(token[1]))) {
token += 1;
material.dissolve = parseReal(&token);
if (has_tr) {
warn_ss << "Both `d` and `Tr` parameters defined for \""
<< material.name
<< "\". Use the value of `d` for dissolve (line " << line_no
<< " in .mtl.)" << std::endl;
}
has_d = true;
continue;
}
if (token[0] == 'T' && token[1] == 'r' && IS_SPACE(token[2])) {
token += 2;
if (has_d) {
// `d` wins. Ignore `Tr` value.
warn_ss << "Both `d` and `Tr` parameters defined for \""
<< material.name
<< "\". Use the value of `d` for dissolve (line " << line_no
<< " in .mtl.)" << std::endl;
} else {
// We invert value of Tr(assume Tr is in range [0, 1])
// NOTE: Interpretation of Tr is application(exporter) dependent. For
// some application(e.g. 3ds max obj exporter), Tr = d(Issue 43)
material.dissolve = static_cast<real_t>(1.0) - parseReal(&token);
}
has_tr = true;
continue;
}
// PBR: roughness
if (token[0] == 'P' && token[1] == 'r' && IS_SPACE(token[2])) {
token += 2;
material.roughness = parseReal(&token);
continue;
}
// PBR: metallic
if (token[0] == 'P' && token[1] == 'm' && IS_SPACE(token[2])) {
token += 2;
material.metallic = parseReal(&token);
continue;
}
// PBR: sheen
if (token[0] == 'P' && token[1] == 's' && IS_SPACE(token[2])) {
token += 2;
material.sheen = parseReal(&token);
continue;
}
// PBR: clearcoat thickness
if (token[0] == 'P' && token[1] == 'c' && IS_SPACE(token[2])) {
token += 2;
material.clearcoat_thickness = parseReal(&token);
continue;
}
// PBR: clearcoat roughness
if ((0 == strncmp(token, "Pcr", 3)) && IS_SPACE(token[3])) {
token += 4;
material.clearcoat_roughness = parseReal(&token);
continue;
}
// PBR: anisotropy
if ((0 == strncmp(token, "aniso", 5)) && IS_SPACE(token[5])) {
token += 6;
material.anisotropy = parseReal(&token);
continue;
}
// PBR: anisotropy rotation
if ((0 == strncmp(token, "anisor", 6)) && IS_SPACE(token[6])) {
token += 7;
material.anisotropy_rotation = parseReal(&token);
continue;
}
// ambient texture
if ((0 == strncmp(token, "map_Ka", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.ambient_texname),
&(material.ambient_texopt), token);
continue;
}
// diffuse texture
if ((0 == strncmp(token, "map_Kd", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.diffuse_texname),
&(material.diffuse_texopt), token);
// Set a decent diffuse default value if a diffuse texture is specified
// without a matching Kd value.
if (!has_kd)
{
material.diffuse[0] = static_cast<real_t>(0.6);
material.diffuse[1] = static_cast<real_t>(0.6);
material.diffuse[2] = static_cast<real_t>(0.6);
}
continue;
}
// specular texture
if ((0 == strncmp(token, "map_Ks", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.specular_texname),
&(material.specular_texopt), token);
continue;
}
// specular highlight texture
if ((0 == strncmp(token, "map_Ns", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.specular_highlight_texname),
&(material.specular_highlight_texopt), token);
continue;
}
// bump texture
if ((0 == strncmp(token, "map_bump", 8)) && IS_SPACE(token[8])) {
token += 9;
ParseTextureNameAndOption(&(material.bump_texname),
&(material.bump_texopt), token);
continue;
}
// bump texture
if ((0 == strncmp(token, "map_Bump", 8)) && IS_SPACE(token[8])) {
token += 9;
ParseTextureNameAndOption(&(material.bump_texname),
&(material.bump_texopt), token);
continue;
}
// bump texture
if ((0 == strncmp(token, "bump", 4)) && IS_SPACE(token[4])) {
token += 5;
ParseTextureNameAndOption(&(material.bump_texname),
&(material.bump_texopt), token);
continue;
}
// alpha texture
if ((0 == strncmp(token, "map_d", 5)) && IS_SPACE(token[5])) {
token += 6;
material.alpha_texname = token;
ParseTextureNameAndOption(&(material.alpha_texname),
&(material.alpha_texopt), token);
continue;
}
// displacement texture
if ((0 == strncmp(token, "disp", 4)) && IS_SPACE(token[4])) {
token += 5;
ParseTextureNameAndOption(&(material.displacement_texname),
&(material.displacement_texopt), token);
continue;
}
// reflection map
if ((0 == strncmp(token, "refl", 4)) && IS_SPACE(token[4])) {
token += 5;
ParseTextureNameAndOption(&(material.reflection_texname),
&(material.reflection_texopt), token);
continue;
}
// PBR: roughness texture
if ((0 == strncmp(token, "map_Pr", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.roughness_texname),
&(material.roughness_texopt), token);
continue;
}
// PBR: metallic texture
if ((0 == strncmp(token, "map_Pm", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.metallic_texname),
&(material.metallic_texopt), token);
continue;
}
// PBR: sheen texture
if ((0 == strncmp(token, "map_Ps", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.sheen_texname),
&(material.sheen_texopt), token);
continue;
}
// PBR: emissive texture
if ((0 == strncmp(token, "map_Ke", 6)) && IS_SPACE(token[6])) {
token += 7;
ParseTextureNameAndOption(&(material.emissive_texname),
&(material.emissive_texopt), token);
continue;
}
// PBR: normal map texture
if ((0 == strncmp(token, "norm", 4)) && IS_SPACE(token[4])) {
token += 5;
ParseTextureNameAndOption(&(material.normal_texname),
&(material.normal_texopt), token);
continue;
}
// unknown parameter
const char *_space = strchr(token, ' ');
if (!_space) {
_space = strchr(token, '\t');
}
if (_space) {
std::ptrdiff_t len = _space - token;
std::string key(token, static_cast<size_t>(len));
std::string value = _space + 1;
material.unknown_parameter.insert(
std::pair<std::string, std::string>(key, value));
}
}
// flush last material.
material_map->insert(std::pair<std::string, int>(
material.name, static_cast<int>(materials->size())));
materials->push_back(material);
if (warning) {
(*warning) = warn_ss.str();
}
}
// Resolve and load the material file `matId`, trying each directory listed in
// m_mtlBaseDir (PATH-style: ';'-separated on Windows, ':'-separated
// elsewhere). When no base directory is configured, `matId` is opened as-is.
// On success, the parsed materials are appended via LoadMtl and true is
// returned; otherwise a warning is appended to `warn` and false is returned.
bool MaterialFileReader::operator()(const std::string &matId,
                                    std::vector<material_t> *materials,
                                    std::map<std::string, int> *matMap,
                                    std::string *warn, std::string *err) {
  if (!m_mtlBaseDir.empty()) {
#ifdef _WIN32
    const char path_sep = ';';
#else
    const char path_sep = ':';
#endif
    // m_mtlBaseDir may contain several search directories; split on the
    // platform path separator.
    std::vector<std::string> search_dirs;
    std::istringstream dir_stream(m_mtlBaseDir);
    for (std::string dir; getline(dir_stream, dir, path_sep);) {
      search_dirs.push_back(dir);
    }
    // Probe each directory in order and load from the first match.
    for (size_t d = 0; d < search_dirs.size(); ++d) {
      const std::string candidate = JoinPath(search_dirs[d], matId);
      std::ifstream mtl_file(candidate.c_str());
      if (!mtl_file) {
        continue;
      }
      LoadMtl(matMap, materials, &mtl_file, warn, err);
      return true;
    }
    if (warn) {
      std::stringstream ss;
      ss << "Material file [ " << matId
         << " ] not found in a path : " << m_mtlBaseDir << std::endl;
      (*warn) += ss.str();
    }
    return false;
  }
  // No base directory configured: interpret matId as a plain path.
  std::ifstream mtl_file(matId.c_str());
  if (mtl_file) {
    LoadMtl(matMap, materials, &mtl_file, warn, err);
    return true;
  }
  if (warn) {
    std::stringstream ss;
    ss << "Material file [ " << matId
       << " ] not found in a path : " << m_mtlBaseDir << std::endl;
    (*warn) += ss.str();
  }
  return false;
}
// Load materials from the stream supplied at construction time. `matId` is
// ignored because a single pre-opened stream serves every request; `err` is
// unused here (LoadMtl reports through `warn`).
bool MaterialStreamReader::operator()(const std::string &matId,
                                      std::vector<material_t> *materials,
                                      std::map<std::string, int> *matMap,
                                      std::string *warn, std::string *err) {
  (void)err;
  (void)matId;
  // A stream in a failed state cannot be parsed; warn and bail out.
  if (!m_inStream) {
    if (warn) {
      std::stringstream ss;
      ss << "Material stream in error state. " << std::endl;
      (*warn) += ss.str();
    }
    return false;
  }
  LoadMtl(matMap, materials, &m_inStream, warn, err);
  return true;
}
// Convenience overload: open `filename` and parse it with the stream-based
// LoadObj, resolving .mtl files relative to `mtl_basedir` (or the current
// working directory when NULL). Previously-parsed data in `attrib`/`shapes`
// is cleared first. Returns false when the file cannot be opened.
bool LoadObj(attrib_t *attrib, std::vector<shape_t> *shapes,
             std::vector<material_t> *materials, std::string *warn,
             std::string *err, const char *filename, const char *mtl_basedir,
             bool trianglulate, bool default_vcols_fallback) {
  // Reset output containers so repeated calls do not accumulate data.
  attrib->vertices.clear();
  attrib->normals.clear();
  attrib->texcoords.clear();
  attrib->colors.clear();
  shapes->clear();
  std::ifstream ifs(filename);
  if (!ifs) {
    std::stringstream errss;
    errss << "Cannot open file [" << filename << "]" << std::endl;
    if (err) {
      (*err) = errss.str();
    }
    return false;
  }
  // Normalize the material base directory to end with a path separator so
  // that simple concatenation yields valid paths.
  std::string baseDir = mtl_basedir ? mtl_basedir : "";
  if (!baseDir.empty()) {
#ifndef _WIN32
    const char dirsep = '/';
#else
    const char dirsep = '\\';
#endif
    if (baseDir[baseDir.length() - 1] != dirsep) baseDir += dirsep;
  }
  MaterialFileReader matFileReader(baseDir);
  return LoadObj(attrib, shapes, materials, warn, err, &ifs, &matFileReader,
                 trianglulate, default_vcols_fallback);
}
// Core Wavefront .obj parser. Reads `inStream` line by line, filling
// `attrib` with flattened vertex/normal/texcoord/color arrays and appending
// one shape_t per object/group/per-material region. Material libraries named
// by `mtllib` are resolved through `readMatFn` (may be NULL to skip material
// loading). When `triangulate` is true, faces are triangulated by
// exportGroupsToShape; when `default_vcols_fallback` is true, vertices
// missing color data receive the parsed fallback values. Non-fatal problems
// are appended to `warn`; fatal parse errors go to `err` and return false.
bool LoadObj(attrib_t *attrib, std::vector<shape_t> *shapes,
             std::vector<material_t> *materials, std::string *warn,
             std::string *err, std::istream *inStream,
             MaterialReader *readMatFn /*= NULL*/, bool triangulate,
             bool default_vcols_fallback) {
  std::stringstream errss;
  // Raw per-element buffers accumulated while scanning the file:
  // v = positions (xyz), vn = normals, vt = texcoords, vc = vertex colors.
  std::vector<real_t> v;
  std::vector<real_t> vn;
  std::vector<real_t> vt;
  std::vector<real_t> vc;
  std::vector<tag_t> tags;
  PrimGroup prim_group;
  std::string name;
  // material
  std::map<std::string, int> material_map;
  int material = -1;
  // smoothing group id
  unsigned int current_smoothing_id =
      0;  // Initial value. 0 means no smoothing.
  // Track the largest index referenced by any face so out-of-bounds
  // references can be reported after the scan.
  int greatest_v_idx = -1;
  int greatest_vn_idx = -1;
  int greatest_vt_idx = -1;
  shape_t shape;
  bool found_all_colors = true;
  size_t line_num = 0;
  std::string linebuf;
  while (inStream->peek() != -1) {
    safeGetline(*inStream, linebuf);
    line_num++;
    // Trim newline '\r\n' or '\n'
    if (linebuf.size() > 0) {
      if (linebuf[linebuf.size() - 1] == '\n')
        linebuf.erase(linebuf.size() - 1);
    }
    if (linebuf.size() > 0) {
      if (linebuf[linebuf.size() - 1] == '\r')
        linebuf.erase(linebuf.size() - 1);
    }
    // Skip if empty line.
    if (linebuf.empty()) {
      continue;
    }
    // Skip leading space.
    const char *token = linebuf.c_str();
    token += strspn(token, " \t");
    assert(token);
    if (token[0] == '\0') continue;  // empty line
    if (token[0] == '#') continue;  // comment line
    // vertex: "v x y z [r g b]" — optional per-vertex color extension.
    if (token[0] == 'v' && IS_SPACE((token[1]))) {
      token += 2;
      real_t x, y, z;
      real_t r, g, b;
      // found_all_colors stays true only if every vertex carried a color.
      found_all_colors &= parseVertexWithColor(&x, &y, &z, &r, &g, &b, &token);
      v.push_back(x);
      v.push_back(y);
      v.push_back(z);
      if (found_all_colors || default_vcols_fallback) {
        vc.push_back(r);
        vc.push_back(g);
        vc.push_back(b);
      }
      continue;
    }
    // normal
    if (token[0] == 'v' && token[1] == 'n' && IS_SPACE((token[2]))) {
      token += 3;
      real_t x, y, z;
      parseReal3(&x, &y, &z, &token);
      vn.push_back(x);
      vn.push_back(y);
      vn.push_back(z);
      continue;
    }
    // texcoord
    if (token[0] == 'v' && token[1] == 't' && IS_SPACE((token[2]))) {
      token += 3;
      real_t x, y;
      parseReal2(&x, &y, &token);
      vt.push_back(x);
      vt.push_back(y);
      continue;
    }
    // line primitive: "l v1/vt1 v2/vt2 ..."
    if (token[0] == 'l' && IS_SPACE((token[1]))) {
      token += 2;
      __line_t line;
      while (!IS_NEW_LINE(token[0])) {
        vertex_index_t vi;
        if (!parseTriple(&token, static_cast<int>(v.size() / 3),
                         static_cast<int>(vn.size() / 3),
                         static_cast<int>(vt.size() / 2), &vi)) {
          if (err) {
            std::stringstream ss;
            ss << "Failed parse `l' line(e.g. zero value for vertex index. "
                  "line "
               << line_num << ".)\n";
            (*err) += ss.str();
          }
          return false;
        }
        line.vertex_indices.push_back(vi);
        size_t n = strspn(token, " \t\r");
        token += n;
      }
      prim_group.lineGroup.push_back(line);
      continue;
    }
    // point primitive: "p v1 v2 ..."
    if (token[0] == 'p' && IS_SPACE((token[1]))) {
      token += 2;
      __points_t pts;
      while (!IS_NEW_LINE(token[0])) {
        vertex_index_t vi;
        if (!parseTriple(&token, static_cast<int>(v.size() / 3),
                         static_cast<int>(vn.size() / 3),
                         static_cast<int>(vt.size() / 2), &vi)) {
          if (err) {
            std::stringstream ss;
            ss << "Failed parse `p' line(e.g. zero value for vertex index. "
                  "line "
               << line_num << ".)\n";
            (*err) += ss.str();
          }
          return false;
        }
        pts.vertex_indices.push_back(vi);
        size_t n = strspn(token, " \t\r");
        token += n;
      }
      prim_group.pointsGroup.push_back(pts);
      continue;
    }
    // face: "f v/vt/vn ..." — indices may be negative (relative).
    if (token[0] == 'f' && IS_SPACE((token[1]))) {
      token += 2;
      token += strspn(token, " \t");
      face_t face;
      face.smoothing_group_id = current_smoothing_id;
      face.vertex_indices.reserve(3);
      while (!IS_NEW_LINE(token[0])) {
        vertex_index_t vi;
        if (!parseTriple(&token, static_cast<int>(v.size() / 3),
                         static_cast<int>(vn.size() / 3),
                         static_cast<int>(vt.size() / 2), &vi)) {
          if (err) {
            std::stringstream ss;
            ss << "Failed parse `f' line(e.g. zero value for face index. line "
               << line_num << ".)\n";
            (*err) += ss.str();
          }
          return false;
        }
        greatest_v_idx = greatest_v_idx > vi.v_idx ? greatest_v_idx : vi.v_idx;
        greatest_vn_idx =
            greatest_vn_idx > vi.vn_idx ? greatest_vn_idx : vi.vn_idx;
        greatest_vt_idx =
            greatest_vt_idx > vi.vt_idx ? greatest_vt_idx : vi.vt_idx;
        face.vertex_indices.push_back(vi);
        size_t n = strspn(token, " \t\r");
        token += n;
      }
      // replace with emplace_back + std::move on C++11
      prim_group.faceGroup.push_back(face);
      continue;
    }
    // use mtl — switch active material; flushes faces accumulated so far so
    // each shape region keeps a single material id.
    if ((0 == strncmp(token, "usemtl", 6))) {
      token += 6;
      std::string namebuf = parseString(&token);
      int newMaterialId = -1;
      std::map<std::string, int>::const_iterator it = material_map.find(namebuf);
      if (it != material_map.end()) {
        newMaterialId = it->second;
      } else {
        // { error!! material not found }
        if (warn) {
          (*warn) += "material [ '" + namebuf + "' ] not found in .mtl\n";
        }
      }
      if (newMaterialId != material) {
        // Create per-face material. Thus we don't add `shape` to `shapes` at
        // this time.
        // just clear `faceGroup` after `exportGroupsToShape()` call.
        exportGroupsToShape(&shape, prim_group, tags, material, name,
                            triangulate, v);
        prim_group.faceGroup.clear();
        material = newMaterialId;
      }
      continue;
    }
    // load mtl — "mtllib a.mtl [b.mtl ...]"; first readable file wins.
    if ((0 == strncmp(token, "mtllib", 6)) && IS_SPACE((token[6]))) {
      if (readMatFn) {
        token += 7;
        std::vector<std::string> filenames;
        SplitString(std::string(token), ' ', filenames);
        if (filenames.empty()) {
          if (warn) {
            std::stringstream ss;
            ss << "Looks like empty filename for mtllib. Use default "
                  "material (line "
               << line_num << ".)\n";
            (*warn) += ss.str();
          }
        } else {
          bool found = false;
          for (size_t s = 0; s < filenames.size(); s++) {
            std::string warn_mtl;
            std::string err_mtl;
            bool ok = (*readMatFn)(filenames[s].c_str(), materials,
                                   &material_map, &warn_mtl, &err_mtl);
            if (warn && (!warn_mtl.empty())) {
              (*warn) += warn_mtl;
            }
            if (err && (!err_mtl.empty())) {
              (*err) += err_mtl;
            }
            if (ok) {
              found = true;
              break;
            }
          }
          if (!found) {
            if (warn) {
              (*warn) +=
                  "Failed to load material file(s). Use default "
                  "material.\n";
            }
          }
        }
      }
      continue;
    }
    // group name — flushes the current shape and starts a new one.
    if (token[0] == 'g' && IS_SPACE((token[1]))) {
      // flush previous face group.
      bool ret = exportGroupsToShape(&shape, prim_group, tags, material, name,
                                     triangulate, v);
      (void)ret;  // return value not used.
      if (shape.mesh.vertex_indices.size() > 0) {
        shapes->push_back(shape);
      }
      shape = shape_t();
      // material = -1;
      prim_group.clear();
      std::vector<std::string> names;
      while (!IS_NEW_LINE(token[0])) {
        std::string str = parseString(&token);
        names.push_back(str);
        token += strspn(token, " \t\r");  // skip tag
      }
      // names[0] must be 'g'
      if (names.size() < 2) {
        // 'g' with empty names
        if (warn) {
          std::stringstream ss;
          ss << "Empty group name. line: " << line_num << "\n";
          (*warn) += ss.str();
          name = "";
        }
      } else {
        std::stringstream ss;
        ss << names[1];
        // tinyobjloader does not support multiple groups for a primitive.
        // Currently we concatinate multiple group names with a space to get
        // single group name.
        for (size_t i = 2; i < names.size(); i++) {
          ss << " " << names[i];
        }
        name = ss.str();
      }
      continue;
    }
    // object name — like 'g', flushes the current shape first.
    if (token[0] == 'o' && IS_SPACE((token[1]))) {
      // flush previous face group.
      bool ret = exportGroupsToShape(&shape, prim_group, tags, material, name,
                                     triangulate, v);
      (void)ret;  // return value not used.
      if (shape.mesh.vertex_indices.size() > 0 || shape.lines.indices.size() > 0 ||
          shape.points.indices.size() > 0) {
        shapes->push_back(shape);
      }
      // material = -1;
      prim_group.clear();
      shape = shape_t();
      // @todo { multiple object name? }
      token += 2;
      std::stringstream ss;
      ss << token;
      name = ss.str();
      continue;
    }
    // tag: "t name int/real/string counts + values" (tinyobjloader extension).
    if (token[0] == 't' && IS_SPACE(token[1])) {
      const int max_tag_nums = 8192;  // FIXME(syoyo): Parameterize.
      tag_t tag;
      token += 2;
      tag.name = parseString(&token);
      tag_sizes ts = parseTagTriple(&token);
      // Clamp counts to [0, max_tag_nums] to avoid huge allocations from
      // malformed input.
      if (ts.num_ints < 0) {
        ts.num_ints = 0;
      }
      if (ts.num_ints > max_tag_nums) {
        ts.num_ints = max_tag_nums;
      }
      if (ts.num_reals < 0) {
        ts.num_reals = 0;
      }
      if (ts.num_reals > max_tag_nums) {
        ts.num_reals = max_tag_nums;
      }
      if (ts.num_strings < 0) {
        ts.num_strings = 0;
      }
      if (ts.num_strings > max_tag_nums) {
        ts.num_strings = max_tag_nums;
      }
      tag.intValues.resize(static_cast<size_t>(ts.num_ints));
      for (size_t i = 0; i < static_cast<size_t>(ts.num_ints); ++i) {
        tag.intValues[i] = parseInt(&token);
      }
      tag.floatValues.resize(static_cast<size_t>(ts.num_reals));
      for (size_t i = 0; i < static_cast<size_t>(ts.num_reals); ++i) {
        tag.floatValues[i] = parseReal(&token);
      }
      tag.stringValues.resize(static_cast<size_t>(ts.num_strings));
      for (size_t i = 0; i < static_cast<size_t>(ts.num_strings); ++i) {
        tag.stringValues[i] = parseString(&token);
      }
      tags.push_back(tag);
      continue;
    }
    if (token[0] == 's' && IS_SPACE(token[1])) {
      // smoothing group id
      token += 2;
      // skip space.
      token += strspn(token, " \t");  // skip space
      if (token[0] == '\0') {
        continue;
      }
      if (token[0] == '\r' || token[1] == '\n') {
        continue;
      }
      // "s off" disables smoothing (id 0); otherwise a numeric group id.
      if (strlen(token) >= 3 && token[0] == 'o' && token[1] == 'f' &&
          token[2] == 'f') {
        current_smoothing_id = 0;
      } else {
        // assume number
        int smGroupId = parseInt(&token);
        if (smGroupId < 0) {
          // parse error. force set to 0.
          // FIXME(syoyo): Report warning.
          current_smoothing_id = 0;
        } else {
          current_smoothing_id = static_cast<unsigned int>(smGroupId);
        }
      }
      continue;
    }  // smoothing group id
    // Ignore unknown command.
  }
  // not all vertices have colors, no default colors desired? -> clear colors
  if (!found_all_colors && !default_vcols_fallback) {
    vc.clear();
  }
  // Post-scan validation: warn (but do not fail) on indices that reference
  // elements beyond what was actually declared.
  if (greatest_v_idx >= static_cast<int>(v.size() / 3)) {
    if (warn) {
      std::stringstream ss;
      ss << "Vertex indices out of bounds (line " << line_num << ".)\n"
         << std::endl;
      (*warn) += ss.str();
    }
  }
  if (greatest_vn_idx >= static_cast<int>(vn.size() / 3)) {
    if (warn) {
      std::stringstream ss;
      ss << "Vertex normal indices out of bounds (line " << line_num << ".)\n"
         << std::endl;
      (*warn) += ss.str();
    }
  }
  if (greatest_vt_idx >= static_cast<int>(vt.size() / 2)) {
    if (warn) {
      std::stringstream ss;
      ss << "Vertex texcoord indices out of bounds (line " << line_num << ".)\n"
         << std::endl;
      (*warn) += ss.str();
    }
  }
  bool ret = exportGroupsToShape(&shape, prim_group, tags, material, name,
                                 triangulate, v);
  // exportGroupsToShape return false when `usemtl` is called in the last
  // line.
  // we also add `shape` to `shapes` when `shape.mesh` has already some
  // faces(indices)
  if (ret || shape.mesh.vertex_indices
                 .size()) {  // FIXME(syoyo): Support other prims(e.g. lines)
    shapes->push_back(shape);
  }
  prim_group.clear();  // for safety
  if (err) {
    (*err) += errss.str();
  }
  attrib->vertices.swap(v);
  // NOTE(review): after the swap above, `v` holds the *previous* contents of
  // attrib->vertices, so vertex_weights receives stale data rather than real
  // w components (likewise texcoord_ws below receives old texcoord data).
  // Looks unintentional — confirm against upstream tinyobjloader.
  attrib->vertex_weights.swap(v);
  attrib->normals.swap(vn);
  attrib->texcoords.swap(vt);
  attrib->texcoord_ws.swap(vt);
  attrib->colors.swap(vc);
  return true;
}
// Streaming/SAX-style .obj parser: instead of building attrib/shape arrays,
// it invokes the function pointers in `callback` (vertex_cb, normal_cb,
// texcoord_cb, index_cb, usemtl_cb, mtllib_cb, group_cb, object_cb) as each
// element is parsed. NULL callbacks are simply skipped. Material libraries
// are resolved through `readMatFn` when provided. Always returns true;
// problems are reported through `warn`/`err`.
bool LoadObjWithCallback(std::istream &inStream, const callback_t &callback,
                         void *user_data /*= NULL*/,
                         MaterialReader *readMatFn /*= NULL*/,
                         std::string *warn, /* = NULL*/
                         std::string *err /*= NULL*/) {
  std::stringstream errss;
  // material
  std::map<std::string, int> material_map;
  int material_id = -1;  // -1 = invalid
  // Scratch buffers reused across lines to avoid per-line allocations.
  std::vector<index_t> indices;
  std::vector<material_t> materials;
  std::vector<std::string> names;
  names.reserve(2);
  std::vector<const char *> names_out;
  std::string linebuf;
  while (inStream.peek() != -1) {
    safeGetline(inStream, linebuf);
    // Trim newline '\r\n' or '\n'
    if (linebuf.size() > 0) {
      if (linebuf[linebuf.size() - 1] == '\n')
        linebuf.erase(linebuf.size() - 1);
    }
    if (linebuf.size() > 0) {
      if (linebuf[linebuf.size() - 1] == '\r')
        linebuf.erase(linebuf.size() - 1);
    }
    // Skip if empty line.
    if (linebuf.empty()) {
      continue;
    }
    // Skip leading space.
    const char *token = linebuf.c_str();
    token += strspn(token, " \t");
    assert(token);
    if (token[0] == '\0') continue;  // empty line
    if (token[0] == '#') continue;  // comment line
    // vertex: "v x y z [w]" — w defaults to 1.0 inside parseV.
    if (token[0] == 'v' && IS_SPACE((token[1]))) {
      token += 2;
      // TODO(syoyo): Support parsing vertex color extension.
      real_t x, y, z, w;  // w is optional. default = 1.0
      parseV(&x, &y, &z, &w, &token);
      if (callback.vertex_cb) {
        callback.vertex_cb(user_data, x, y, z, w);
      }
      continue;
    }
    // normal
    if (token[0] == 'v' && token[1] == 'n' && IS_SPACE((token[2]))) {
      token += 3;
      real_t x, y, z;
      parseReal3(&x, &y, &z, &token);
      if (callback.normal_cb) {
        callback.normal_cb(user_data, x, y, z);
      }
      continue;
    }
    // texcoord
    if (token[0] == 'v' && token[1] == 't' && IS_SPACE((token[2]))) {
      token += 3;
      real_t x, y, z;  // y and z are optional. default = 0.0
      parseReal3(&x, &y, &z, &token);
      if (callback.texcoord_cb) {
        callback.texcoord_cb(user_data, x, y, z);
      }
      continue;
    }
    // face — indices are passed through raw (parseRawTriple does not
    // resolve negative/relative indices here).
    if (token[0] == 'f' && IS_SPACE((token[1]))) {
      token += 2;
      token += strspn(token, " \t");
      indices.clear();
      while (!IS_NEW_LINE(token[0])) {
        vertex_index_t vi = parseRawTriple(&token);
        index_t idx;
        idx.vertex_index = vi.v_idx;
        idx.normal_index = vi.vn_idx;
        idx.texcoord_index = vi.vt_idx;
        indices.push_back(idx);
        size_t n = strspn(token, " \t\r");
        token += n;
      }
      if (callback.index_cb && indices.size() > 0) {
        callback.index_cb(user_data, &indices.at(0),
                          static_cast<int>(indices.size()));
      }
      continue;
    }
    // use mtl
    if ((0 == strncmp(token, "usemtl", 6)) && IS_SPACE((token[6]))) {
      token += 7;
      std::stringstream ss;
      ss << token;
      std::string namebuf = ss.str();
      int newMaterialId = -1;
      std::map<std::string, int>::const_iterator it = material_map.find(namebuf);
      if (it != material_map.end()) {
        newMaterialId = it->second;
      } else {
        // { warn!! material not found }
        // The unknown-material warning is suppressed when the caller handles
        // usemtl itself via usemtl_cb.
        if (warn && (!callback.usemtl_cb)) {
          (*warn) += "material [ " + namebuf + " ] not found in .mtl\n";
        }
      }
      if (newMaterialId != material_id) {
        material_id = newMaterialId;
      }
      if (callback.usemtl_cb) {
        callback.usemtl_cb(user_data, namebuf.c_str(), material_id);
      }
      continue;
    }
    // load mtl — first readable file from the space-separated list wins.
    if ((0 == strncmp(token, "mtllib", 6)) && IS_SPACE((token[6]))) {
      if (readMatFn) {
        token += 7;
        std::vector<std::string> filenames;
        SplitString(std::string(token), ' ', filenames);
        if (filenames.empty()) {
          if (warn) {
            (*warn) +=
                "Looks like empty filename for mtllib. Use default "
                "material. \n";
          }
        } else {
          bool found = false;
          for (size_t s = 0; s < filenames.size(); s++) {
            std::string warn_mtl;
            std::string err_mtl;
            bool ok = (*readMatFn)(filenames[s].c_str(), &materials,
                                   &material_map, &warn_mtl, &err_mtl);
            if (warn && (!warn_mtl.empty())) {
              (*warn) += warn_mtl;  // This should be warn message.
            }
            if (err && (!err_mtl.empty())) {
              (*err) += err_mtl;
            }
            if (ok) {
              found = true;
              break;
            }
          }
          if (!found) {
            if (warn) {
              (*warn) +=
                  "Failed to load material file(s). Use default "
                  "material.\n";
            }
          } else {
            if (callback.mtllib_cb) {
              callback.mtllib_cb(user_data, &materials.at(0),
                                 static_cast<int>(materials.size()));
            }
          }
        }
      }
      continue;
    }
    // group name — names[0] is the 'g' token itself, so only names[1..]
    // are forwarded to the callback.
    if (token[0] == 'g' && IS_SPACE((token[1]))) {
      names.clear();
      while (!IS_NEW_LINE(token[0])) {
        std::string str = parseString(&token);
        names.push_back(str);
        token += strspn(token, " \t\r");  // skip tag
      }
      assert(names.size() > 0);
      if (callback.group_cb) {
        if (names.size() > 1) {
          // create const char* array.
          names_out.resize(names.size() - 1);
          for (size_t j = 0; j < names_out.size(); j++) {
            names_out[j] = names[j + 1].c_str();
          }
          callback.group_cb(user_data, &names_out.at(0),
                            static_cast<int>(names_out.size()));
        } else {
          callback.group_cb(user_data, NULL, 0);
        }
      }
      continue;
    }
    // object name
    if (token[0] == 'o' && IS_SPACE((token[1]))) {
      // @todo { multiple object name? }
      token += 2;
      std::stringstream ss;
      ss << token;
      std::string object_name = ss.str();
      if (callback.object_cb) {
        callback.object_cb(user_data, object_name.c_str());
      }
      continue;
    }
#if 0  // @todo
    if (token[0] == 't' && IS_SPACE(token[1])) {
      tag_t tag;
      token += 2;
      std::stringstream ss;
      ss << token;
      tag.name = ss.str();
      token += tag.name.size() + 1;
      tag_sizes ts = parseTagTriple(&token);
      tag.intValues.resize(static_cast<size_t>(ts.num_ints));
      for (size_t i = 0; i < static_cast<size_t>(ts.num_ints); ++i) {
        tag.intValues[i] = atoi(token);
        token += strcspn(token, "/ \t\r") + 1;
      }
      tag.floatValues.resize(static_cast<size_t>(ts.num_reals));
      for (size_t i = 0; i < static_cast<size_t>(ts.num_reals); ++i) {
        tag.floatValues[i] = parseReal(&token);
        token += strcspn(token, "/ \t\r") + 1;
      }
      tag.stringValues.resize(static_cast<size_t>(ts.num_strings));
      for (size_t i = 0; i < static_cast<size_t>(ts.num_strings); ++i) {
        std::stringstream ss;
        ss << token;
        tag.stringValues[i] = ss.str();
        token += tag.stringValues[i].size() + 1;
      }
      tags.push_back(tag);
    }
#endif
    // Ignore unknown command.
  }
  if (err) {
    (*err) += errss.str();
  }
  return true;
}
// Parse a .obj file from disk. When config.mtl_search_path is empty, the
// directory containing `filename` is used to locate .mtl files. The result
// (including warnings/errors) is cached in the reader's members; returns the
// same value subsequently reported by Valid().
bool ObjReader::ParseFromFile(const std::string &filename,
                              const ObjReaderConfig &config) {
  std::string mtl_search_path = config.mtl_search_path;
  if (mtl_search_path.empty()) {
    //
    // split at last '/'(for unixish system) or '\\'(for windows) to get
    // the base directory of .obj file
    //
    const size_t sep_pos = filename.find_last_of("/\\");
    if (sep_pos != std::string::npos) {
      mtl_search_path = filename.substr(0, sep_pos);
    }
  }
  valid_ = LoadObj(&attrib_, &shapes_, &materials_, &warning_, &error_,
                   filename.c_str(), mtl_search_path.c_str(),
                   config.triangulate, config.vertex_color);
  return valid_;
}
bool ObjReader::ParseFromString(const std::string &obj_text,
const std::string &mtl_text,
const ObjReaderConfig &config) {
std::stringbuf obj_buf(obj_text);
std::stringbuf mtl_buf(mtl_text);
std::istream obj_ifs(&obj_buf);
std::istream mtl_ifs(&mtl_buf);
MaterialStreamReader mtl_ss(mtl_ifs);
valid_ = LoadObj(&attrib_, &shapes_, &materials_, &warning_, &error_,
&obj_ifs, &mtl_ss, config.triangulate, config.vertex_color);
return valid_;
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace tinyobj
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMotionGeometry/vertices.cu | CUDA | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "vertices.h"
// Piecewise-linear triangle wave in x with the given period, amplitude and
// phase shift; output oscillates between -amplitude and +amplitude.
__forceinline__ __device__ float triangle_wave( float x, float shift = 0.f, float period = 2.f * M_PI, float amplitude = 1.f )
{
    const float t = ( 4.f / period ) * ( x - shift );
    return fabsf( fmodf( t, 4.f * amplitude ) - 2.f * amplitude ) - amplitude;
}
// Store one triangle (three unindexed vertices) at slot `tidx` of
// `out_vertices`, optionally displaced for the "explode" animation.
__forceinline__ __device__ void write_animated_triangle( float3* out_vertices, int tidx, float3 v0, float3 v1, float3 v2, AnimationMode mode, float time )
{
    float3 offset = make_float3( 0 );
    if( mode == AnimationMode_Explode )
    {
        // Generate displacement vector from triangle index
        const float theta = ( (float)M_PI * ( ( tidx + 1 ) * ( 13 / M_PI ) ) );
        const float phi   = ( (float)( 2.0 * M_PI ) * ( ( tidx + 1 ) * ( 97 / M_PI ) ) );
        // Apply displacement to the sphere triangles
        offset = make_float3( triangle_wave( phi ) * triangle_wave( theta, M_PI / 2.f ),
                              triangle_wave( phi, M_PI / 2.f ) * triangle_wave( theta, M_PI / 2.f ),
                              triangle_wave( theta ) )
                 * triangle_wave( time, M_PI / 2.f ) * 2.f;
    }
    float3* tri = out_vertices + tidx * 3;
    tri[0] = v0 + offset;
    tri[1] = v1 + offset;
    tri[2] = v2 + offset;
}
// For the "deform" animation, squash the sphere along y with a time-varying
// cosine ripple driven by the x coordinate; otherwise pass the vertex through.
__forceinline__ __device__ float3 deform_vertex( const float3& c, AnimationMode mode, float time )
{
    if( mode != AnimationMode_Deform )
        return c;
    return make_float3( c.x, c.y * ( 0.5f + 0.4f * cosf( 4 * ( c.x + time ) ) ), c.z );
}
// One thread per patch: emit two unindexed triangles of a lat/long
// tessellated unit sphere (width segments around, height segments top to
// bottom), applying the selected vertex animation.
extern "C" __global__ void generate_vertices(float3* out_vertices, AnimationMode mode, float time, int width, int height)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx >= width * height )
        return;

    // Patch coordinates within the sphere grid.
    const int x = idx % width;
    const int y = idx / width;

    // Angular extents of this patch.
    const float theta0 = ( ( float )M_PI * ( y + 0 ) ) / height;
    const float theta1 = ( ( float )M_PI * ( y + 1 ) ) / height;
    const float phi0   = ( ( float )( 2.0 * M_PI ) * ( x + 0 ) ) / width;
    const float phi1   = ( ( float )( 2.0 * M_PI ) * ( x + 1 ) ) / width;

    const float ct0 = cosf( theta0 );
    const float st0 = sinf( theta0 );
    const float ct1 = cosf( theta1 );
    const float st1 = sinf( theta1 );
    const float cp0 = cosf( phi0 );
    const float sp0 = sinf( phi0 );
    const float cp1 = cosf( phi1 );
    const float sp1 = sinf( phi1 );

    // Corner vertices of the patch, after optional deformation.
    const float3 v00 = deform_vertex( make_float3( cp0 * st0, sp0 * st0, ct0 ), mode, time );
    const float3 v10 = deform_vertex( make_float3( cp0 * st1, sp0 * st1, ct1 ), mode, time );
    const float3 v01 = deform_vertex( make_float3( cp1 * st0, sp1 * st0, ct0 ), mode, time );
    const float3 v11 = deform_vertex( make_float3( cp1 * st1, sp1 * st1, ct1 ), mode, time );

    // Two triangles per patch.
    write_animated_triangle( out_vertices, idx * 2 + 0, v00, v10, v11, mode, time );
    write_animated_triangle( out_vertices, idx * 2 + 1, v00, v11, v01, mode, time );
}
// Host-side launcher for generate_vertices: fills `out_vertices` (device
// memory, width*height*2 triangles = width*height*6 float3s) with the
// animated sphere for the given mode and time.
extern "C" __host__ void
generateAnimatedVetrices( float3* out_vertices, AnimationMode animation_mode, float time, int width, int height )
{
    dim3 threadsPerBlock( 128, 1 );
    // Round the block count up: the previous truncating division
    // ( width * height ) / threadsPerBlock.x launched too few blocks whenever
    // width * height was not a multiple of 128, leaving the tail patches
    // ungenerated. The kernel already guards idx < width * height, so the
    // extra threads in the last block are harmless.
    int numBlocks = ( width * height + threadsPerBlock.x - 1 ) / threadsPerBlock.x;
    generate_vertices <<< numBlocks, threadsPerBlock >>> ( out_vertices, animation_mode, time, width, height );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMotionGeometry/vertices.h | C/C++ Header | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <cuda.h>
#include <sutil/vec_math.h>
// Animation variants accepted by generateAnimatedVetrices(); selects how the
// kernel displaces the base sphere vertices each frame (None leaves them static).
enum AnimationMode
{
    AnimationMode_None,
    AnimationMode_Deform,
    AnimationMode_Explode
};
extern "C" __host__ void generateAnimatedVetrices( float3* out_vertices, AnimationMode animation_mode, float time, int width, int height );
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMultiGPU/optixMultiGPU.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/Camera.h>
#include <sutil/Trackball.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <GLFW/glfw3.h>
#include "sutil/WorkDistribution.h"
#include "optixMultiGPU.h"
#include <array>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <string>
//
// TODO:
// * Use Accel relocation for building instead of independent build per GPU
// * Add cmd line args for specifying desired devices
//
//
// Flags consumed (and cleared) by handleResize()/updateState().
bool resize_dirty = false;
// Fix: was initialized to 'true'. windowSizeCallback() early-returns while
// 'minimized' is set, so a 'true' initial value silently discarded every
// resize event until the first iconify/restore cycle. The window starts
// non-iconified, so the correct initial state is 'false'.
bool minimized = false;

// Camera state
bool camera_changed = true;
sutil::Camera camera;
sutil::Trackball trackball;

// Mouse state
int2 mouse_prev_pos;   // NOTE(review): never written in this file — verify it is still needed
int32_t mouse_button = -1;

// Render resolution; updated by windowSizeCallback().
int32_t width = 768;
int32_t height = 768;
int32_t samples_per_launch = 16;
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" void fillSamplesCUDA(
int32_t num_samples,
cudaStream_t stream,
int32_t gpu_idx,
int32_t num_gpus,
int32_t width,
int32_t height,
int2* samples );
//------------------------------------------------------------------------------
//
// Local types
//
//------------------------------------------------------------------------------
// Shader-binding-table record layout: the OptiX-mandated opaque header
// followed by the per-program user data T.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};

typedef Record<RayGenData> RayGenRecord;
typedef Record<MissData> MissRecord;
typedef Record<HitGroupData> HitGroupRecord;

// Padded to 16 bytes so the device vertex buffer can be reinterpreted as
// float4* (see createSBT, which stores d_vertices as float4*).
struct Vertex
{
    float x, y, z, pad;
};

// Triangle by vertex indices; padded to 16 bytes. (Not used by the visible
// code in this file.)
struct IndexedTriangle
{
    uint32_t v1, v2, v3, pad;
};

// 12-float instance transform. (Not used by the visible code in this file.)
struct Instance
{
    float transform[12];
};

// All per-GPU resources: OptiX context/pipeline/SBT, the device-local copy of
// the acceleration structure and geometry, the per-device sample index and
// accumulation buffers, and the launch parameters. One instance per CUDA
// device; each renders its share of the pixel samples (see allocIOBuffers).
struct PathTracerState
{
    int32_t device_idx = -1;
    OptixDeviceContext context = 0;
    OptixTraversableHandle gas_handle = 0; // Traversable handle for triangle AS
    CUdeviceptr d_gas_output_buffer = 0; // Triangle AS memory
    CUdeviceptr d_vertices = 0;
    OptixModule ptx_module = 0;
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixPipeline pipeline = 0;
    OptixProgramGroup raygen_prog_group = 0;
    OptixProgramGroup radiance_miss_group = 0;
    OptixProgramGroup occlusion_miss_group = 0;
    OptixProgramGroup radiance_hit_group = 0;
    OptixProgramGroup occlusion_hit_group = 0;
    OptixShaderBindingTable sbt = {};
    int32_t num_samples = 0;        // number of pixel samples assigned to this GPU
    int2* d_sample_indices = 0;     // per-sample pixel coordinates (device)
    float4* d_sample_accum = 0;     // per-sample accumulation buffer (device)
    Params params;
    Params* d_params;               // device copy of params, uploaded each launch
    CUstream stream = 0;
};
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
const int32_t TRIANGLE_COUNT = 32;
const int32_t MAT_COUNT = 4;
const static std::array<Vertex, TRIANGLE_COUNT*3> g_vertices =
{ {
// Floor -- white lambert
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 0.0f, 0.0f },
// Ceiling -- white lambert
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
// Back wall -- white lambert
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
// Right wall -- green lambert
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
// Left wall -- red lambert
{ 556.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 0.0f, 0.0f },
// Short block -- white lambert
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 242.0f, 165.0f, 274.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 242.0f, 165.0f, 274.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
// Tall block -- white lambert
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 314.0f, 330.0f, 455.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 314.0f, 330.0f, 455.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
// Ceiling light -- emmissive
{ 343.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 332.0f, 0.0f },
{ 343.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 332.0f, 0.0f },
{ 343.0f, 548.6f, 332.0f, 0.0f }
} };
static std::array<uint32_t, TRIANGLE_COUNT> g_mat_indices =
{ {
0, 0, // Floor -- white lambert
0, 0, // Ceiling -- white lambert
0, 0, // Back wall -- white lambert
1, 1, // Right wall -- green lambert
2, 2, // Left wall -- red lambert
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Short block -- white lambert
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // Tall block -- white lambert
3, 3 // Ceiling light -- emmissive
} };
const std::array<float3, MAT_COUNT> g_emission_colors =
{ {
{ 0.0f, 0.0f, 0.0f },
{ 0.0f, 0.0f, 0.0f },
{ 0.0f, 0.0f, 0.0f },
{ 15.0f, 15.0f, 5.0f }
} };
const std::array<float3, MAT_COUNT> g_diffuse_colors =
{ {
{ 0.80f, 0.80f, 0.80f },
{ 0.05f, 0.80f, 0.05f },
{ 0.80f, 0.05f, 0.05f },
{ 0.50f, 0.00f, 0.00f }
} };
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// GLFW mouse-button callback: on press, remember which button is held and
// seed the trackball with the press position; on release, clear the state.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    double cursor_x = 0.0;
    double cursor_y = 0.0;
    glfwGetCursorPos( window, &cursor_x, &cursor_y );

    if( action != GLFW_PRESS )
    {
        mouse_button = -1;
        return;
    }

    mouse_button = button;
    trackball.startTracking( static_cast<int>( cursor_x ), static_cast<int>( cursor_y ) );
}
// GLFW cursor-motion callback: a left-button drag orbits the camera
// (look-at point fixed), a right-button drag rotates the view direction
// (eye fixed). Any other button state is ignored.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    const bool left_drag  = ( mouse_button == GLFW_MOUSE_BUTTON_LEFT );
    const bool right_drag = ( mouse_button == GLFW_MOUSE_BUTTON_RIGHT );
    if( !left_drag && !right_drag )
        return;

    trackball.setViewMode( left_drag ? sutil::Trackball::LookAtFixed : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), width, height );
    camera_changed = true;
}
// GLFW window-resize callback: records the new render resolution and marks
// the camera and output buffer dirty so updateState() rebuilds them on the
// next frame. Resizes are ignored while the window is iconified (see
// windowIconifyCallback / the 'minimized' flag).
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    // Keep rendering at the current resolution when the window is minimized.
    if( minimized )
        return;
    // Output dimensions must be at least 1 in both x and y.
    sutil::ensureMinimumSize( res_x, res_y );
    width = res_x;
    height = res_y;
    camera_changed = true;
    resize_dirty = true;
}
// GLFW iconify callback: tracks minimized state so windowSizeCallback() can
// keep the last valid resolution while the window is minimized.
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = ( iconified > 0 );
}
// GLFW key callback: Q or ESC closes the window; G is reserved for toggling
// the UI overlay (currently a no-op placeholder).
//
// Fix: the G-key branch was chained as an 'else if' to the outer
// 'action == GLFW_PRESS' test, so it only triggered on key RELEASE/REPEAT of
// G rather than on press. It is moved inside the press branch; since the
// branch body is empty this changes no observable behavior, but removes the
// latent bug for when the toggle is implemented.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    if( action == GLFW_PRESS )
    {
        if( key == GLFW_KEY_Q ||
            key == GLFW_KEY_ESCAPE )
        {
            glfwSetWindowShouldClose( window, true );
        }
        else if( key == GLFW_KEY_G )
        {
            // toggle UI draw
        }
    }
}
//------------------------------------------------------------------------------
//
// Helper functions
//
//------------------------------------------------------------------------------
// Prints command-line usage to stderr and terminates the process.
// Called both for --help and on any argument-parsing error; note it exits
// with status 0 in both cases.
void printUsageAndExit( const char* argv0 )
{
    std::cerr << "Usage : " << argv0 << " [options]\n";
    std::cerr << "Options: --file | -f <filename> File for image output\n";
    std::cerr << " --launch-samples | -s Number of samples per pixel per launch (default 16)\n";
    std::cerr << " --help | -h Print this usage message\n";
    exit( 0 );
}
// Sets up the global camera at the classic Cornell-box viewpoint and attaches
// the trackball controller (gimbal-locked, Y-up reference frame) used by the
// mouse callbacks. Marks the camera dirty so the first frame uploads it.
void initCameraState()
{
    camera.setEye( make_float3( 278.0f, 273.0f, -900.0f ) );
    camera.setLookat( make_float3( 278.0f, 273.0f, 330.0f ) );
    camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
    camera.setFovY( 35.0f );
    camera_changed = true;
    trackball.setCamera( &camera );
    trackball.setMoveSpeed( 10.0f );
    trackball.setReferenceFrame( make_float3( 1.0f, 0.0f, 0.0f ), make_float3( 0.0f, 0.0f, 1.0f ), make_float3( 0.0f, 1.0f, 0.0f ) );
    trackball.setGimbalLock(true);
}
// Fills this device's launch parameters (resolution, sample count, the area
// light matching the emissive ceiling quad in g_vertices, and the GAS handle)
// and allocates the device-side copy of Params that launchSubframe() uploads
// each frame. The sample/accumulation buffer pointers are set separately in
// allocIOBuffers().
void initLaunchParams( PathTracerState& state )
{
    state.params.subframe_index = 0u;
    state.params.width = width;
    state.params.height = height;
    state.params.samples_per_launch = samples_per_launch;
    state.params.device_idx = state.device_idx;
    state.params.light.emission = make_float3( 15.0f, 15.0f, 5.0f );
    state.params.light.corner = make_float3( 343.0f, 548.5f, 227.0f );
    state.params.light.v1 = make_float3( 0.0f, 0.0f, 105.0f );
    state.params.light.v2 = make_float3( -130.0f, 0.0f, 0.0f );
    // Light normal derived from its edge vectors.
    state.params.light.normal = normalize ( cross( state.params.light.v1, state.params.light.v2) );
    state.params.handle = state.gas_handle;
    // IO buffers are assigned in allocIOBuffers
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_params), sizeof( Params ) ) );
}
// (Re)allocates this GPU's per-sample buffers. StaticWorkDistribution splits
// the width x height pixel grid among num_gpus devices; this device gets
// num_samples of them. The previous buffers are released first
// (cudaFree(nullptr) is a documented no-op, so the first call is safe), then
// fillSamplesCUDA populates the pixel-coordinate list for this device's share.
// Called on startup and again from handleResize().
void allocIOBuffers( PathTracerState& state, int num_gpus )
{
    StaticWorkDistribution wd;
    wd.setRasterSize( width, height );
    wd.setNumGPUs( num_gpus );
    state.num_samples = wd.numSamples( state.device_idx );
    CUDA_CHECK( cudaSetDevice( state.device_idx ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_sample_indices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_sample_accum ) ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_sample_indices ), state.num_samples*sizeof( int2 ) ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_sample_accum ), state.num_samples*sizeof( float4 ) ) );
    state.params.sample_index_buffer = state.d_sample_indices;
    state.params.sample_accum_buffer = state.d_sample_accum;
    state.params.result_buffer = 0; // Managed by CUDAOutputBuffer
    fillSamplesCUDA(
        state.num_samples,
        state.stream,
        state.device_idx,
        num_gpus,
        width,
        height,
        state.d_sample_indices
    );
}
// If the camera moved since the last frame, recompute its UVW basis for the
// current aspect ratio and copy eye/U/V/W into every device's launch params.
// Clears the camera_changed flag.
void handleCameraUpdate( std::vector<PathTracerState>& states )
{
    if( !camera_changed )
        return;
    camera_changed = false;

    camera.setAspectRatio( static_cast<float>( width ) / static_cast<float>( height ) );

    float3 cam_u, cam_v, cam_w;
    camera.UVWFrame( cam_u, cam_v, cam_w );

    for( auto& state : states )
    {
        state.params.eye = camera.eye();
        state.params.U   = cam_u;
        state.params.V   = cam_v;
        state.params.W   = cam_w;
    }
}
// If the window was resized, grows/shrinks the shared output buffer (which
// lives on the first device — matching output_buffer.setDevice(0) in main)
// and reallocates every GPU's per-sample buffers for the new pixel count.
// Clears the resize_dirty flag.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, std::vector<PathTracerState>& states )
{
    if( !resize_dirty )
        return;
    resize_dirty = false;
    CUDA_CHECK( cudaSetDevice( states.front().device_idx ) );
    output_buffer.resize( width, height );
    // Realloc accumulation buffer
    for( auto& state : states )
    {
        state.params.width = width;
        state.params.height = height;
        allocIOBuffers( state, static_cast<int>( states.size() ) );
    }
}
// Per-frame state maintenance: restart progressive accumulation on every GPU
// whenever the camera or output resolution changed, then let the dedicated
// handlers apply (and clear) those pending changes.
void updateState( sutil::CUDAOutputBuffer<uchar4>& output_buffer, std::vector<PathTracerState>& states )
{
    const bool restart_accumulation = camera_changed || resize_dirty;
    if( restart_accumulation )
    {
        for( auto& state : states )
            state.params.subframe_index = 0;
    }

    handleCameraUpdate( states );
    handleResize( output_buffer, states );
}
// Renders one subframe across all GPUs: maps the shared output buffer once,
// then for each device asynchronously uploads its Params and launches a 1-D
// OptiX launch over that device's sample list (each GPU writes only its own
// pixels into result_buffer). The buffer is unmapped after all launches are
// queued, and every device is synchronized before returning.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, std::vector<PathTracerState>& states )
{
    uchar4* result_buffer_data = output_buffer.map();
    for( auto& state : states )
    {
        // Launch
        CUDA_CHECK( cudaSetDevice( state.device_idx ) );
        state.params.result_buffer = result_buffer_data;
        // Async copy on this device's stream so uploads/launches overlap
        // across GPUs.
        CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( state.d_params ),
                    &state.params,
                    sizeof( Params ),
                    cudaMemcpyHostToDevice,
                    state.stream
                    ) );
        OPTIX_CHECK( optixLaunch(
                    state.pipeline,
                    state.stream,
                    reinterpret_cast<CUdeviceptr>( state.d_params ),
                    sizeof( Params ),
                    &state.sbt,
                    state.num_samples, // launch width
                    1, // launch height
                    1 // launch depth
                    ) );
    }
    output_buffer.unmap();
    for( auto& state : states )
    {
        CUDA_CHECK( cudaSetDevice( state.device_idx ) );
        CUDA_SYNC_CHECK();
    }
}
// Blits the rendered output (via its PBO) to the window, using the actual
// framebuffer resolution, which on HiDPI displays can differ from the
// logical window size.
void displaySubframe(
        sutil::CUDAOutputBuffer<uchar4>& output_buffer,
        sutil::GLDisplay&                gl_display,
        GLFWwindow*                      window )
{
    int fb_res_x = 0;
    int fb_res_y = 0;
    glfwGetFramebufferSize( window, &fb_res_x, &fb_res_y );

    gl_display.display( width, height, fb_res_x, fb_res_y, output_buffer.getPBO() );
}
// OptiX context log callback. Writes one line per message to stderr in the
// form "[LL][         TAG]: message" (level right-aligned to 2 columns, tag
// to 12).
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */)
{
    std::cerr << "[" << std::setw( 2 ) << level << "]"
              << "[" << std::setw( 12 ) << tag << "]: "
              << message << "\n";
}
// Creates the OptiX device context and a CUDA stream for the device that is
// currently active (callers select it with cudaSetDevice beforehand — see
// createContexts). cudaFree(0) forces CUDA runtime initialization on that
// device; CUcontext 0 tells OptiX to adopt the current CUDA context.
void createContext( PathTracerState& state )
{
    // Initialize CUDA on this device
    CUDA_CHECK( cudaFree( 0 ) );
    OptixDeviceContext context;
    CUcontext cuCtx = 0; // zero means take the current context
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction = &context_log_cb;
    options.logCallbackLevel = 4;
    OPTIX_CHECK( optixDeviceContextCreate( cuCtx, &options, &context ) );
    state.context = context;
    CUDA_CHECK( cudaStreamCreate( &state.stream ) );
}
// Initializes OptiX, enumerates all visible CUDA devices, and creates one
// PathTracerState (OptiX context + stream) per device, printing each device
// name as it goes.
void createContexts( std::vector<PathTracerState>& state )
{
    OPTIX_CHECK( optixInit() );
    int32_t device_count = 0;
    CUDA_CHECK( cudaGetDeviceCount( &device_count ) );
    state.resize( device_count );
    std::cout << "Total GPUs visible: " << device_count << std::endl;
    cudaDeviceProp prop;
    for( int i = 0; i < device_count; ++i )
    {
        state[i].device_idx = i;
        CUDA_CHECK( cudaGetDeviceProperties ( &prop, i ) );
        CUDA_CHECK( cudaSetDevice( i ) );
        std::cout << "\t[" << i << "]: " << prop.name << std::endl;
        createContext( state[i] );
    }
}
// Builds the triangle geometry acceleration structure (GAS) on the current
// device: uploads the Cornell-box vertices and per-triangle material indices,
// builds the GAS with per-material SBT records, then compacts it if that
// saves memory. state.d_vertices is kept alive (it is referenced by the SBT
// hit records in createSBT); the material-index buffer and temp buffer are
// freed once the build completes.
void buildMeshAccel( PathTracerState& state )
{
    //
    // copy mesh data to device
    //
    const size_t vertices_size_in_bytes = g_vertices.size() * sizeof( Vertex );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_vertices ), vertices_size_in_bytes ) );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( state.d_vertices ),
        g_vertices.data(),
        vertices_size_in_bytes,
        cudaMemcpyHostToDevice
        ) );
    CUdeviceptr d_mat_indices = 0;
    const size_t mat_indices_size_in_bytes = g_mat_indices.size() * sizeof( uint32_t );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &d_mat_indices ),
        mat_indices_size_in_bytes
        ) );
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( d_mat_indices),
        g_mat_indices.data(),
        mat_indices_size_in_bytes,
        cudaMemcpyHostToDevice
        ) );
    //
    // Build triangle GAS
    //
    uint32_t triangle_input_flags[MAT_COUNT] =
    {
        // One per SBT record for this build input
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT
    };
    OptixBuildInput triangle_input = {};
    triangle_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
    triangle_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
    // Vertex struct is 16 bytes (xyz + pad), hence the explicit stride.
    triangle_input.triangleArray.vertexStrideInBytes = sizeof( Vertex );
    triangle_input.triangleArray.numVertices = static_cast<uint32_t>( g_vertices.size() );
    triangle_input.triangleArray.vertexBuffers = &state.d_vertices;
    triangle_input.triangleArray.flags = triangle_input_flags;
    triangle_input.triangleArray.numSbtRecords = MAT_COUNT;
    // Per-triangle material index selects one of the MAT_COUNT SBT records.
    triangle_input.triangleArray.sbtIndexOffsetBuffer = d_mat_indices;
    triangle_input.triangleArray.sbtIndexOffsetSizeInBytes = sizeof(uint32_t);
    triangle_input.triangleArray.sbtIndexOffsetStrideInBytes = sizeof(uint32_t);
    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
    accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;
    OptixAccelBufferSizes gas_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage(
        state.context,
        &accel_options,
        &triangle_input,
        1, // num_build_inputs
        &gas_buffer_sizes
        ) );
    CUdeviceptr d_temp_buffer;
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &d_temp_buffer ),
        gas_buffer_sizes.tempSizeInBytes
        ) );
    // non-compacted output
    // The compacted-size result is appended (8-byte aligned) after the GAS
    // output so a single allocation serves both.
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ),
        compactedSizeOffset + 8
        ) );
    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );
    OPTIX_CHECK( optixAccelBuild(
        state.context,
        0, // CUDA stream
        &accel_options,
        &triangle_input,
        1, // num build inputs
        d_temp_buffer,
        gas_buffer_sizes.tempSizeInBytes,
        d_buffer_temp_output_gas_and_compacted_size,
        gas_buffer_sizes.outputSizeInBytes,
        &state.gas_handle,
        &emitProperty, // emitted property list
        1 // num emitted properties
        ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_mat_indices ) ) );
    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, (void*)emitProperty.result, sizeof(size_t), cudaMemcpyDeviceToHost ) );
    // Only compact when it actually shrinks the GAS.
    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &state.d_gas_output_buffer ), compacted_gas_size ) );
        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( state.context, 0, state.gas_handle, state.d_gas_output_buffer,
            compacted_gas_size, &state.gas_handle ) );
        CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
    }
    else
    {
        state.d_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
    }
}
// Compiles optixMultiGPU.cu (fetched via sutil::getInputData) into an OptiX
// module for this device, and fills state.pipeline_compile_options, which
// must stay consistent across module/pipeline creation (single-GAS traversal,
// 2 payload and 2 attribute values, launch params bound to "params").
void createModule( PathTracerState& state )
{
    OptixModuleCompileOptions module_compile_options = {};
    module_compile_options.maxRegisterCount = OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
    module_compile_options.optLevel = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
    module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;
    state.pipeline_compile_options.usesMotionBlur = false;
    state.pipeline_compile_options.traversableGraphFlags = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS;
    state.pipeline_compile_options.numPayloadValues = 2;
    state.pipeline_compile_options.numAttributeValues = 2;
    state.pipeline_compile_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE; // should be OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
    state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";
    size_t inputSize = 0;
    const char* input = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixMultiGPU.cu", inputSize );
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
        state.context,
        &module_compile_options,
        &state.pipeline_compile_options,
        input,
        inputSize,
        log,
        &sizeof_log,
        &state.ptx_module
        ) );
}
// Creates the five program groups for this device's pipeline: raygen,
// radiance miss, occlusion miss (a NULL program — occlusion rays that miss
// simply terminate), radiance closest-hit, and occlusion closest-hit.
//
// Fix: the occlusion hit group was created with plain OPTIX_CHECK while every
// sibling call used OPTIX_CHECK_LOG, silently discarding the compile log on
// failure; it now uses OPTIX_CHECK_LOG for consistent diagnostics.
void createProgramGroups( PathTracerState& state )
{
    OptixProgramGroupOptions program_group_options = {};

    // Raygen
    OptixProgramGroupDesc raygen_prog_group_desc = {};
    raygen_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
    raygen_prog_group_desc.raygen.module = state.ptx_module;
    raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
        state.context,
        &raygen_prog_group_desc,
        1, // num program groups
        &program_group_options,
        log,
        &sizeof_log,
        &state.raygen_prog_group
        )
        );

    // Radiance miss
    OptixProgramGroupDesc miss_prog_group_desc = {};
    miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module = state.ptx_module;
    miss_prog_group_desc.miss.entryFunctionName = "__miss__radiance";
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
        state.context,
        &miss_prog_group_desc,
        1, // num program groups
        &program_group_options,
        log,
        &sizeof_log,
        &state.radiance_miss_group
        )
        );

    // Occlusion miss: NULL program — nothing to do when a shadow ray misses.
    memset( &miss_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
    miss_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module = nullptr; // NULL miss program for occlusion rays
    miss_prog_group_desc.miss.entryFunctionName = nullptr;
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
        state.context,
        &miss_prog_group_desc,
        1, // num program groups
        &program_group_options,
        log,
        &sizeof_log,
        &state.occlusion_miss_group
        )
        );

    // Radiance closest-hit
    OptixProgramGroupDesc hit_prog_group_desc = {};
    hit_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hit_prog_group_desc.hitgroup.moduleCH = state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__radiance";
    OPTIX_CHECK_LOG(
        optixProgramGroupCreate(
            state.context,
            &hit_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.radiance_hit_group
            )
        );

    // Occlusion closest-hit
    memset( &hit_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
    hit_prog_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hit_prog_group_desc.hitgroup.moduleCH = state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__occlusion";
    OPTIX_CHECK_LOG(
        optixProgramGroupCreate(
            state.context,
            &hit_prog_group_desc,
            1, // num program groups
            &program_group_options,
            log,
            &sizeof_log,
            &state.occlusion_hit_group
            )
        );
}
// Links the five program groups into this device's pipeline and sizes the
// pipeline stack for a maximum trace depth of 2 (radiance ray + shadow ray),
// no callables, and a traversable depth of 1 (single GAS).
void createPipeline( PathTracerState& state )
{
    const uint32_t max_trace_depth = 2;
    OptixProgramGroup program_groups[] =
    {
        state.raygen_prog_group,
        state.radiance_miss_group,
        state.occlusion_miss_group,
        state.radiance_hit_group,
        state.occlusion_hit_group
    };
    OptixPipelineLinkOptions pipeline_link_options = {};
    pipeline_link_options.maxTraceDepth = max_trace_depth;
    pipeline_link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_FULL;
    char log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate(
        state.context,
        &state.pipeline_compile_options,
        &pipeline_link_options,
        program_groups,
        sizeof( program_groups ) / sizeof( program_groups[0] ),
        log,
        &sizeof_log,
        &state.pipeline
        ) );
    // Accumulate stack requirements over all program groups, then derive the
    // final stack sizes for the chosen trace/callable depths.
    OptixStackSizes stack_sizes = {};
    for( auto& prog_group : program_groups )
    {
        OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
    }
    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
        0, // maxCCDepth
        0, // maxDCDEpth
        &direct_callable_stack_size_from_traversal,
        &direct_callable_stack_size_from_state, &continuation_stack_size ) );
    OPTIX_CHECK( optixPipelineSetStackSize( state.pipeline, direct_callable_stack_size_from_traversal,
        direct_callable_stack_size_from_state, continuation_stack_size,
        1 // maxTraversableDepth
        ) );
}
// Builds this device's shader binding table: one raygen record, one miss
// record per ray type, and RAY_TYPE_COUNT hit-group records per material
// (radiance records carry the material colors plus the device vertex buffer,
// viewed as float4 thanks to Vertex's 16-byte layout; occlusion records carry
// no data).
//
// Fix: the miss-record staging array was hard-coded as 'ms_sbt[2]' while the
// device upload copies miss_record_size*RAY_TYPE_COUNT bytes from it — a
// latent out-of-bounds read if RAY_TYPE_COUNT ever differs from 2. The array
// is now sized by RAY_TYPE_COUNT to match the copy.
void createSBT( PathTracerState& state )
{
    // Raygen record
    CUdeviceptr d_raygen_record;
    const size_t raygen_record_size = sizeof( RayGenRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_raygen_record ), raygen_record_size ) );
    RayGenRecord rg_sbt;
    OPTIX_CHECK( optixSbtRecordPackHeader( state.raygen_prog_group, &rg_sbt ) );
    rg_sbt.data = {1.0f, 0.f, 0.f};
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( d_raygen_record ),
        &rg_sbt,
        raygen_record_size,
        cudaMemcpyHostToDevice
        ) );

    // Miss records: [0] = radiance (black background), [1] = occlusion (NULL program)
    CUdeviceptr d_miss_records;
    const size_t miss_record_size = sizeof( MissRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_miss_records ), miss_record_size*RAY_TYPE_COUNT ) );
    MissRecord ms_sbt[RAY_TYPE_COUNT];
    OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_miss_group, &ms_sbt[0] ) );
    ms_sbt[0].data = {0.0f, 0.0f, 0.0f};
    OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_miss_group, &ms_sbt[1] ) );
    ms_sbt[1].data = {0.0f, 0.0f, 0.0f};
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( d_miss_records ),
        ms_sbt,
        miss_record_size*RAY_TYPE_COUNT,
        cudaMemcpyHostToDevice
        ) );

    // Hit-group records: RAY_TYPE_COUNT consecutive records per material.
    CUdeviceptr d_hitgroup_records;
    const size_t hitgroup_record_size = sizeof( HitGroupRecord );
    CUDA_CHECK( cudaMalloc(
        reinterpret_cast<void**>( &d_hitgroup_records ),
        hitgroup_record_size*RAY_TYPE_COUNT*MAT_COUNT
        ) );
    HitGroupRecord hitgroup_records[ RAY_TYPE_COUNT*MAT_COUNT ];
    for( int i = 0; i < MAT_COUNT; ++i )
    {
        {
            const int sbt_idx = i*RAY_TYPE_COUNT+0; // SBT for radiance ray-type for ith material
            OPTIX_CHECK( optixSbtRecordPackHeader( state.radiance_hit_group, &hitgroup_records[sbt_idx] ) );
            hitgroup_records[ sbt_idx ].data.emission_color = g_emission_colors[i];
            hitgroup_records[ sbt_idx ].data.diffuse_color = g_diffuse_colors[i];
            hitgroup_records[ sbt_idx ].data.vertices = reinterpret_cast<float4*>(state.d_vertices);
        }
        {
            const int sbt_idx = i*RAY_TYPE_COUNT+1; // SBT for occlusion ray-type for ith material
            memset( &hitgroup_records[sbt_idx], 0, hitgroup_record_size );
            OPTIX_CHECK( optixSbtRecordPackHeader( state.occlusion_hit_group, &hitgroup_records[sbt_idx] ) );
        }
    }
    CUDA_CHECK( cudaMemcpy(
        reinterpret_cast<void*>( d_hitgroup_records ),
        hitgroup_records,
        hitgroup_record_size*RAY_TYPE_COUNT*MAT_COUNT,
        cudaMemcpyHostToDevice
        ) );

    state.sbt.raygenRecord = d_raygen_record;
    state.sbt.missRecordBase = d_miss_records;
    state.sbt.missRecordStrideInBytes = static_cast<uint32_t>( miss_record_size );
    state.sbt.missRecordCount = RAY_TYPE_COUNT;
    state.sbt.hitgroupRecordBase = d_hitgroup_records;
    state.sbt.hitgroupRecordStrideInBytes = static_cast<uint32_t>( hitgroup_record_size );
    state.sbt.hitgroupRecordCount = RAY_TYPE_COUNT*MAT_COUNT;
}
// Destroys all OptiX objects and frees every device allocation owned by this
// per-GPU state: pipeline and program groups first, then module and context,
// then the raw device buffers.
//
// Fix: state.occlusion_miss_group is created in createProgramGroups but was
// never destroyed here — a program-group leak; it is now destroyed with its
// siblings.
// NOTE(review): state.stream (created in createContext via cudaStreamCreate)
// is still never destroyed — consider adding cudaStreamDestroy.
void cleanupState( PathTracerState& state )
{
    OPTIX_CHECK( optixPipelineDestroy ( state.pipeline ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( state.raygen_prog_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( state.radiance_miss_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( state.occlusion_miss_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( state.radiance_hit_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( state.occlusion_hit_group ) );
    OPTIX_CHECK( optixModuleDestroy ( state.ptx_module ) );
    OPTIX_CHECK( optixDeviceContextDestroy( state.context ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.raygenRecord ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.missRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_vertices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_gas_output_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_sample_indices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_sample_accum ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( state.d_params ) ) );
}
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
// Entry point. Parses command-line options, builds the full per-GPU OptiX
// state for every visible device, then either runs the interactive GLFW
// render loop (default) or renders a single frame to an image file (--file).
// The output buffer uses ZERO_COPY mode and is pinned to device 0 so all
// GPUs can write their pixel samples into the same buffer.
int main( int argc, char* argv[] )
{
    //
    // Parse command line options
    //
    std::string outfile;
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--file" || arg == "-f" )
        {
            // Option value required.
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            outfile = argv[++i];
        }
        else if( arg == "--launch-samples" || arg == "-s" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            samples_per_launch = atoi( argv[++i] );
        }
        else
        {
            std::cerr << "Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
    try
    {
        initCameraState();
        std::vector<PathTracerState> states;
        createContexts( states );
        //
        // Set up OptiX state
        //
        // Each device gets its own GAS, module, pipeline, SBT and IO buffers.
        for( auto& state : states )
        {
            CUDA_CHECK( cudaSetDevice( state.device_idx ) );
            buildMeshAccel ( state );
            createModule ( state );
            createProgramGroups( state );
            createPipeline ( state );
            createSBT ( state );
            allocIOBuffers ( state, static_cast<int>( states.size() ) );
        }
        for( auto& state : states )
            initLaunchParams( state );
        if( outfile.empty() )
        {
            // Interactive mode.
            GLFWwindow* window = sutil::initUI( "optixMultiGPU", width, height );
            glfwSetMouseButtonCallback ( window, mouseButtonCallback );
            glfwSetCursorPosCallback ( window, cursorPosCallback );
            glfwSetWindowSizeCallback ( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback ( window, keyCallback );
            //
            // Render loop
            //
            // Scoped so the output buffer is destroyed before cleanupUI.
            {
                sutil::CUDAOutputBuffer<uchar4> output_buffer( sutil::CUDAOutputBufferType::ZERO_COPY, width, height );
                output_buffer.setDevice( 0 );
                sutil::GLDisplay gl_display;
                std::chrono::duration<double> state_update_time( 0.0 );
                std::chrono::duration<double> render_time( 0.0 );
                std::chrono::duration<double> display_time( 0.0 );
                do
                {
                    auto t0 = std::chrono::steady_clock::now();
                    glfwPollEvents();
                    updateState( output_buffer, states );
                    auto t1 = std::chrono::steady_clock::now();
                    state_update_time += t1 - t0;
                    t0 = t1;
                    launchSubframe( output_buffer, states );
                    t1 = std::chrono::steady_clock::now();
                    render_time += t1 - t0;
                    t0 = t1;
                    displaySubframe( output_buffer, gl_display, window );
                    t1 = std::chrono::steady_clock::now();
                    display_time += t1-t0;
                    sutil::displayStats( state_update_time, render_time, display_time );
                    glfwSwapBuffers(window);
                    // Advance progressive accumulation on every GPU.
                    for( auto& state : states )
                        ++state.params.subframe_index;
                } while( !glfwWindowShouldClose( window ) );
                // Drain outstanding work on all devices before teardown.
                for( auto& state : states )
                {
                    CUDA_CHECK( cudaSetDevice( state.device_idx ) );
                    CUDA_SYNC_CHECK();
                }
            }
            sutil::cleanupUI( window );
        }
        else
        {
            // File-output mode: render one subframe and save it.
            sutil::CUDAOutputBuffer<uchar4> output_buffer( sutil::CUDAOutputBufferType::ZERO_COPY, width, height );
            output_buffer.setDevice( 0 );
            updateState( output_buffer, states );
            launchSubframe( output_buffer, states );
            sutil::ImageBuffer buffer;
            buffer.data = output_buffer.getHostPointer();
            buffer.width = output_buffer.width();
            buffer.height = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            sutil::saveImage( outfile.c_str(), buffer, false );
        }
        for( auto& state : states )
            cleanupState( state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMultiGPU/optixMultiGPU.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixMultiGPU.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
#include <cuda/random.h>
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Per-ray data carried through the radiance path; a pointer to this struct is
// packed into two 32-bit payload registers (see packPointer/unpackPointer).
struct RadiancePRD
{
    // TODO: move some state directly into payload registers?
    float3 emitted;      // emission collected at the last hit (only when countEmitted is set)
    float3 radiance;     // direct light estimate from the last hit (or miss color)
    float3 attenuation;  // product of surface colors along the path so far
    float3 origin;       // origin of the next bounce ray
    float3 direction;    // direction of the next bounce ray
    unsigned int seed;   // RNG state, threaded through the bounces
    int countEmitted;    // nonzero => add surface emission at the next hit
    int done;            // nonzero => terminate the path (miss was hit)
    int pad;             // keeps struct size/alignment stable
};
// Orthonormal basis around a given (unit) normal; used to transform local
// hemisphere samples (z = normal) into world space.
struct Onb
{
    __forceinline__ __device__ Onb(const float3& normal)
    {
        m_normal = normal;
        // Pick the larger of x/z to seed a binormal that is not parallel to the normal.
        if( fabs(m_normal.x) > fabs(m_normal.z) )
        {
            m_binormal.x = -m_normal.y;
            m_binormal.y = m_normal.x;
            m_binormal.z = 0;
        }
        else
        {
            m_binormal.x = 0;
            m_binormal.y = -m_normal.z;
            m_binormal.z = m_normal.y;
        }
        m_binormal = normalize(m_binormal);
        m_tangent = cross( m_binormal, m_normal );
    }
    // Map a vector expressed in (tangent, binormal, normal) coordinates to world space.
    __forceinline__ __device__ void inverse_transform(float3& p) const
    {
        p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal;
    }
    float3 m_tangent;
    float3 m_binormal;
    float3 m_normal;
};
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Reassemble a 64-bit pointer from the two 32-bit payload words produced by
// packPointer(): i0 carries the high half, i1 the low half.
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
    unsigned long long bits = static_cast<unsigned long long>( i0 );
    bits <<= 32;
    bits |= static_cast<unsigned long long>( i1 );
    return reinterpret_cast<void*>( bits );
}
// Split a 64-bit pointer into two 32-bit halves so it fits into OptiX payload
// registers: i0 receives the high half, i1 the low half.
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
    const unsigned long long bits = reinterpret_cast<unsigned long long>( ptr );
    i0 = static_cast<unsigned int>( bits >> 32 );
    i1 = static_cast<unsigned int>( bits & 0x00000000ffffffffull );
}
// Recover the RadiancePRD pointer that the raygen program packed into
// payload registers 0 and 1 before tracing.
static __forceinline__ __device__ RadiancePRD* getPRD()
{
    const unsigned int u0 = optixGetPayload_0();
    const unsigned int u1 = optixGetPayload_1();
    return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) );
}
// Report an occlusion-ray hit back to the caller via payload register 0
// (read as the 'occluded' flag in traceOcclusion).
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
    optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
// Sample a direction on the +z hemisphere from two uniform random numbers by
// sampling the unit disk and projecting up (yields a cosine-weighted
// distribution — Malley's method).
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
    // Uniformly sample disk.
    const float r = sqrtf( u1 );
    const float phi = 2.0f*M_PIf * u2;
    p.x = r * cosf( phi );
    p.y = r * sinf( phi );
    // Project up to hemisphere.
    p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) );
}
// Trace a radiance ray; the PRD pointer is packed into payload registers 0/1
// so hit/miss programs can read and update the path state in place.
static __forceinline__ __device__ void traceRadiance(
        OptixTraversableHandle handle,
        float3 ray_origin,
        float3 ray_direction,
        float tmin,
        float tmax,
        RadiancePRD* prd
        )
{
    // TODO: deduce stride from num ray-types passed in params
    unsigned int u0, u1;
    packPointer( prd, u0, u1 );
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f, // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_NONE,
            RAY_TYPE_RADIANCE, // SBT offset
            RAY_TYPE_COUNT, // SBT stride
            RAY_TYPE_RADIANCE, // missSBTIndex
            u0, u1 );
}
// Shadow-ray test: returns true if any geometry lies between tmin and tmax
// along the ray. Payload 0 starts at 0 and is set by __closesthit__occlusion;
// TERMINATE_ON_FIRST_HIT avoids finding the closest hit unnecessarily.
static __forceinline__ __device__ bool traceOcclusion(
        OptixTraversableHandle handle,
        float3 ray_origin,
        float3 ray_direction,
        float tmin,
        float tmax
        )
{
    unsigned int occluded = 0u;
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f, // rayTime
            OptixVisibilityMask( 1 ),
            OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
            RAY_TYPE_OCCLUSION, // SBT offset
            RAY_TYPE_COUNT, // SBT stride
            RAY_TYPE_OCCLUSION, // missSBTIndex
            occluded );
    return occluded;
}
// Faint per-device tint overlaid on the image so the work distribution is
// visible: device 0 -> red, 1 -> green, 2 -> blue, all others -> black.
__forceinline__ __device__ float3 deviceColor( unsigned int idx )
{
    float3 tint = make_float3( 0.0f, 0.0f, 0.0f );
    switch( idx )
    {
        case 0: tint.x = 0.05f; break;
        case 1: tint.y = 0.05f; break;
        case 2: tint.z = 0.05f; break;
        default:                break;
    }
    return tint;
}
//------------------------------------------------------------------------------
//
//
//------------------------------------------------------------------------------
// Ray-generation program: each launch index maps (via sample_index_buffer) to a
// pixel assigned to this device. Traces samples_per_launch paths of up to 4
// segments, accumulates progressively across subframes, and writes the final
// color (plus the per-device tint) into the shared result buffer.
extern "C" __global__ void __raygen__rg()
{
    const int w = params.width;
    const int h = params.height;
    const uint3 launch_idx = optixGetLaunchIndex();
    const int2 pixel_idx = params.sample_index_buffer[ launch_idx.x ];
    // Work distribution might assign tiles that cross over image boundary
    if( pixel_idx.x > w-1 || pixel_idx.y > h-1 )
        return;
    const float3 eye = params.eye;
    const float3 U = params.U;
    const float3 V = params.V;
    const float3 W = params.W;
    const int subframe_index = params.subframe_index;
    // Seed depends on pixel and subframe so every sample sequence differs.
    unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index );
    float3 result = make_float3( 0.0f );
    int i = params.samples_per_launch;
    do
    {
        // The center of each pixel is at fraction (0.5,0.5)
        const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) );
        // Map pixel + jitter into [-1,1]^2 screen coordinates.
        const float2 d = 2.0f * make_float2(
                ( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
                ( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
                ) - 1.0f;
        float3 ray_direction = normalize(d.x*U + d.y*V + W);
        float3 ray_origin = eye;
        RadiancePRD prd;
        prd.emitted = make_float3(0.f);
        prd.radiance = make_float3(0.f);
        prd.attenuation = make_float3(1.f);
        prd.countEmitted = true;
        prd.done = false;
        prd.seed = seed;
        int depth = 0;
        for( ;; )
        {
            traceRadiance(
                    params.handle,
                    ray_origin,
                    ray_direction,
                    0.01f, // tmin // TODO: smarter offset
                    1e16f, // tmax
                    &prd );
            result += prd.emitted;
            result += prd.radiance * prd.attenuation;
            if( prd.done || depth >= 3 ) // TODO RR, variable for depth
                break;
            // Continue the path from the last hit.
            ray_origin = prd.origin;
            ray_direction = prd.direction;
            ++depth;
        }
    }
    while( --i );
    float3 accum_color = result / static_cast<float>( params.samples_per_launch );
    if( subframe_index > 0 )
    {
        // Progressive refinement: blend this subframe into the running average.
        const float a = 1.0f / static_cast<float>( subframe_index+1 );
        const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]);
        accum_color = lerp( accum_color_prev, accum_color, a );
    }
    params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f);
    const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x;
    params.result_buffer[ image_index ] = make_color ( accum_color + deviceColor( params.device_idx ) );
}
// Radiance miss program: return the constant background color from the miss
// SBT record and mark the path as finished.
extern "C" __global__ void __miss__radiance()
{
    MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    RadiancePRD* prd = getPRD();
    prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b );
    prd->done = true;
}
// Occlusion closest-hit program: any hit means the shadow ray is blocked.
extern "C" __global__ void __closesthit__occlusion()
{
    setPayloadOcclusion( true );
}
// Radiance closest-hit program: diffuse shading with next-event estimation.
// Computes the shading normal from the hit triangle, samples a cosine-weighted
// bounce direction for the next path segment, and adds a direct-light sample
// from the parallelogram light (shadow-tested with traceOcclusion).
extern "C" __global__ void __closesthit__radiance()
{
    HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
    const int prim_idx = optixGetPrimitiveIndex();
    const float3 ray_dir = optixGetWorldRayDirection();
    // Vertices are stored 3 per triangle (no index buffer).
    const int vert_idx_offset = prim_idx*3;
    const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] );
    const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] );
    const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] );
    // Geometric normal, flipped to face the incoming ray.
    const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) );
    const float3 N = faceforward( N_0, -ray_dir, N_0 );
    const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir;
    RadiancePRD* prd = getPRD();
    // Only count emission on the first segment to avoid double counting with NEE.
    if( prd->countEmitted )
        prd->emitted = rt_data->emission_color;
    else
        prd->emitted = make_float3( 0.0f );
    unsigned int seed = prd->seed;
    {
        // Sample the cosine-weighted BRDF for the continuation ray.
        const float z1 = rnd(seed);
        const float z2 = rnd(seed);
        float3 w_in;
        cosine_sample_hemisphere( z1, z2, w_in );
        Onb onb( N );
        onb.inverse_transform( w_in );
        prd->direction = w_in;
        prd->origin = P;
        prd->attenuation *= rt_data->diffuse_color;
        prd->countEmitted = false;
    }
    // Sample a point on the area light.
    const float z1 = rnd(seed);
    const float z2 = rnd(seed);
    prd->seed = seed;
    ParallelogramLight light = params.light;
    const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2;
    // Calculate properties of light sample (for area based pdf)
    const float Ldist = length(light_pos - P );
    const float3 L = normalize(light_pos - P );
    const float nDl = dot( N, L );
    const float LnDl = -dot( light.normal, L );
    float weight = 0.0f;
    // Only shade if the light faces the surface and the surface faces the light.
    if( nDl > 0.0f && LnDl > 0.0f )
    {
        const bool occluded = traceOcclusion(
            params.handle,
            P,
            L,
            0.01f, // tmin
            Ldist - 0.01f // tmax
            );
        if( !occluded )
        {
            // Area-to-solid-angle conversion for the area light sample.
            const float A = length(cross(light.v1, light.v2));
            weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist);
        }
    }
    prd->radiance += light.emission * weight;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMultiGPU/optixMultiGPU.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Ray types; also used as SBT offsets/miss indices (stride = RAY_TYPE_COUNT).
enum RayType
{
    RAY_TYPE_RADIANCE = 0,   // full shading rays
    RAY_TYPE_OCCLUSION = 1,  // shadow/visibility rays
    RAY_TYPE_COUNT
};
// Area light defined as corner + two edge vectors; points on the light are
// corner + s*v1 + t*v2 for s,t in [0,1].
struct ParallelogramLight
{
    float3 corner;
    float3 v1, v2;    // edge vectors spanning the parallelogram
    float3 normal;
    float3 emission;  // radiant emission color/intensity
};
// Launch parameters uploaded per device before each optixLaunch.
struct Params
{
    unsigned int subframe_index;       // progressive-accumulation frame counter
    int2* sample_index_buffer;         // per-launch-index pixel coordinate for this device
    float4* sample_accum_buffer;       // running radiance average, one entry per launch index
    uchar4* result_buffer;             // final 8-bit RGBA image
    unsigned int width;
    unsigned int height;
    unsigned int samples_per_launch;   // paths traced per pixel per launch
    unsigned int device_idx;           // used for the per-device debug tint
    float3 eye;                        // camera position
    float3 U;                          // camera frame basis vectors
    float3 V;
    float3 W;
    ParallelogramLight light; // TODO: make light list
    OptixTraversableHandle handle;     // scene acceleration structure
};
// SBT record payload for the ray-generation program (currently just a color).
struct RayGenData
{
    float r, g, b;
};
// SBT record payload for miss programs: the background color.
struct MissData
{
    float r, g, b;
};
// SBT record payload for hit groups: per-material shading data plus the
// shared vertex buffer (3 vertices per triangle, no index buffer).
struct HitGroupData
{
    float3 emission_color;
    float3 diffuse_color;
    float4* vertices;
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixMultiGPU/optixMultiGPU_kernels.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "sutil/WorkDistribution.h"
// Kernel: one block per sample (see fillSamplesCUDA launch). Each block writes
// the pixel coordinate that StaticWorkDistribution assigns to this GPU for
// that sample index.
extern "C" __global__ void fillSamples(
        int gpu_idx,
        int num_gpus,
        int width,
        int height,
        int2* sample_indices )
{
    StaticWorkDistribution wd;
    wd.setRasterSize( width, height );
    wd.setNumGPUs( num_gpus );
    const int sample_idx = blockIdx.x;
    sample_indices[sample_idx] = wd.getSamplePixel( gpu_idx, sample_idx );
}
// Host wrapper: launch fillSamples with num_samples blocks of one thread each
// on the given stream, filling sample_indices with this GPU's pixel assignments.
extern "C" __host__ void fillSamplesCUDA(
        int num_samples,
        cudaStream_t stream,
        int gpu_idx,
        int num_gpus,
        int width,
        int height,
        int2* sample_indices )
{
    fillSamples<<<num_samples, 1, 0, stream>>>(
            gpu_idx,
            num_gpus,
            width,
            height,
            sample_indices );
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixNVLink/optixNVLink.cpp | C++ | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
/*
Sample Description:
If multiple GPUs are present with nvlink, or other peer-to-peer access, between
them, then this connectivity can be used in several ways. This sample
demonstrates two of these:
Read-only buffers: nvlink capability divides the available GPUs into
nvlink "islands". Since the connection is so fast, some readable resources
can be shared so that one copy is held per island. Typically, these
resources will be spread across the GPUs of an island so that the memory
burden is shared. This sample shares textures so that only one copy is
held per nvlink island.
The Frame buffer: (1) In the single-GPU case, a gl interop buffer can
be used to avoid data copies to OpenGL for display. (2) When multiple GPUs are
used which are not all nvlink connected, zero-copy memory, which transfers data
through the host, is typically used for the frame buffer. (3) When all of
GPUs reside in a single nvlink island, the link can be used to transfer
frame buffer data so that it does not need to be transferred through the
host. This sample demonstrates all three of these techniques.
*/
#include <algorithm>
#include <array>
#include <cfloat>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <string>
#include <vector>
#include <glad/glad.h> // Needs to be included before gl_interop
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <nvml_configure.h> // configured file to tell if we have nvml
#if OPTIX_USE_NVML
#include <nvml.h>
#endif
#include <optix.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <sampleConfig.h>
#include <sutil/Camera.h>
#include <sutil/Trackball.h>
#include <sutil/CUDAOutputBuffer.h>
#include <sutil/Exception.h>
#include <sutil/GLDisplay.h>
#include <sutil/Matrix.h>
#include <sutil/sutil.h>
#include <sutil/vec_math.h>
#include <sutil/WorkDistribution.h>
#include <GLFW/glfw3.h>
#include "optixNVLink.h"
//------------------------------------------------------------------------------
//
// Variables related to display
//
//------------------------------------------------------------------------------
// Window state
bool resize_dirty = false;   // set when the framebuffer must be re-sized
bool minimized = false;      // skip rendering while the window is iconified
// Camera state
bool camera_changed = true;  // forces accumulation restart when true
sutil::Camera camera;
sutil::Trackball trackball;
// Mouse state
int2 mouse_prev_pos;
int32_t mouse_button = -1;   // currently pressed GLFW button, -1 if none
int32_t width = 768;
int32_t height = 768;
int32_t samples_per_launch = 8;
// Output file name (empty means do not output a file)
std::string g_outfile = "";
// How to scale the device color overlay on the image (0 means do not show)
float g_device_color_scale = 1.0f;
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
// Declared in the companion .cu file; fills 'samples' with the pixel
// coordinates assigned to gpu_idx by the static work distribution.
extern "C" void fillSamplesCUDA(
    int32_t num_samples,
    cudaStream_t stream,
    int32_t gpu_idx,
    int32_t num_gpus,
    int32_t width,
    int32_t height,
    int2* samples );
//------------------------------------------------------------------------------
//
// Local types
//
//------------------------------------------------------------------------------
// Generic SBT record: the OptiX-required, aligned header followed by the
// user payload T.
template <typename T>
struct Record
{
    __align__( OPTIX_SBT_RECORD_ALIGNMENT ) char header[OPTIX_SBT_RECORD_HEADER_SIZE];
    T data;
};
typedef Record<RayGenData> RayGenRecord;
typedef Record<MissData> MissRecord;
typedef Record<HitGroupData> HitGroupRecord;
// Plain geometry PODs used to stage scene data.
// Vertex is padded to 16 bytes to match the float4 device layout.
struct Vertex
{
    float x, y, z, pad;
};
struct TexCoord
{
    float s, t;
};
struct IndexedTriangle
{
    uint32_t v1, v2, v3, pad;
};
// Row-major 3x4 instance transform, as expected by OptixInstance.
struct Instance
{
    float transform[12];
};
// All OptiX/CUDA state owned by one GPU: context, acceleration structure,
// pipeline, SBT, per-device sample buffers, and launch parameters.
struct PerDeviceSampleState
{
    int32_t device_idx = -1;
    OptixDeviceContext context = 0;
    OptixTraversableHandle gas_handle = 0; // Traversable handle for triangle AS
    CUdeviceptr d_gas_output_buffer = 0; // Triangle AS memory
    CUdeviceptr d_vertices = 0;
    CUdeviceptr d_tex_coords = 0;
    OptixModule ptx_module = 0;
    OptixPipelineCompileOptions pipeline_compile_options = {};
    OptixPipeline pipeline = 0;
    OptixProgramGroup raygen_prog_group = 0;
    OptixProgramGroup radiance_miss_group = 0;
    OptixProgramGroup occlusion_miss_group = 0;
    OptixProgramGroup radiance_hit_group = 0;
    OptixProgramGroup occlusion_hit_group = 0;
    OptixShaderBindingTable sbt = {};
    int32_t num_samples = 0;            // number of pixel samples assigned to this device
    int2* d_sample_indices = 0;         // device buffer of assigned pixel coordinates
    float4* d_sample_accum = 0;         // device accumulation buffer
    Params params;
    Params* d_params;
    CUstream stream = 0;
    uint32_t peers = 0;                 // bitmask of peer-accessible devices
};
//------------------------------------------------------------------------------
//
// Forward declarations
//
//------------------------------------------------------------------------------
cudaTextureObject_t getDiffuseTextureObject( int material_id, PerDeviceSampleState& pd_state );
//------------------------------------------------------------------------------
//
// Load NVML dynamically
//
//------------------------------------------------------------------------------
#if OPTIX_USE_NVML
// NVML is loaded dynamically at runtime; each *_p pointer below is resolved
// from the shared library in loadNvmlFunctions().
bool g_nvmlLoaded = false;
#ifdef WIN32
#define APIFUNC FAR WINAPI
#else
#define APIFUNC
#endif
typedef nvmlReturn_t (APIFUNC *NVML_INIT_TYPE)();
NVML_INIT_TYPE nvmlInit_p;
typedef nvmlReturn_t (APIFUNC *NVML_DEVICE_GET_HANDLE_BY_INDEX_TYPE)( unsigned int, nvmlDevice_t* );
NVML_DEVICE_GET_HANDLE_BY_INDEX_TYPE nvmlDeviceGetHandleByIndex_p;
typedef nvmlReturn_t (APIFUNC *NVML_DEVICE_GET_PCI_INFO_TYPE)( nvmlDevice_t, nvmlPciInfo_t* );
NVML_DEVICE_GET_PCI_INFO_TYPE nvmlDeviceGetPciInfo_p;
typedef nvmlReturn_t (APIFUNC *NVML_DEVICE_GET_NVLINK_CAPABILITY_TYPE)( nvmlDevice_t, unsigned int, nvmlNvLinkCapability_t, unsigned int* );
NVML_DEVICE_GET_NVLINK_CAPABILITY_TYPE nvmlDeviceGetNvLinkCapability_p;
typedef nvmlReturn_t (APIFUNC *NVML_DEVICE_GET_NVLINK_STATE_TYPE)( nvmlDevice_t, unsigned int, nvmlEnableState_t* );
NVML_DEVICE_GET_NVLINK_STATE_TYPE nvmlDeviceGetNvLinkState_p;
typedef nvmlReturn_t (APIFUNC *NVML_DEVICE_GET_NVLINK_REMOTE_PCI_INFO_TYPE)( nvmlDevice_t, unsigned int, nvmlPciInfo_t* );
NVML_DEVICE_GET_NVLINK_REMOTE_PCI_INFO_TYPE nvmlDeviceGetNvLinkRemotePciInfo_p;
typedef nvmlReturn_t (APIFUNC *NVML_SYSTEM_GET_DRIVER_VERSION_TYPE)( char*, unsigned int );
NVML_SYSTEM_GET_DRIVER_VERSION_TYPE nvmlSystemGetDriverVersion_p;
#ifdef WIN32
// Load a DLL via the OptiX helper (searches the driver store as well).
void* loadDllHandle( const char* dllName )
{
    void* dllHandle = optixLoadWindowsDllFromName( dllName );
    return dllHandle;
}
// Resolve a symbol from a loaded DLL; logs (but does not abort) on failure.
void* getProcedureAddress( void* dllHandle, const char* funcName )
{
    void* proc = GetProcAddress( (HMODULE)dllHandle, funcName );
    if ( proc == NULL )
        std::cerr << funcName << " not found\n";
    return proc;
}
#else
// Load a shared object with dlopen; returns NULL on failure.
void* loadSharedObjectHandle( const char* soName )
{
    void* soHandle = dlopen( soName, RTLD_NOW );
    return soHandle;
}
// Resolve a symbol with dlsym; logs (but does not abort) on failure.
void* getProcedureAddress( void* handlePtr, const char* funcName )
{
    void* proc = dlsym( handlePtr, funcName );
    if( !proc )
        std::cerr << funcName << " not found\n";
    return proc;
}
#endif
// Dynamically load the NVML library and resolve the function pointers used by
// this sample. Returns false if the library itself cannot be loaded.
// NOTE(review): individual symbol failures are only logged, not treated as
// fatal here — callers should check the *_p pointers before use.
static bool loadNvmlFunctions()
{
    // Load the library
#ifdef WIN32
    const char* soName = "nvml.dll";
    void* handle = loadDllHandle( soName );
#else
    const char* soName = "libnvidia-ml.so";
    void* handle = loadSharedObjectHandle( soName );
#endif
    if ( !handle )
    {
        std::cout << "UNABLE TO LOAD " << soName << "\n";
        return false;
    }
    // Resolve the individual *_p function pointers we are using
    nvmlInit_p = reinterpret_cast<NVML_INIT_TYPE>( getProcedureAddress( handle, "nvmlInit" ) );
    nvmlDeviceGetHandleByIndex_p = reinterpret_cast<NVML_DEVICE_GET_HANDLE_BY_INDEX_TYPE>( getProcedureAddress( handle, "nvmlDeviceGetHandleByIndex" ) );
    nvmlDeviceGetPciInfo_p = reinterpret_cast<NVML_DEVICE_GET_PCI_INFO_TYPE>( getProcedureAddress( handle, "nvmlDeviceGetPciInfo" ) );
    nvmlDeviceGetNvLinkCapability_p = reinterpret_cast<NVML_DEVICE_GET_NVLINK_CAPABILITY_TYPE>( getProcedureAddress( handle, "nvmlDeviceGetNvLinkCapability" ) );
    nvmlDeviceGetNvLinkState_p = reinterpret_cast<NVML_DEVICE_GET_NVLINK_STATE_TYPE>( getProcedureAddress( handle, "nvmlDeviceGetNvLinkState" ) );
    nvmlDeviceGetNvLinkRemotePciInfo_p = reinterpret_cast<NVML_DEVICE_GET_NVLINK_REMOTE_PCI_INFO_TYPE>( getProcedureAddress( handle, "nvmlDeviceGetNvLinkRemotePciInfo" ) );
    nvmlSystemGetDriverVersion_p = reinterpret_cast<NVML_SYSTEM_GET_DRIVER_VERSION_TYPE>( getProcedureAddress( handle, "nvmlSystemGetDriverVersion" ) );
    std::cout << "LOADED " << soName << "\n";
    return true;
}
#endif // OPTIX_USE_NVML
//------------------------------------------------------------------------------
//
// Scene data
//
//------------------------------------------------------------------------------
// Cornell-box scene size: 32 triangles, 4 materials.
const int32_t TRIANGLE_COUNT = 32;
const int32_t MAT_COUNT = 4;
const static std::array<Vertex, TRIANGLE_COUNT*3> g_vertices =
{ {
// Floor -- white lambert
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 0.0f, 0.0f },
// Ceiling -- white lambert
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
// Back wall -- white lambert
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
// Right wall -- green lambert
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 0.0f, 0.0f },
{ 0.0f, 548.8f, 559.2f, 0.0f },
{ 0.0f, 0.0f, 559.2f, 0.0f },
// Left wall -- red lambert
{ 556.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 0.0f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 0.0f, 0.0f, 0.0f },
{ 556.0f, 548.8f, 559.2f, 0.0f },
{ 556.0f, 548.8f, 0.0f, 0.0f },
// Short block -- white lambert
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 242.0f, 165.0f, 274.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 242.0f, 165.0f, 274.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 290.0f, 165.0f, 114.0f, 0.0f },
{ 290.0f, 0.0f, 114.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
{ 130.0f, 165.0f, 65.0f, 0.0f },
{ 130.0f, 0.0f, 65.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 240.0f, 165.0f, 272.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 240.0f, 0.0f, 272.0f, 0.0f },
{ 82.0f, 165.0f, 225.0f, 0.0f },
{ 82.0f, 0.0f, 225.0f, 0.0f },
// Tall block -- white lambert
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 314.0f, 330.0f, 455.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 314.0f, 330.0f, 455.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 472.0f, 330.0f, 406.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 472.0f, 0.0f, 406.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 314.0f, 330.0f, 456.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 314.0f, 0.0f, 456.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 265.0f, 330.0f, 296.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 265.0f, 0.0f, 296.0f, 0.0f },
{ 423.0f, 330.0f, 247.0f, 0.0f },
{ 423.0f, 0.0f, 247.0f, 0.0f },
// Ceiling light -- emmissive
{ 343.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 332.0f, 0.0f },
{ 343.0f, 548.6f, 227.0f, 0.0f },
{ 213.0f, 548.6f, 332.0f, 0.0f },
{ 343.0f, 548.6f, 332.0f, 0.0f }
} };
const static std::array<TexCoord, TRIANGLE_COUNT*3> g_tex_coords =
{ {
// Floor -- white lambert
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
// Ceiling -- white lambert
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
// Back wall -- white lambert
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
// Right wall -- green lambert
{ 0.0f, 0.0f },
{ 0.0f, 1.0f },
{ 1.0f, 1.0f },
{ 0.0f, 0.0f },
{ 1.0f, 1.0f },
{ 1.0f, 0.0f },
// Left wall -- red lambert
{ 0.0f, 0.0f },
{ 1.0f, 0.0f },
{ 1.0f, 1.0f },
{ 0.0f, 0.0f },
{ 1.0f, 1.0f },
{ 0.0f, 1.0f },
// Short block -- white lambert
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
// Tall block -- white lambert
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
// Ceiling light -- emmissive
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f },
{ 0.0f, 0.0f }
} };
// Per-triangle material (SBT record) index; indexes into the color arrays below.
static std::array<uint32_t, TRIANGLE_COUNT> g_mat_indices =
{ {
    0, 0,                          // Floor         -- white lambert
    0, 0,                          // Ceiling       -- white lambert
    0, 0,                          // Back wall     -- white lambert
    1, 1,                          // Right wall    -- green lambert
    2, 2,                          // Left wall     -- red lambert
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // Short block   -- white lambert
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // Tall block    -- white lambert
    3, 3                           // Ceiling light -- emmissive
} };
// Per-material emission; only material 3 (the ceiling light) emits.
const std::array<float3, MAT_COUNT> g_emission_colors =
{ {
    {  0.0f,  0.0f,  0.0f },
    {  0.0f,  0.0f,  0.0f },
    {  0.0f,  0.0f,  0.0f },
    { 15.0f, 15.0f,  5.0f }
} };
// Per-material diffuse albedo (white, green, red walls; dark red light base).
const std::array<float3, MAT_COUNT> g_diffuse_colors =
{ {
    { 0.80f, 0.80f, 0.80f },
    { 0.05f, 0.80f, 0.05f },
    { 0.80f, 0.05f, 0.05f },
    { 0.50f, 0.00f, 0.00f }
} };
//------------------------------------------------------------------------------
//
// Texture tracking
//
//------------------------------------------------------------------------------
// Materials for which textures will be made (1 = make a texture; here the
// green and red wall materials are textured)
std::array<int, MAT_COUNT> g_make_diffuse_textures = {0, 1, 1, 0};

// Backing storage for the textures. These will be shared per
// P2P island when sharing is enabled.
std::array<std::vector<cudaArray_t>, MAT_COUNT> g_diffuse_texture_data;

// Texture objects. Each device must have a texture object for
// each texture, but the backing stores can be shared.
std::array<std::vector<cudaTextureObject_t>, MAT_COUNT> g_diffuse_textures;

// Texture memory usage per device (bytes, accumulated as textures are loaded)
std::vector<float> g_device_tex_usage;

// Kinds of connections between devices to accept as peers
const int PEERS_NONE   = 0;
const int PEERS_NVLINK = 1;
const int PEERS_ALL    = 2;

// Configuration decisions (overridable from the command line)
int  g_peer_usage           = PEERS_NVLINK;
bool g_share_textures       = true;
bool g_optimize_framebuffer = true;

// Width (and height) in texels of the procedurally generated square textures
const int TEXTURE_WIDTH = 1024;
//------------------------------------------------------------------------------
//
// GLFW callbacks
//
//------------------------------------------------------------------------------
// GLFW mouse-button handler: on press, remember the active button and start
// trackball tracking at the current cursor position; on release, clear it.
static void mouseButtonCallback( GLFWwindow* window, int button, int action, int mods )
{
    double xpos = 0.0, ypos = 0.0;
    glfwGetCursorPos( window, &xpos, &ypos );

    if( action != GLFW_PRESS )
    {
        mouse_button = -1;
        return;
    }

    mouse_button = button;
    trackball.startTracking( static_cast<int>( xpos ), static_cast<int>( ypos ) );
}
// GLFW cursor-motion handler: while the left button is held the camera orbits
// the look-at point; while the right button is held the eye stays fixed.
static void cursorPosCallback( GLFWwindow* window, double xpos, double ypos )
{
    if( mouse_button != GLFW_MOUSE_BUTTON_LEFT && mouse_button != GLFW_MOUSE_BUTTON_RIGHT )
        return;

    trackball.setViewMode( mouse_button == GLFW_MOUSE_BUTTON_LEFT
                               ? sutil::Trackball::LookAtFixed
                               : sutil::Trackball::EyeFixed );
    trackball.updateTracking( static_cast<int>( xpos ), static_cast<int>( ypos ), width, height );
    camera_changed = true;
}
// GLFW window-resize handler: accept the new size (clamped to at least 1x1)
// and flag the camera/output buffer as needing an update.
static void windowSizeCallback( GLFWwindow* window, int32_t res_x, int32_t res_y )
{
    // While the window is minimized, keep rendering at the previous resolution.
    if( minimized )
        return;

    // Output dimensions must be at least 1 in both x and y.
    sutil::ensureMinimumSize( res_x, res_y );

    width          = res_x;
    height         = res_y;
    camera_changed = true;
    resize_dirty   = true;
}
// GLFW iconify handler: track minimized state so windowSizeCallback can
// ignore resize events while the window is iconified.
static void windowIconifyCallback( GLFWwindow* window, int32_t iconified )
{
    minimized = iconified > 0;
}
// GLFW keyboard handler: Q or ESC closes the window.
static void keyCallback( GLFWwindow* window, int32_t key, int32_t /*scancode*/, int32_t action, int32_t /*mods*/ )
{
    if( action == GLFW_PRESS )
    {
        if( key == GLFW_KEY_Q ||
            key == GLFW_KEY_ESCAPE )
        {
            glfwSetWindowShouldClose( window, true );
        }
    }
    // NOTE(review): this branch only runs when action != GLFW_PRESS (i.e. on
    // release/repeat). If 'G' is meant to react to key presses, the check
    // belongs inside the block above -- confirm intended behavior. The body
    // is currently a no-op either way.
    else if( key == GLFW_KEY_G )
    {
        // toggle UI draw
    }
}
//------------------------------------------------------------------------------
//
// Helper functions
//
//------------------------------------------------------------------------------
// Print the command-line usage text to stdout and terminate the process.
void printUsageAndExit( const char* argv0 )
{
    std::cout
        << "Usage : " << argv0 << " [options]\n"
        << "Options: --launch-samples | -s Number of samples per pixel per launch (default 8)\n"
           "         --file | -f Output file name\n"
           "         --device-color-scale | -d Device color overlay scale (default 1.0)\n"
           "         --peers | -p P2P connections to include [none, nvlink, all] (default nvlink)\n"
           "         --optimize-framebuffer | -o Optimize the framebuffer for speed [true, false] (default true)\n"
           "         --share-textures | -t Share textures if allowed [true, false] (default true)\n"
           "         --help | -h Print this usage message\n";
    exit( 0 );
}
void initCameraState()
{
camera.setEye( make_float3( 278.0f, 273.0f, -900.0f ) );
camera.setLookat( make_float3( 278.0f, 273.0f, 330.0f ) );
camera.setUp( make_float3( 0.0f, 1.0f, 0.0f ) );
camera.setFovY( 35.0f );
camera_changed = true;
trackball.setCamera( &camera );
trackball.setMoveSpeed( 10.0f );
trackball.setReferenceFrame( make_float3( 1.0f, 0.0f, 0.0f ), make_float3( 0.0f, 0.0f, 1.0f ), make_float3( 0.0f, 1.0f, 0.0f ) );
trackball.setGimbalLock(true);
}
// Fill in the launch parameters for one device and allocate the device-side
// copy of the Params struct. Sample IO buffers are assigned separately in
// allocIOBuffers().
void initLaunchParams( PerDeviceSampleState& pd_state )
{
    pd_state.params.subframe_index     = 0u;  // restart progressive accumulation
    pd_state.params.width              = width;
    pd_state.params.height             = height;
    pd_state.params.samples_per_launch = samples_per_launch;
    pd_state.params.device_idx         = pd_state.device_idx;

    // Area light matching the "Ceiling light" quad in the vertex data; the
    // corner plus the two edge vectors v1/v2 span the light rectangle.
    pd_state.params.light.emission     = make_float3(   15.0f,  15.0f,   5.0f );
    pd_state.params.light.corner       = make_float3(  343.0f, 548.5f, 227.0f );
    pd_state.params.light.v1           = make_float3(    0.0f,   0.0f, 105.0f );
    pd_state.params.light.v2           = make_float3( -130.0f,   0.0f,   0.0f );
    pd_state.params.light.normal       = normalize  ( cross( pd_state.params.light.v1,  pd_state.params.light.v2) );

    pd_state.params.handle             = pd_state.gas_handle;
    pd_state.params.device_color_scale = g_device_color_scale;

    // IO buffers are assigned in allocIOBuffers

    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pd_state.d_params), sizeof( Params ) ) );
}
// (Re)allocate the per-device sample index and accumulation buffers for the
// current raster size, and fill the index buffer with this device's share of
// the pixel samples. Safe to call repeatedly (e.g. on resize): old buffers
// are freed first.
void allocIOBuffers( PerDeviceSampleState& pd_state, int num_gpus )
{
    // Static work distribution decides how many samples this device owns.
    StaticWorkDistribution wd;
    wd.setRasterSize( width, height );
    wd.setNumGPUs( num_gpus );

    pd_state.num_samples = wd.numSamples( pd_state.device_idx );

    CUDA_CHECK( cudaSetDevice( pd_state.device_idx ) );

    // Free any previous allocations before reallocating at the new size.
    // (cudaFree on a null pointer is a no-op, so the first call is safe
    // provided the pointers start zero-initialized -- TODO confirm.)
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_sample_indices ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_sample_accum ) ) );

    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pd_state.d_sample_indices ), pd_state.num_samples*sizeof( int2 ) ) );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pd_state.d_sample_accum ), pd_state.num_samples*sizeof( float4 ) ) );

    pd_state.params.sample_index_buffer = pd_state.d_sample_indices;
    pd_state.params.sample_accum_buffer = pd_state.d_sample_accum;
    pd_state.params.result_buffer = 0; // Managed by CUDAOutputBuffer

    // Populate the index buffer with the (x,y) pixels this device samples.
    fillSamplesCUDA(
            pd_state.num_samples,
            pd_state.stream,
            pd_state.device_idx,
            num_gpus,
            width,
            height,
            pd_state.d_sample_indices
            );
}
// If the camera moved since the last frame, recompute its basis and push the
// new eye/U/V/W vectors into every device's launch parameters.
void handleCameraUpdate( std::vector<PerDeviceSampleState>& pd_states )
{
    if( !camera_changed )
        return;
    camera_changed = false;

    camera.setAspectRatio( static_cast<float>( width ) / static_cast<float>( height ) );

    float3 cam_u, cam_v, cam_w;
    camera.UVWFrame( cam_u, cam_v, cam_w );

    for( size_t i = 0; i < pd_states.size(); ++i )
    {
        Params& params = pd_states[i].params;
        params.eye = camera.eye();
        params.U   = cam_u;
        params.V   = cam_v;
        params.W   = cam_w;
    }
}
// If the window was resized, resize the shared output buffer and reallocate
// each device's sample buffers for the new dimensions.
void handleResize( sutil::CUDAOutputBuffer<uchar4>& output_buffer, std::vector<PerDeviceSampleState>& pd_states )
{
    if( !resize_dirty )
        return;
    resize_dirty = false;

    const int num_gpus = static_cast<int>( pd_states.size() );

    // The output buffer lives on the first device.
    CUDA_CHECK( cudaSetDevice( pd_states.front().device_idx ) );
    output_buffer.resize( width, height );

    // Reallocate accumulation buffers on every device.
    for( auto& pd_state : pd_states )
    {
        pd_state.params.width  = width;
        pd_state.params.height = height;
        allocIOBuffers( pd_state, num_gpus );
    }
}
// Propagate pending camera/resize changes to all devices; either change
// resets the progressive-accumulation subframe counter.
void updateDeviceStates( sutil::CUDAOutputBuffer<uchar4>& output_buffer, std::vector<PerDeviceSampleState>& pd_states )
{
    const bool restart_accumulation = camera_changed || resize_dirty;
    if( restart_accumulation )
    {
        for( auto& pd_state : pd_states )
            pd_state.params.subframe_index = 0;
    }

    handleCameraUpdate( pd_states );
    handleResize( output_buffer, pd_states );
}
// Render one subframe: asynchronously upload each device's params and launch
// its pipeline on its own stream, then synchronize all devices before
// returning so the shared output buffer is complete.
void launchSubframe( sutil::CUDAOutputBuffer<uchar4>& output_buffer, std::vector<PerDeviceSampleState>& pd_states )
{
    uchar4* result_buffer_data = output_buffer.map();
    for( PerDeviceSampleState& pd_state : pd_states )
    {
        // Launch
        pd_state.params.result_buffer = result_buffer_data;

        // Async copy + launch on the device's stream; no sync yet so all
        // devices run concurrently.
        CUDA_CHECK( cudaMemcpyAsync( reinterpret_cast<void*>( pd_state.d_params ),
                                     &pd_state.params,
                                     sizeof( Params ),
                                     cudaMemcpyHostToDevice,
                                     pd_state.stream
                                     ) );

        // 1D launch: one thread per sample owned by this device.
        OPTIX_CHECK( optixLaunch(
                    pd_state.pipeline,
                    pd_state.stream,
                    reinterpret_cast<CUdeviceptr>( pd_state.d_params ),
                    sizeof( Params ),
                    &pd_state.sbt,
                    pd_state.num_samples,  // launch width
                    1,                     // launch height
                    1                      // launch depth
                    ) );
    }
    output_buffer.unmap();

    // Wait for every device to finish its subframe.
    for( PerDeviceSampleState& pd_state : pd_states )
    {
        CUDA_CHECK( cudaSetDevice( pd_state.device_idx ) );
        CUDA_SYNC_CHECK();
    }
}
// Blit the rendered output buffer's PBO to the window via GL.
void displaySubframe(
        sutil::CUDAOutputBuffer<uchar4>& output_buffer,
        sutil::GLDisplay&                gl_display,
        GLFWwindow*                      window )
{
    // The framebuffer resolution can differ from the window size (HiDPI).
    int framebuf_res_x = 0;
    int framebuf_res_y = 0;
    glfwGetFramebufferSize( window, &framebuf_res_x, &framebuf_res_y );

    gl_display.display( width, height, framebuf_res_x, framebuf_res_y, output_buffer.getPBO() );
}
// OptiX context log callback: writes "[LL][         TAG]: message" to stderr.
static void context_log_cb( unsigned int level, const char* tag, const char* message, void* /*cbdata */)
{
    std::cerr << "[" << std::setw( 2 ) << level << "]["
              << std::setw( 12 ) << tag << "]: "
              << message << "\n";
}
// Create the OptiX device context and CUDA stream for one device. Assumes
// the matching CUDA device has already been made current via cudaSetDevice
// by the caller (see createContexts).
void createContext( PerDeviceSampleState& pd_state )
{
    // Initialize CUDA on this device
    CUDA_CHECK( cudaFree( 0 ) );

    OptixDeviceContext context;
    CUcontext          cuCtx   = 0;  // zero means take the current context
    OptixDeviceContextOptions options = {};
    options.logCallbackFunction       = &context_log_cb;
    options.logCallbackLevel          = 4;  // verbose: print status/progress messages too
    OPTIX_CHECK( optixDeviceContextCreate( cuCtx, &options, &context ) );

    pd_state.context = context;
    CUDA_CHECK( cudaStreamCreate( &pd_state.stream ) );
}
// Initialize OptiX, enumerate all visible CUDA devices, and create one
// per-device state entry (with its own OptiX context and stream) per device.
void createContexts( std::vector<PerDeviceSampleState>& pd_state )
{
    OPTIX_CHECK( optixInit() );

    int32_t device_count = 0;
    CUDA_CHECK( cudaGetDeviceCount( &device_count ) );
    pd_state.resize( device_count );
    std::cout << "TOTAL VISIBLE GPUs: " << device_count << std::endl;

    cudaDeviceProp prop;
    for( int i = 0; i < device_count; ++i )
    {
        // note: the device index must be the same as the position in the state vector
        pd_state[i].device_idx = i;
        CUDA_CHECK( cudaGetDeviceProperties ( &prop, i ) );
        CUDA_CHECK( cudaSetDevice( i ) );
        std::cout << "GPU [" << i << "]: " << prop.name << std::endl;

        createContext( pd_state[i] );
    }
}
// Upload the per-vertex texture coordinates to the given device, storing the
// device pointer in pd_state.d_tex_coords.
void uploadAdditionalShadingData( PerDeviceSampleState& pd_state )
{
    const size_t num_bytes = sizeof( TexCoord ) * g_tex_coords.size();
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pd_state.d_tex_coords ), num_bytes ) );
    CUDA_CHECK( cudaMemcpy( reinterpret_cast<void*>( pd_state.d_tex_coords ),
                            g_tex_coords.data(),
                            num_bytes,
                            cudaMemcpyHostToDevice ) );
}
// Upload the scene geometry to the device and build a compacted triangle GAS
// (geometry acceleration structure) for it, storing the traversable handle
// and output buffer in pd_state.
void buildMeshAccel( PerDeviceSampleState& pd_state )
{
    //
    // copy mesh data to device
    //
    const size_t vertices_size_in_bytes = g_vertices.size() * sizeof( Vertex );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pd_state.d_vertices ), vertices_size_in_bytes ) );
    CUDA_CHECK( cudaMemcpy(
                reinterpret_cast<void*>( pd_state.d_vertices ),
                g_vertices.data(),
                vertices_size_in_bytes,
                cudaMemcpyHostToDevice
                ) );

    // Per-triangle material indices; only needed during the build, freed below.
    CUdeviceptr d_mat_indices = 0;
    const size_t mat_indices_size_in_bytes = g_mat_indices.size() * sizeof( uint32_t );
    CUDA_CHECK( cudaMalloc(
                reinterpret_cast<void**>( &d_mat_indices ),
                mat_indices_size_in_bytes
                ) );
    CUDA_CHECK( cudaMemcpy(
                reinterpret_cast<void*>( d_mat_indices),
                g_mat_indices.data(),
                mat_indices_size_in_bytes,
                cudaMemcpyHostToDevice
                ) );

    //
    // Build triangle GAS
    //
    uint32_t triangle_input_flags[MAT_COUNT] =
    {
        // One per SBT record for this build input
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT,
        OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT
    };

    // Single triangle build input covering all materials; the SBT index
    // offset buffer maps each triangle to one of MAT_COUNT SBT records.
    OptixBuildInput triangle_input = {};
    triangle_input.type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
    triangle_input.triangleArray.vertexFormat = OPTIX_VERTEX_FORMAT_FLOAT3;
    triangle_input.triangleArray.vertexStrideInBytes = sizeof( Vertex );
    triangle_input.triangleArray.numVertices = static_cast<uint32_t>( g_vertices.size() );
    triangle_input.triangleArray.vertexBuffers = &pd_state.d_vertices;
    triangle_input.triangleArray.flags = triangle_input_flags;
    triangle_input.triangleArray.numSbtRecords = MAT_COUNT;
    triangle_input.triangleArray.sbtIndexOffsetBuffer = d_mat_indices;
    triangle_input.triangleArray.sbtIndexOffsetSizeInBytes = sizeof(uint32_t);
    triangle_input.triangleArray.sbtIndexOffsetStrideInBytes = sizeof(uint32_t);

    OptixAccelBuildOptions accel_options = {};
    accel_options.buildFlags = OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
    accel_options.operation = OPTIX_BUILD_OPERATION_BUILD;

    OptixAccelBufferSizes gas_buffer_sizes;
    OPTIX_CHECK( optixAccelComputeMemoryUsage(
                pd_state.context,
                &accel_options,
                &triangle_input,
                1,  // num_build_inputs
                &gas_buffer_sizes
                ) );

    CUdeviceptr d_temp_buffer;
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_temp_buffer ), gas_buffer_sizes.tempSizeInBytes ) );

    // non-compacted output
    // The compacted-size query result is appended (8-byte aligned) after the
    // GAS output so a single allocation covers both.
    CUdeviceptr d_buffer_temp_output_gas_and_compacted_size;
    size_t compactedSizeOffset = roundUp<size_t>( gas_buffer_sizes.outputSizeInBytes, 8ull );
    CUDA_CHECK( cudaMalloc(
                reinterpret_cast<void**>( &d_buffer_temp_output_gas_and_compacted_size ),
                compactedSizeOffset + 8
                ) );

    OptixAccelEmitDesc emitProperty = {};
    emitProperty.type = OPTIX_PROPERTY_TYPE_COMPACTED_SIZE;
    emitProperty.result = ( CUdeviceptr )( (char*)d_buffer_temp_output_gas_and_compacted_size + compactedSizeOffset );

    OPTIX_CHECK( optixAccelBuild( pd_state.context,
                                  0,              // CUDA stream
                                  &accel_options,
                                  &triangle_input,
                                  1,              // num build inputs
                                  d_temp_buffer,
                                  gas_buffer_sizes.tempSizeInBytes,
                                  d_buffer_temp_output_gas_and_compacted_size,
                                  gas_buffer_sizes.outputSizeInBytes,
                                  &pd_state.gas_handle,
                                  &emitProperty,  // emitted property list
                                  1               // num emitted properties
                                  ) );

    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_temp_buffer ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( d_mat_indices ) ) );

    // If compaction actually saves memory, compact into a tight buffer;
    // otherwise keep the build output as-is.
    size_t compacted_gas_size;
    CUDA_CHECK( cudaMemcpy( &compacted_gas_size, (void*)emitProperty.result, sizeof(size_t), cudaMemcpyDeviceToHost ) );

    if( compacted_gas_size < gas_buffer_sizes.outputSizeInBytes )
    {
        CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &pd_state.d_gas_output_buffer ), compacted_gas_size ) );

        // use handle as input and output
        OPTIX_CHECK( optixAccelCompact( pd_state.context, 0, pd_state.gas_handle, pd_state.d_gas_output_buffer,
                                        compacted_gas_size, &pd_state.gas_handle ) );

        CUDA_CHECK( cudaFree( (void*)d_buffer_temp_output_gas_and_compacted_size ) );
    }
    else
    {
        pd_state.d_gas_output_buffer = d_buffer_temp_output_gas_and_compacted_size;
    }
}
// Compile the sample's PTX (optixNVLink.cu) into an OptiX module and record
// the pipeline compile options shared by all program groups/pipelines.
void createModule( PerDeviceSampleState& pd_state )
{
    OptixModuleCompileOptions module_compile_options = {};
    module_compile_options.maxRegisterCount = OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
    module_compile_options.optLevel   = OPTIX_COMPILE_OPTIMIZATION_DEFAULT;
    module_compile_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL;

    // Compile options must match those used at pipeline creation time.
    pd_state.pipeline_compile_options.usesMotionBlur            = false;
    pd_state.pipeline_compile_options.traversableGraphFlags     = OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS;
    pd_state.pipeline_compile_options.numPayloadValues          = 2;
    pd_state.pipeline_compile_options.numAttributeValues        = 2;
    pd_state.pipeline_compile_options.exceptionFlags            = OPTIX_EXCEPTION_FLAG_NONE;  // should be OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW;
    pd_state.pipeline_compile_options.pipelineLaunchParamsVariableName = "params";

    size_t      inputSize = 0;
    const char* input     = sutil::getInputData( OPTIX_SAMPLE_NAME, OPTIX_SAMPLE_DIR, "optixNVLink.cu", inputSize );

    char   log[2048];
    size_t sizeof_log = sizeof( log );

    OPTIX_CHECK_LOG( optixModuleCreateFromPTX(
                pd_state.context,
                &module_compile_options,
                &pd_state.pipeline_compile_options,
                input,
                inputSize,
                log,
                &sizeof_log,
                &pd_state.ptx_module
                ) );
}
// Create the five program groups used by the pipeline: raygen, radiance and
// occlusion miss, and radiance and occlusion closest-hit. The occlusion miss
// program is intentionally null (a miss means "not occluded").
//
// Fixes vs. the original:
//  - the final (occlusion hit) group now uses OPTIX_CHECK_LOG like the other
//    four, so its compile log is reported on failure;
//  - sizeof_log is reset before every create call, because the API writes
//    back the used size and a stale (smaller) value would truncate later logs.
void createProgramGroups( PerDeviceSampleState& pd_state )
{
    OptixProgramGroupOptions program_group_options = {};

    char   log[2048];
    size_t sizeof_log;

    // Raygen program group
    OptixProgramGroupDesc raygen_prog_group_desc    = {};
    raygen_prog_group_desc.kind                     = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
    raygen_prog_group_desc.raygen.module            = pd_state.ptx_module;
    raygen_prog_group_desc.raygen.entryFunctionName = "__raygen__rg";

    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
                pd_state.context,
                &raygen_prog_group_desc,
                1,  // num program groups
                &program_group_options,
                log,
                &sizeof_log,
                &pd_state.raygen_prog_group
                )
            );

    // Radiance miss program group
    OptixProgramGroupDesc miss_prog_group_desc  = {};
    miss_prog_group_desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module            = pd_state.ptx_module;
    miss_prog_group_desc.miss.entryFunctionName = "__miss__radiance";

    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
                pd_state.context,
                &miss_prog_group_desc,
                1,  // num program groups
                &program_group_options,
                log,
                &sizeof_log,
                &pd_state.radiance_miss_group
                )
            );

    // Occlusion miss program group (null program: a miss means unoccluded)
    memset( &miss_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
    miss_prog_group_desc.kind                   = OPTIX_PROGRAM_GROUP_KIND_MISS;
    miss_prog_group_desc.miss.module            = nullptr;  // NULL miss program for occlusion rays
    miss_prog_group_desc.miss.entryFunctionName = nullptr;

    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
                pd_state.context,
                &miss_prog_group_desc,
                1,  // num program groups
                &program_group_options,
                log,
                &sizeof_log,
                &pd_state.occlusion_miss_group
                )
            );

    // Radiance closest-hit program group
    OptixProgramGroupDesc hit_prog_group_desc        = {};
    hit_prog_group_desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hit_prog_group_desc.hitgroup.moduleCH            = pd_state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__radiance";

    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
                pd_state.context,
                &hit_prog_group_desc,
                1,  // num program groups
                &program_group_options,
                log,
                &sizeof_log,
                &pd_state.radiance_hit_group
                )
            );

    // Occlusion closest-hit program group
    memset( &hit_prog_group_desc, 0, sizeof( OptixProgramGroupDesc ) );
    hit_prog_group_desc.kind                         = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
    hit_prog_group_desc.hitgroup.moduleCH            = pd_state.ptx_module;
    hit_prog_group_desc.hitgroup.entryFunctionNameCH = "__closesthit__occlusion";

    sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixProgramGroupCreate(
                pd_state.context,
                &hit_prog_group_desc,
                1,  // num program groups
                &program_group_options,
                log,
                &sizeof_log,
                &pd_state.occlusion_hit_group
                )
            );
}
// Link all program groups into the OptiX pipeline and size its stacks for a
// maximum trace depth of 2 (radiance ray + one occlusion ray).
void createPipeline( PerDeviceSampleState& pd_state )
{
    const uint32_t max_trace_depth = 2;

    OptixProgramGroup program_groups[] =
    {
        pd_state.raygen_prog_group,
        pd_state.radiance_miss_group,
        pd_state.occlusion_miss_group,
        pd_state.radiance_hit_group,
        pd_state.occlusion_hit_group
    };

    OptixPipelineLinkOptions pipeline_link_options = {};
    pipeline_link_options.maxTraceDepth            = max_trace_depth;
    pipeline_link_options.debugLevel               = OPTIX_COMPILE_DEBUG_LEVEL_FULL;

    char   log[2048];
    size_t sizeof_log = sizeof( log );
    OPTIX_CHECK_LOG( optixPipelineCreate(
                pd_state.context,
                &pd_state.pipeline_compile_options,
                &pipeline_link_options,
                program_groups,
                sizeof( program_groups ) / sizeof( program_groups[0] ),
                log,
                &sizeof_log,
                &pd_state.pipeline
                ) );

    // Accumulate the stack requirements of every program group, then compute
    // and set conservative stack sizes for the whole pipeline.
    OptixStackSizes stack_sizes = {};
    for( auto& prog_group : program_groups )
    {
        OPTIX_CHECK( optixUtilAccumulateStackSizes( prog_group, &stack_sizes ) );
    }

    uint32_t direct_callable_stack_size_from_traversal;
    uint32_t direct_callable_stack_size_from_state;
    uint32_t continuation_stack_size;
    OPTIX_CHECK( optixUtilComputeStackSizes( &stack_sizes, max_trace_depth,
                                             0,  // maxCCDepth
                                             0,  // maxDCDEpth
                                             &direct_callable_stack_size_from_traversal,
                                             &direct_callable_stack_size_from_state, &continuation_stack_size ) );
    OPTIX_CHECK( optixPipelineSetStackSize( pd_state.pipeline, direct_callable_stack_size_from_traversal,
                                            direct_callable_stack_size_from_state, continuation_stack_size,
                                            1  // maxTraversableDepth
                                            ) );
}
// Build the shader binding table: one raygen record, one miss record per ray
// type, and one hitgroup record per (material, ray type) pair. Radiance
// hitgroup records carry the material colors, geometry pointers, and the
// material's texture object (possibly resident on a P2P peer).
void createSBT( PerDeviceSampleState& pd_state )
{
    // Raygen record
    CUdeviceptr  d_raygen_record;
    const size_t raygen_record_size = sizeof( RayGenRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_raygen_record ), raygen_record_size ) );

    RayGenRecord rg_sbt;
    OPTIX_CHECK( optixSbtRecordPackHeader( pd_state.raygen_prog_group, &rg_sbt ) );
    rg_sbt.data = {1.0f, 0.f, 0.f};

    CUDA_CHECK( cudaMemcpy(
                reinterpret_cast<void*>( d_raygen_record ),
                &rg_sbt,
                raygen_record_size,
                cudaMemcpyHostToDevice
                ) );

    // Miss records: index 0 = radiance, index 1 = occlusion
    CUdeviceptr  d_miss_records;
    const size_t miss_record_size = sizeof( MissRecord );
    CUDA_CHECK( cudaMalloc( reinterpret_cast<void**>( &d_miss_records ), miss_record_size*RAY_TYPE_COUNT ) );

    MissRecord ms_sbt[2];
    OPTIX_CHECK( optixSbtRecordPackHeader( pd_state.radiance_miss_group, &ms_sbt[0] ) );
    ms_sbt[0].data = {0.0f, 0.0f, 0.0f};
    OPTIX_CHECK( optixSbtRecordPackHeader( pd_state.occlusion_miss_group, &ms_sbt[1] ) );
    ms_sbt[1].data = {0.0f, 0.0f, 0.0f};

    CUDA_CHECK( cudaMemcpy(
                reinterpret_cast<void*>( d_miss_records ),
                ms_sbt,
                miss_record_size*RAY_TYPE_COUNT,
                cudaMemcpyHostToDevice
                ) );

    // Hitgroup records: layout is [mat0-radiance, mat0-occlusion, mat1-...]
    CUdeviceptr  d_hitgroup_records;
    const size_t hitgroup_record_size = sizeof( HitGroupRecord );
    CUDA_CHECK( cudaMalloc(
                reinterpret_cast<void**>( &d_hitgroup_records ),
                hitgroup_record_size*RAY_TYPE_COUNT*MAT_COUNT
                ) );

    HitGroupRecord hitgroup_records[ RAY_TYPE_COUNT*MAT_COUNT ];
    for( int i = 0; i < MAT_COUNT; ++i )
    {
        {
            const int sbt_idx = i*RAY_TYPE_COUNT+0;  // SBT for radiance ray-type for ith material

            OPTIX_CHECK( optixSbtRecordPackHeader( pd_state.radiance_hit_group, &hitgroup_records[sbt_idx] ) );
            hitgroup_records[ sbt_idx ].data.emission_color  = g_emission_colors[i];
            hitgroup_records[ sbt_idx ].data.diffuse_color   = g_diffuse_colors[i];
            hitgroup_records[ sbt_idx ].data.vertices        = reinterpret_cast<float4*>(pd_state.d_vertices);
            hitgroup_records[ sbt_idx ].data.tex_coords      = reinterpret_cast<float2*>(pd_state.d_tex_coords);
            hitgroup_records[ sbt_idx ].data.diffuse_texture = getDiffuseTextureObject( i, pd_state );
        }

        {
            // Occlusion rays carry no shading data; just the program header.
            const int sbt_idx = i*RAY_TYPE_COUNT+1;  // SBT for occlusion ray-type for ith material
            memset( &hitgroup_records[sbt_idx], 0, hitgroup_record_size );

            OPTIX_CHECK( optixSbtRecordPackHeader( pd_state.occlusion_hit_group, &hitgroup_records[sbt_idx] ) );
        }
    }

    CUDA_CHECK( cudaMemcpy(
                reinterpret_cast<void*>( d_hitgroup_records ),
                hitgroup_records,
                hitgroup_record_size*RAY_TYPE_COUNT*MAT_COUNT,
                cudaMemcpyHostToDevice
                ) );

    pd_state.sbt.raygenRecord                = d_raygen_record;
    pd_state.sbt.missRecordBase              = d_miss_records;
    pd_state.sbt.missRecordStrideInBytes     = static_cast<uint32_t>( miss_record_size );
    pd_state.sbt.missRecordCount             = RAY_TYPE_COUNT;
    pd_state.sbt.hitgroupRecordBase          = d_hitgroup_records;
    pd_state.sbt.hitgroupRecordStrideInBytes = static_cast<uint32_t>( hitgroup_record_size );
    pd_state.sbt.hitgroupRecordCount         = RAY_TYPE_COUNT*MAT_COUNT;
}
// Destroy all per-device OptiX objects and free all device allocations made
// during setup.
//
// Fixes vs. the original:
//  - the occlusion miss group (created in createProgramGroups) was never
//    destroyed;
//  - d_tex_coords (allocated in uploadAdditionalShadingData) was never freed.
void cleanupState( PerDeviceSampleState& pd_state )
{
    OPTIX_CHECK( optixPipelineDestroy     ( pd_state.pipeline             ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( pd_state.raygen_prog_group    ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( pd_state.radiance_miss_group  ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( pd_state.occlusion_miss_group ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( pd_state.radiance_hit_group   ) );
    OPTIX_CHECK( optixProgramGroupDestroy ( pd_state.occlusion_hit_group  ) );
    OPTIX_CHECK( optixModuleDestroy       ( pd_state.ptx_module           ) );
    OPTIX_CHECK( optixDeviceContextDestroy( pd_state.context              ) );

    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.sbt.raygenRecord       ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.sbt.missRecordBase     ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.sbt.hitgroupRecordBase ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_vertices             ) ) );
    // NOTE(review): assumes d_tex_coords is zero-initialized when
    // uploadAdditionalShadingData was not called (cudaFree(0) is a no-op).
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_tex_coords           ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_gas_output_buffer    ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_sample_indices       ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_sample_accum         ) ) );
    CUDA_CHECK( cudaFree( reinterpret_cast<void*>( pd_state.d_params               ) ) );
}
//------------------------------------------------------------------------------
//
// Texture management functions
//
//------------------------------------------------------------------------------
// Destroy every texture object and free every backing texture array on every
// device (entries are 0 when no texture/array exists for that slot).
void destroyTextures( std::vector<PerDeviceSampleState>& pd_states )
{
    for( auto& pd_state : pd_states )
    {
        const int device_idx = pd_state.device_idx;
        CUDA_CHECK( cudaSetDevice( device_idx ) );

        for( int material_id = 0; material_id < MAT_COUNT; ++material_id )
        {
            cudaTextureObject_t tex = g_diffuse_textures[material_id][device_idx];
            if( tex != 0 )
                CUDA_CHECK( cudaDestroyTextureObject( tex ) );

            cudaArray_t backing = g_diffuse_texture_data[material_id][device_idx];
            if( backing != 0 )
                CUDA_CHECK( cudaFreeArray( backing ) );
        }
    }
}
// Return the diffuse texture object for a material usable from the given
// device: prefer a texture resident on the device itself, otherwise fall
// back to one owned by a peer in the same P2P island; 0 if none exists.
cudaTextureObject_t getDiffuseTextureObject( int material_id, PerDeviceSampleState& pd_state )
{
    const std::vector<cudaTextureObject_t>& textures = g_diffuse_textures[material_id];
    const int device_idx = pd_state.device_idx;

    // Local texture wins.
    if( textures[device_idx] != 0 )
        return textures[device_idx];

    // Island bitmask = this device plus its peers.
    const int island = pd_state.peers | ( 1 << device_idx );
    for( int peer_id = 0; peer_id < static_cast<int>( textures.size() ); ++peer_id )
    {
        const bool in_island = ( island & ( 1 << peer_id ) ) != 0;
        if( in_island && textures[peer_id] != 0 )
            return textures[peer_id];
    }

    return 0;
}
// Fill image_data (width x height float4 texels) with a procedural 8x8 tiled
// pattern. The material id selects the L-norm used for the per-tile distance
// field, so each material gets a differently shaped blob.
void createTextureImageOnHost( float4* image_data, int width, int height, int material_id )
{
    const int   tiles_per_side = 8;
    const float n              = material_id + 0.1f;  // L-norm exponent

    for( int row = 0; row < height; ++row )
    {
        const float t = row / (float)height;
        for( int col = 0; col < width; ++col )
        {
            const float s = col / (float)width;

            // Fractional position within the current tile.
            const float ss = ( s * tiles_per_side ) - static_cast<int>( s * tiles_per_side );
            const float tt = ( t * tiles_per_side ) - static_cast<int>( t * tiles_per_side );

            // L-norm distance from the tile center, sharpened into a blob.
            float d = powf( powf( fabs( ss - 0.5f ), n ) + powf( fabs( tt - 0.5f ), n ), 1.0f / n ) * 2.03f;
            d = ( d < 1.0f ) ? 1.0f - powf( d, 80.0f ) : 0.0f;

            image_data[row * width + col] = {d * s, d * t, 0.3f * ( 1.0f - d ), 0.0f};
        }
    }
}
// Create a texture object on the given device that samples from tex_array
// (which may physically reside on a P2P peer). Clamped addressing, bilinear
// filtering, normalized coordinates.
cudaTextureObject_t defineTextureOnDevice( int device_idx, cudaArray_t tex_array, int tex_width, int tex_height )
{
    CUDA_CHECK( cudaSetDevice( device_idx ) );

    cudaResourceDesc res_desc;
    std::memset( &res_desc, 0, sizeof( cudaResourceDesc ) );
    res_desc.resType = cudaResourceTypeArray;
    res_desc.res.array.array = tex_array;

    cudaTextureDesc tex_desc;
    std::memset( &tex_desc, 0, sizeof( cudaTextureDesc ) );
    tex_desc.addressMode[0] = cudaAddressModeClamp;
    tex_desc.addressMode[1] = cudaAddressModeClamp;
    tex_desc.filterMode = cudaFilterModeLinear;
    tex_desc.readMode = cudaReadModeElementType;
    tex_desc.normalizedCoords = 1;

    cudaResourceViewDesc* res_view_desc = nullptr;  // no resource view needed

    cudaTextureObject_t tex;
    CUDA_CHECK( cudaCreateTextureObject( &tex, &res_desc, &tex_desc, res_view_desc ) );
    return tex;
}
// Generate the procedural texture for a material on the host, upload it into
// a new cudaArray on the given device, create the corresponding texture
// object, and return the number of bytes of texture memory used.
float loadTextureOnDevice( int mat_index, int device_idx )
{
    std::cout << "LOADING TEXTURE: material " << mat_index << " on device " << device_idx << ".\n";
    CUDA_CHECK( cudaSetDevice( device_idx ) );

    // 32-bit float RGBA channel layout, square texture.
    cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc( 32, 32, 32, 32, cudaChannelFormatKindFloat );
    const int tex_width = TEXTURE_WIDTH;
    int tex_height = tex_width;
    CUDA_CHECK( cudaMallocArray( &g_diffuse_texture_data[mat_index][device_idx], &channel_desc, tex_width, tex_height ) );

    // Procedurally generate the image on the host, then copy it up.
    std::vector<float4> h_texture_data( tex_width * tex_height );
    createTextureImageOnHost( h_texture_data.data(), tex_width, tex_height, mat_index );
    int width_in_bytes = tex_width * sizeof( float4 );
    int pitch = width_in_bytes;  // rows are tightly packed
    CUDA_CHECK( cudaMemcpy2DToArray( g_diffuse_texture_data[mat_index][device_idx], 0, 0, h_texture_data.data(), pitch,
                                     width_in_bytes, tex_height, cudaMemcpyHostToDevice ) );

    g_diffuse_textures[mat_index][device_idx] =
        defineTextureOnDevice( device_idx, g_diffuse_texture_data[mat_index][device_idx], tex_width, tex_height );

    float tex_mem_usage = static_cast<float>( tex_width * tex_height * sizeof( float4 ) );
    return tex_mem_usage;
}
// Scan an island bitmask and return the member device with the least texture
// memory allocated so far (ties resolve to the lowest device index).
int getIslandDeviceWithLowestTextureUsage( int island )
{
    int   best_device = 0;
    float best_usage  = FLT_MAX;

    for( int device_idx = 0; ( 1 << device_idx ) <= island; ++device_idx )
    {
        if( ( island & ( 1 << device_idx ) ) == 0 )
            continue;  // not a member of this island
        if( g_device_tex_usage[device_idx] < best_usage )
        {
            best_usage  = g_device_tex_usage[device_idx];
            best_device = device_idx;
        }
    }
    return best_device;
}
// Load the texture for one material. When texture sharing is enabled and P2P
// peers exist, each island gets a single backing array (placed on its least
// loaded device) plus one texture object per island member; otherwise every
// device gets its own full copy. Returns the total bytes allocated.
float loadTexture( std::vector<PerDeviceSampleState>& pd_states, std::vector<int>& p2p_islands, int mat_index )
{
    bool share_per_island = ( g_peer_usage != PEERS_NONE && g_share_textures );
    float tex_mem = 0.0f;

    if( share_per_island == true )
    {
        // Load the texture on one of the devices
        for( int i = 0; i < static_cast<int>( p2p_islands.size() ); ++i )
        {
            // Balance texture memory across island members.
            int device_idx = getIslandDeviceWithLowestTextureUsage( p2p_islands[i] );
            float tex_size = loadTextureOnDevice( mat_index, device_idx );
            g_device_tex_usage[device_idx] += tex_size;
            tex_mem += tex_size;

            // Make texture samplers for each device in the island, but reuse the data array
            int island = p2p_islands[i];
            int peer_idx = 0;
            cudaArray_t tex_array = g_diffuse_texture_data[mat_index][device_idx];
            while( ( 1 << peer_idx ) <= island )
            {
                // If peer_idx is a peer of device_idx
                if ( (peer_idx != device_idx) && (island & (1 << peer_idx)) )
                {
                    g_diffuse_textures[mat_index][peer_idx] =
                        defineTextureOnDevice( peer_idx, tex_array, TEXTURE_WIDTH, TEXTURE_WIDTH );
                }
                peer_idx++;
            }
        }
    }
    else
    {
        // No sharing: replicate the texture on every device.
        for( int i = 0; i < static_cast<int>( pd_states.size() ); ++i )
        {
            int device_idx = pd_states[i].device_idx;
            float tex_size = loadTextureOnDevice( mat_index, device_idx );
            g_device_tex_usage[device_idx] += tex_size;
            tex_mem += tex_size;
        }
    }

    return tex_mem;
}
// Size the global texture bookkeeping arrays for the device count, load a
// texture for every material flagged in g_make_diffuse_textures, and report
// the total texture memory used.
void loadTextures( std::vector<PerDeviceSampleState>& pd_states, std::vector<int>& p2p_islands )
{
    size_t num_devices = pd_states.size();
    g_device_tex_usage.resize( num_devices, 0.0f );
    float total_tex_mem = 0.0f;

    for( int mat_index = 0; mat_index < MAT_COUNT; ++mat_index )
    {
        // One slot per device, zero meaning "no texture on this device".
        g_diffuse_texture_data[mat_index].resize( num_devices, 0 );
        g_diffuse_textures[mat_index].resize( num_devices, 0 );

        // If a texture is required for this material, make it
        if( g_make_diffuse_textures[mat_index] )
        {
            total_tex_mem += loadTexture( pd_states, p2p_islands, mat_index );
        }
    }
    std::cout << "TEXTURE MEMORY USAGE: " << (total_tex_mem / (1<<20)) << " MB\n";
}
//------------------------------------------------------------------------------
//
// P2P / NVLINK functions
//
//------------------------------------------------------------------------------
// Heuristically find the device driving the display (usable for GL interop):
// the display device reports a kernel execution timeout. Returns -1 if no
// such device is found.
int getGlInteropDeviceId( int num_devices )
{
    for( int device_idx = 0; device_idx < num_devices; ++device_idx )
    {
        int is_display_device = 0;
        CUDA_CHECK( cudaDeviceGetAttribute( &is_display_device, cudaDevAttrKernelExecTimeout, device_idx ) );
        if( is_display_device != 0 )
        {
            std::cout << "DISPLAY DEVICE: " << device_idx << "\n";
            return device_idx;
        }
    }

    std::cerr << "ERROR: Could not determine GL interop device\n";
    return -1;
}
// Best-effort: enable P2P access between every distinct device pair that
// reports it as possible. Return codes of the CUDA peer calls are
// intentionally ignored (e.g. access may already be enabled).
void enablePeerAccess( std::vector<PerDeviceSampleState>& pd_states )
{
    const int num_devices = static_cast<int>( pd_states.size() );
    for( int device_idx = 0; device_idx < num_devices; ++device_idx )
    {
        CUDA_CHECK( cudaSetDevice( device_idx ) );
        for( int peer_idx = 0; peer_idx < num_devices; ++peer_idx )
        {
            if( peer_idx == device_idx )
                continue;

            int can_access = 0;
            cudaDeviceCanAccessPeer( &can_access, device_idx, peer_idx );
            if( can_access )
                cudaDeviceEnablePeerAccess( peer_idx, 0 );
        }
    }
}
// Disable P2P access between every distinct device pair, mirroring
// enablePeerAccess. Return codes are intentionally ignored (disabling access
// that was never enabled simply fails).
//
// Fix vs. the original: the guard was `(1 << peer_idx) != 0`, which is always
// true for any in-range peer index; the intent (cf. enablePeerAccess) is to
// skip the self pair.
void shutdownPeerAccess( std::vector<PerDeviceSampleState>& pd_states )
{
    size_t num_devices = pd_states.size();
    for( int device_idx = 0; device_idx < static_cast<int>( num_devices ); ++device_idx )
    {
        CUDA_CHECK( cudaSetDevice( device_idx ) );
        for( int peer_idx = 0; peer_idx < static_cast<int>( num_devices ); ++peer_idx )
        {
            if( peer_idx != device_idx )
                cudaDeviceDisablePeerAccess( peer_idx );
        }
    }
}
#if OPTIX_USE_NVML
// Look up the NVML device handle for the CUDA device index stored in
// pd_state. Returns nullptr (and logs to stderr) if the lookup fails.
// NOTE(review): assumes NVML enumeration order matches the CUDA device
// index -- TODO confirm on systems where device ordering differs.
nvmlDevice_t getNvmlDeviceHandle( PerDeviceSampleState& pd_state )
{
    nvmlDevice_t device = nullptr;
    nvmlReturn_t result = nvmlDeviceGetHandleByIndex_p( pd_state.device_idx, &device );
    if( result != NVML_SUCCESS )
        std::cerr << "Could not get device handle for index " << pd_state.device_idx << "\n";
    return device;
}
#endif
#if OPTIX_USE_NVML
// Return the PCI bus id string for the device described by pd_state,
// or an empty string if any NVML query fails.
std::string getPciBusId( PerDeviceSampleState& pd_state )
{
    nvmlDevice_t handle = getNvmlDeviceHandle( pd_state );
    if( handle == nullptr )
        return "";

    nvmlPciInfo_t pci_info;
    memset( &pci_info, 0, sizeof( pci_info ) );
    if( nvmlDeviceGetPciInfo_p( handle, &pci_info ) != NVML_SUCCESS )
        return "";

    return std::string( pci_info.busId );
}
#endif
// Print an island bitmask as a comma-separated set of device indices,
// e.g. the mask 0b0101 prints as "{0,2} ".
void printIsland( int island )
{
    std::cout << "{";
    bool first = true;
    for( int device_idx = 0; ( 1 << device_idx ) <= island; ++device_idx )
    {
        if( ( 1 << device_idx ) & island )
        {
            if( !first )
                std::cout << ",";
            std::cout << device_idx;
            first = false;
        }
    }
    std::cout << "} ";
}
void computeP2PIslands( std::vector<PerDeviceSampleState>& pd_states, std::vector<int>& islands )
{
std::cout << "P2P ISLANDS: ";
islands.clear();
for( int i = 0; i < static_cast<int>( pd_states.size() ); ++i )
{
int island = pd_states[i].peers | ( 1 << pd_states[i].device_idx );
if( std::find( islands.begin(), islands.end(), island ) == islands.end() )
{
islands.push_back( island );
printIsland( island );
}
}
std::cout << "\n";
}
// Determine which devices are P2P peers of pd_states[device_idx] and record
// them as a bitmask in pd_states[device_idx].peers.
//
// With NVML available, peers are discovered by walking the device's NVLink
// links and matching the remote end's PCI bus id against the other active
// devices (optionally requiring the link to be active). Without NVML, fall
// back to cudaDeviceCanAccessPeer and treat every P2P connection as nvlink.
void findPeersForDevice( std::vector<PerDeviceSampleState>& pd_states, int device_idx, bool require_nvlink )
{
#if OPTIX_USE_NVML
    // Clear the set of peers for the current device
    pd_states[device_idx].peers = 0;

    nvmlReturn_t result;
    nvmlDevice_t device     = getNvmlDeviceHandle( pd_states[device_idx] );
    std::string  pci_bus_id = getPciBusId( pd_states[device_idx] );

    // Check each link
    for( unsigned int link = 0; link < NVML_NVLINK_MAX_LINKS; ++link )
    {
        // Check if P2P is supported on this link
        unsigned int capResult = 0;
        result = nvmlDeviceGetNvLinkCapability_p( device, link, NVML_NVLINK_CAP_P2P_SUPPORTED, &capResult );
        if( result != NVML_SUCCESS || capResult == 0 )
            continue;

        // Check if NVLINK is active on this link (if required)
        if( require_nvlink )
        {
            nvmlEnableState_t isActive = NVML_FEATURE_DISABLED;
            result = nvmlDeviceGetNvLinkState_p( device, link, &isActive );
            if( result != NVML_SUCCESS || isActive != NVML_FEATURE_ENABLED )
                continue;
        }

        // Check if we're connected to another device on this link
        nvmlPciInfo_t pci = {{0}};
        result = nvmlDeviceGetNvLinkRemotePciInfo_p( device, link, &pci );
        if( result != NVML_SUCCESS )
            continue;

        // Find neighbors with the same id as the device we are connected to
        // and add them as peers
        std::string pci_id( pci.busId );
        bool found = false;
        for( int i = 0; i < static_cast<int>( pd_states.size() ); ++i )
        {
            std::string peerPciId = getPciBusId( pd_states[i] );
            // BUGFIX: the previous code compared pci_id against a fresh copy
            // of pci.busId (i.e. against itself, always true), which marked
            // every active device as a peer on every link and left peerPciId
            // unused. Compare the candidate device's bus id to the remote end.
            if( peerPciId == pci_id )
            {
                int peer_idx = pd_states[i].device_idx;
                pd_states[device_idx].peers |= ( 1 << peer_idx );
                found = true;
                //break;
            }
        }
        if( !found )
            std::cerr << "Unable to locate device with id " << pci_id << " in active devices.\n";
    }
#else
    if ( require_nvlink == true && device_idx == 0 )
        std::cout << "NVML NOT SUPPORTED. Cannot query nvlink. Treating all P2P connections as nvlink.\n";

    size_t num_devices = pd_states.size();
    CUDA_CHECK( cudaSetDevice( device_idx ) );
    for( int peer_idx = 0; peer_idx < static_cast<int>( num_devices ); ++peer_idx )
    {
        if( peer_idx == device_idx )
            continue;
        int access = 0;
        cudaDeviceCanAccessPeer( &access, device_idx, peer_idx );
        if ( access )
            pd_states[device_idx].peers |= ( 1 << peer_idx );
    }
#endif
}
// Populate the peer bitmask of every device in pd_states.
void findPeers( std::vector<PerDeviceSampleState>& pd_states, bool require_nvlink )
{
    const int device_count = static_cast<int>( pd_states.size() );
    for( int idx = 0; idx < device_count; ++idx )
        findPeersForDevice( pd_states, idx, require_nvlink );
}
#if OPTIX_USE_NVML
// Initialize the NVML library and print the installed driver version.
// Errors are reported to stderr but are not treated as fatal.
void initializeNvml()
{
    CUDA_CHECK( cudaFree( 0 ) ); // Make sure cuda is initialized first
    nvmlReturn_t result = nvmlInit_p();
    if( result != NVML_SUCCESS )
        std::cerr << "ERROR: nvmlInit() failed (code " << result << ")\n";
    char buff[1024];
    result = nvmlSystemGetDriverVersion_p( buff, 1024 );
    if( result == NVML_SUCCESS )
        std::cout << "DRIVER VERSION: " << buff << "\n";
    else
        std::cerr << "ERROR: Unable to get driver version (code " << result << ")\n";
}
#endif
//------------------------------------------------------------------------------
//
// Main
//
//------------------------------------------------------------------------------
// Parse command-line options into the sample's global settings:
//   --help/-h                   print usage and exit
//   --file/-f <path>            render to an image file instead of a window
//   --launch-samples/-s <n>     samples per pixel per launch
//   --device-color-scale/-d <f> strength of the per-GPU tint overlay
//   --peers/-p none|nvlink|all  which P2P connections to use
//   --optimize-framebuffer/-o true|false
//   --share-textures/-t true|false
// Any unknown option prints usage and exits.
void parseCommandLine( int argc, char* argv[] )
{
    for( int i = 1; i < argc; ++i )
    {
        const std::string arg = argv[i];
        if( arg == "--help" || arg == "-h" )
        {
            printUsageAndExit( argv[0] );
        }
        else if( arg == "--file" || arg == "-f" )
        {
            // Each value-taking option checks that a value argument follows.
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            g_outfile = argv[++i];
        }
        else if( arg == "--launch-samples" || arg == "-s" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            samples_per_launch = atoi( argv[++i] );
        }
        else if( arg == "--device-color-scale" || arg == "-d" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            g_device_color_scale = static_cast<float>( atof( argv[++i] ) );
        }
        else if ( arg == "--peers" || arg == "-p" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            const std::string arg1 = argv[++i];
            if (arg1 == "none")
                g_peer_usage = PEERS_NONE;
            else if (arg1 == "nvlink")
                g_peer_usage = PEERS_NVLINK;
            else if (arg1 == "all")
                g_peer_usage = PEERS_ALL;
            else
                printUsageAndExit( argv[0] );
        }
        else if ( arg == "--optimize-framebuffer" || arg == "-o" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            const std::string arg1 = argv[++i];
            if (arg1 == "false")
                g_optimize_framebuffer = false;
            else if (arg1 == "true")
                g_optimize_framebuffer = true;
            else
                printUsageAndExit( argv[0] );
        }
        else if ( arg == "--share-textures" || arg == "-t" )
        {
            if( i >= argc - 1 )
                printUsageAndExit( argv[0] );
            const std::string arg1 = argv[++i];
            if (arg1 == "false")
                g_share_textures = false;
            else if (arg1 == "true")
                g_share_textures = true;
            else
                printUsageAndExit( argv[0] );
        }
        else
        {
            std::cerr << "ERROR: Unknown option '" << argv[i] << "'\n";
            printUsageAndExit( argv[0] );
        }
    }
}
// Program entry point: parse options, set up per-device OptiX state and the
// P2P/NVLink topology, then either run the interactive viewer (no --file)
// or render a single image to the named output file, and clean up.
int main( int argc, char* argv[] )
{
    parseCommandLine( argc, argv );
    try
    {
#if OPTIX_USE_NVML
        // NVML is loaded dynamically and only used for NVLink topology queries.
        g_nvmlLoaded = loadNvmlFunctions();
        if ( g_nvmlLoaded )
            initializeNvml();
#endif
        //
        // Set up per-device render states
        //
        std::vector<PerDeviceSampleState> pd_states;
        createContexts( pd_states );
        //
        // Determine P2P topology, and load textures accordingly
        //
        if (g_peer_usage != PEERS_NONE)
        {
            enablePeerAccess( pd_states );
            bool require_nvlink = (g_peer_usage == PEERS_NVLINK);
            findPeers( pd_states, require_nvlink );
        }
        std::vector<int> p2p_islands;
        computeP2PIslands( pd_states, p2p_islands );
        loadTextures( pd_states, p2p_islands );
        //
        // Set up OptiX state (geometry, module, pipeline, SBT) per device
        //
        for( PerDeviceSampleState& pd_state : pd_states )
        {
            CUDA_CHECK( cudaSetDevice( pd_state.device_idx ) );
            uploadAdditionalShadingData( pd_state );
            buildMeshAccel     ( pd_state );
            createModule       ( pd_state );
            createProgramGroups( pd_state );
            createPipeline     ( pd_state );
            createSBT          ( pd_state );
            allocIOBuffers     ( pd_state, static_cast<int>( pd_states.size() ) );
        }
        for( PerDeviceSampleState& pd_state : pd_states )
        {
            initLaunchParams( pd_state );
        }
        initCameraState();
        GLFWwindow* window = nullptr;
        //
        // If the output file is empty, go into interactive mode
        //
        if( g_outfile == "" )
        {
            // Set up GUI and callbacks
            window = sutil::initUI( "optixNVLink", width, height );
            glfwSetMouseButtonCallback  ( window, mouseButtonCallback );
            glfwSetCursorPosCallback    ( window, cursorPosCallback );
            glfwSetWindowSizeCallback   ( window, windowSizeCallback );
            glfwSetWindowIconifyCallback( window, windowIconifyCallback );
            glfwSetKeyCallback          ( window, keyCallback );
            int gl_interop_device = getGlInteropDeviceId( static_cast<int>( pd_states.size() ) );
            // Decide on the frame buffer type. Use ZERO_COPY memory as a default,
            // which copies the frame buffer data through pinned host memory.
            sutil::CUDAOutputBufferType buff_type = sutil::CUDAOutputBufferType::ZERO_COPY;
            // When using a single GPU that is also the gl interop device, render directly
            // into a gl interop buffer, avoiding copies.
            if ( g_optimize_framebuffer && pd_states.size() == 1 && gl_interop_device == 0 )
            {
                buff_type = sutil::CUDAOutputBufferType::GL_INTEROP;
            }
            // If multiple GPUs are fully connected (and one of them is the
            // gl interop device) use a device-side buffer to avoid copying to host and back.
            // Note that it can't render directly into a gl interop buffer in the multi-GPU case.
            else if ( g_optimize_framebuffer && p2p_islands.size() == 1 && ((1<<gl_interop_device) & p2p_islands[0]) )
            {
                buff_type = sutil::CUDAOutputBufferType::CUDA_P2P;
            }
            // Make the frame buffer
            sutil::CUDAOutputBuffer<uchar4> output_buffer( buff_type, width, height );
            int output_device = (gl_interop_device >= 0) ? gl_interop_device : 0;
            output_buffer.setDevice( output_device );
            sutil::GLDisplay gl_display;
            // Timing variables
            std::chrono::duration<double> state_update_time( 0.0 );
            std::chrono::duration<double> render_time( 0.0 );
            std::chrono::duration<double> display_time( 0.0 );
            // Render loop: poll input, update per-device state, launch one
            // subframe on every device, display, and accumulate timings.
            do
            {
                auto t0 = std::chrono::steady_clock::now();
                glfwPollEvents();
                updateDeviceStates( output_buffer, pd_states );
                auto t1 = std::chrono::steady_clock::now();
                state_update_time += t1 - t0;
                t0 = t1;
                launchSubframe( output_buffer, pd_states );
                t1 = std::chrono::steady_clock::now();
                render_time += t1 - t0;
                t0 = t1;
                displaySubframe( output_buffer, gl_display, window );
                t1 = std::chrono::steady_clock::now();
                display_time += t1-t0;
                sutil::displayStats( state_update_time, render_time, display_time );
                glfwSwapBuffers(window);
                // Advance the accumulation counter used by the raygen program.
                for( PerDeviceSampleState& pd_state : pd_states )
                    ++pd_state.params.subframe_index;
            } while( !glfwWindowShouldClose( window ) );
            // Make sure all of the CUDA streams finish
            for( PerDeviceSampleState& pd_state : pd_states )
            {
                CUDA_CHECK( cudaSetDevice( pd_state.device_idx ) );
                CUDA_SYNC_CHECK();
            }
        }
        //
        // If an output file was named, render and save
        //
        else
        {
            sutil::CUDAOutputBuffer<uchar4> output_buffer( sutil::CUDAOutputBufferType::ZERO_COPY, width, height );
            output_buffer.setDevice( 0 );
            updateDeviceStates( output_buffer, pd_states );
            launchSubframe( output_buffer, pd_states );
            sutil::ImageBuffer buffer;
            buffer.data         = output_buffer.getHostPointer();
            buffer.width        = output_buffer.width();
            buffer.height       = output_buffer.height();
            buffer.pixel_format = sutil::BufferImageFormat::UNSIGNED_BYTE4;
            sutil::saveImage( g_outfile.c_str(), buffer, false );
        }
        //
        // Clean up resources
        //
        if ( window )
            sutil::cleanupUI( window );
        destroyTextures( pd_states );
        shutdownPeerAccess( pd_states );
        for( PerDeviceSampleState& pd_state : pd_states )
            cleanupState( pd_state );
    }
    catch( std::exception& e )
    {
        std::cerr << "Caught exception: " << e.what() << "\n";
        return 1;
    }
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixNVLink/optixNVLink.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#define OPTIX_COMPATIBILITY 7
#include <optix.h>
#include "optixNVLink.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
#include <cuda/random.h>
// Launch parameters, set by the host for every optixLaunch call.
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
// Per ray data, and getting at it
//
//------------------------------------------------------------------------------
// Per-ray data for radiance rays. A packed 64-bit pointer to this struct is
// carried through optixTrace in two 32-bit payload registers.
struct RadiancePRD
{
    float3       emitted;      // light emitted at the most recent hit
    float3       radiance;     // direct-lighting estimate from the most recent hit
    float3       attenuation;  // accumulated path throughput (product of albedos)
    float3       origin;       // origin of the next bounce ray
    float3       direction;    // direction of the next bounce ray
    unsigned int seed;         // RNG state, threaded through the path
    int          countEmitted; // add surface emission at the next hit?
    int          done;         // set by the miss program to terminate the path
    int          pad;
};
// Reassemble a 64-bit pointer from two 32-bit payload words
// (i0 = high half, i1 = low half).
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
    const unsigned long long packed =
        ( static_cast<unsigned long long>( i0 ) << 32 ) | i1;
    return reinterpret_cast<void*>( packed );
}
// Split a 64-bit pointer into two 32-bit payload words
// (i0 = high half, i1 = low half).
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
    const unsigned long long bits = reinterpret_cast<unsigned long long>( ptr );
    i0 = static_cast<unsigned int>( bits >> 32 );
    i1 = static_cast<unsigned int>( bits & 0xffffffffull );
}
// Recover the RadiancePRD pointer stashed in payload registers 0 and 1.
static __forceinline__ __device__ RadiancePRD* getPRD()
{
    return reinterpret_cast<RadiancePRD*>(
        unpackPointer( optixGetPayload_0(), optixGetPayload_1() ) );
}
// Per-ray data for occlusion rays is a single flag in payload register 0.
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
    optixSetPayload_0( occluded ? 1u : 0u );
}
//------------------------------------------------------------------------------
//
// Sampling and color
//
//------------------------------------------------------------------------------
// Orthonormal basis built around a given normal; used to transform
// hemisphere samples from local (tangent) space into world space.
struct Onb
{
  __forceinline__ __device__ Onb(const float3& normal)
  {
    m_normal = normal;

    // Choose the binormal from whichever normal components are larger to
    // avoid a degenerate (near-zero) vector before normalization.
    if( fabs(m_normal.x) > fabs(m_normal.z) )
    {
      m_binormal.x = -m_normal.y;
      m_binormal.y =  m_normal.x;
      m_binormal.z =  0;
    }
    else
    {
      m_binormal.x =  0;
      m_binormal.y = -m_normal.z;
      m_binormal.z =  m_normal.y;
    }

    m_binormal = normalize(m_binormal);
    m_tangent  = cross( m_binormal, m_normal );
  }

  // Transform p, given in (tangent, binormal, normal) coordinates,
  // into world space, in place.
  __forceinline__ __device__ void inverse_transform(float3& p) const
  {
    p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal;
  }

  float3 m_tangent;
  float3 m_binormal;
  float3 m_normal;
};
// Cosine-weighted hemisphere sample around +z from two uniform random
// numbers u1, u2: sample a unit disk, then project up to the hemisphere.
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
  // Uniformly sample disk.
  const float r   = sqrtf( u1 );
  const float phi = 2.0f*M_PIf * u2;
  p.x = r * cosf( phi );
  p.y = r * sinf( phi );

  // Project up to hemisphere.
  p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) );
}
// Per-device tint used to visualize which GPU rendered a pixel:
// device 0 -> faint red, 1 -> faint green, 2 -> faint blue, others -> black.
__forceinline__ __device__ float3 deviceColor( unsigned int idx )
{
    const float r = ( idx == 0 ) ? 0.05f : 0.0f;
    const float g = ( idx == 1 ) ? 0.05f : 0.0f;
    const float b = ( idx == 2 ) ? 0.05f : 0.0f;
    return make_float3( r, g, b );
}
//------------------------------------------------------------------------------
//
// Tracing rays
//
//------------------------------------------------------------------------------
// Trace a radiance ray. The RadiancePRD pointer is packed into payload
// registers 0/1 so hit/miss programs can read and write the path state.
static __forceinline__ __device__ void traceRadiance(
        OptixTraversableHandle handle,
        float3                 ray_origin,
        float3                 ray_direction,
        float                  tmin,
        float                  tmax,
        RadiancePRD*           prd
        )
{
    unsigned int u0, u1;
    packPointer( prd, u0, u1 );
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f,                     // rayTime
            OptixVisibilityMask( 255 ),
            OPTIX_RAY_FLAG_NONE,
            RAY_TYPE_RADIANCE,        // SBT offset
            RAY_TYPE_COUNT,           // SBT stride
            RAY_TYPE_RADIANCE,        // missSBTIndex
            u0, u1 );
}
// Trace a shadow ray toward a light sample. Returns true if any geometry
// blocks the segment [tmin, tmax]; the ray terminates on the first hit.
static __forceinline__ __device__ bool traceOcclusion(
        OptixTraversableHandle handle,
        float3                 ray_origin,
        float3                 ray_direction,
        float                  tmin,
        float                  tmax
        )
{
    unsigned int occluded = 0u;  // set to 1 by __closesthit__occlusion
    optixTrace(
            handle,
            ray_origin,
            ray_direction,
            tmin,
            tmax,
            0.0f,                     // rayTime
            OptixVisibilityMask( 255 ),
            OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
            RAY_TYPE_OCCLUSION,       // SBT offset
            RAY_TYPE_COUNT,           // SBT stride
            RAY_TYPE_OCCLUSION,       // missSBTIndex
            occluded );
    return occluded;
}
//------------------------------------------------------------------------------
//
// Optix Programs
//
//------------------------------------------------------------------------------
// Ray generation: one launch index per assigned pixel sample. Each thread
// looks up its pixel from sample_index_buffer (the host's per-device work
// distribution), traces paths of up to 4 segments, blends the result into
// the running average in sample_accum_buffer, and writes the display color
// (plus a per-device tint) into result_buffer.
extern "C" __global__ void __raygen__rg()
{
    const int   w          = params.width;
    const int   h          = params.height;
    const uint3 launch_idx = optixGetLaunchIndex();
    const int2  pixel_idx  = params.sample_index_buffer[ launch_idx.x ];

    // Work distribution might assign tiles that cross over image boundary
    if( pixel_idx.x > w-1 || pixel_idx.y > h-1 )
        return;

    const float3 eye = params.eye;
    const float3 U   = params.U;
    const float3 V   = params.V;
    const float3 W   = params.W;
    const int subframe_index = params.subframe_index;

    // Seed the RNG from the pixel index and subframe count so samples
    // differ per pixel and per accumulation pass.
    unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index );

    float3 result = make_float3( 0.0f );
    int i = params.samples_per_launch;
    do
    {
        // The center of each pixel is at fraction (0.5,0.5)
        const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) );

        // Map the jittered pixel position into [-1,1]^2 screen space and
        // build the primary ray from the camera basis (U,V,W).
        const float2 d = 2.0f * make_float2(
                ( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
                ( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
                ) - 1.0f;
        float3 ray_direction = normalize(d.x*U + d.y*V + W);
        float3 ray_origin    = eye;

        RadiancePRD prd;
        prd.emitted      = make_float3(0.f);
        prd.radiance     = make_float3(0.f);
        prd.attenuation  = make_float3(1.f);
        prd.countEmitted = true;
        prd.done         = false;
        prd.seed         = seed;

        // Trace up to 4 path segments (depth 0..3); the miss program sets
        // prd.done when the path leaves the scene.
        int depth = 0;
        for( ;; )
        {
            traceRadiance(
                    params.handle,
                    ray_origin,
                    ray_direction,
                    0.01f,  // tmin
                    1e16f,  // tmax
                    &prd );

            result += prd.emitted;
            result += prd.radiance * prd.attenuation;

            if( prd.done || depth >= 3 )
                break;

            ray_origin    = prd.origin;
            ray_direction = prd.direction;
            ++depth;
        }
    }
    while( --i );

    // Average this launch's samples and blend with previous subframes.
    float3 accum_color = result / static_cast<float>( params.samples_per_launch );

    if( subframe_index > 0 )
    {
        const float  a                = 1.0f / static_cast<float>( subframe_index+1 );
        const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]);
        accum_color = lerp( accum_color_prev, accum_color, a );
    }
    params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f);

    const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x;
    float3 device_color = deviceColor( params.device_idx ) * params.device_color_scale;
    params.result_buffer[ image_index ] = make_color ( accum_color + device_color );
}
// Miss program for radiance rays: return the constant background color
// stored in the miss SBT record and flag the path as finished.
extern "C" __global__ void __miss__radiance()
{
    MissData*    rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    RadiancePRD* prd     = getPRD();

    prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b );
    prd->done     = true;
}
// Closest-hit program for occlusion rays: any hit marks the light sample as
// shadowed (rays are traced with OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT).
extern "C" __global__ void __closesthit__occlusion()
{
    setPayloadOcclusion( true );
}
// Closest-hit program for radiance rays: compute the hit normal and point,
// accumulate the diffuse attenuation (from a texture when one is bound for
// this material), sample a cosine-weighted bounce direction, and add direct
// lighting from the parallelogram light via a shadow ray.
extern "C" __global__ void __closesthit__radiance()
{
    HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
    RadiancePRD*  prd     = getPRD();

    const int    prim_idx        = optixGetPrimitiveIndex();
    const float3 ray_dir         = optixGetWorldRayDirection();
    const int    vert_idx_offset = prim_idx*3;

    // Compute normal and hit point
    const float3 v0  = make_float3( rt_data->vertices[ vert_idx_offset+0 ] );
    const float3 v1  = make_float3( rt_data->vertices[ vert_idx_offset+1 ] );
    const float3 v2  = make_float3( rt_data->vertices[ vert_idx_offset+2 ] );
    const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) );
    // Flip the geometric normal so it faces the incoming ray.
    const float3 N = faceforward( N_0, -ray_dir, N_0 );
    const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir;

    // Account for emission; countEmitted is true only on the path's first
    // hit (it is cleared below after the first bounce is sampled).
    if( prd->countEmitted )
        prd->emitted = rt_data->emission_color;
    else
        prd->emitted = make_float3( 0.0f );

    // Compute attenuation (diffuse color) using texture if available
    cudaTextureObject_t texture = rt_data->diffuse_texture;
    if (texture != 0)
    {
        // get barycentric coordinates
        const float2 barycentrics = optixGetTriangleBarycentrics();
        const float b1 = barycentrics.x;
        const float b2 = barycentrics.y;
        const float b0 = 1.0f - (b1 + b2);

        // compute texture coordinates by barycentric interpolation
        const int vindex = optixGetPrimitiveIndex() * 3;
        const float2 t0 = rt_data->tex_coords[ vindex+0 ];
        const float2 t1 = rt_data->tex_coords[ vindex+1 ];
        const float2 t2 = rt_data->tex_coords[ vindex+2 ];
        float2 tex_coord = b0*t0 + b1*t1 + b2*t2;
        float s = tex_coord.x;
        float t = tex_coord.y;

        // sample texture
        float4 tex_val = tex2D<float4>( rt_data->diffuse_texture, s, t );
        prd->attenuation *= make_float3( tex_val );
    }
    else
    {
        prd->attenuation *= rt_data->diffuse_color;
    }
    unsigned int seed = prd->seed;

    // Sample a hemisphere direction and place in per-ray data
    {
        const float z1 = rnd(seed);
        const float z2 = rnd(seed);

        float3 w_in;
        cosine_sample_hemisphere( z1, z2, w_in );
        Onb onb( N );
        onb.inverse_transform( w_in );
        prd->direction = w_in;
        prd->origin    = P;

        prd->countEmitted = false;
    }

    // Sample a position on the light source
    const float z1 = rnd(seed);
    const float z2 = rnd(seed);
    prd->seed = seed;

    ParallelogramLight light     = params.light;
    const float3       light_pos = light.corner + light.v1 * z1 + light.v2 * z2;

    // Calculate properties of light sample (for area based pdf)
    const float  Ldist = length(light_pos - P );
    const float3 L     = normalize(light_pos - P );
    const float  nDl   = dot( N, L );
    const float  LnDl  = -dot( light.normal, L );

    // Cast the shadow ray only when the surface and the light face each other.
    float weight = 0.0f;
    if( nDl > 0.0f && LnDl > 0.0f )
    {
        const bool occluded = traceOcclusion(
            params.handle,
            P,
            L,
            0.01f,         // tmin
            Ldist - 0.01f  // tmax
            );

        if( !occluded )
        {
            // Area-measure light sampling weight: cosines times light area
            // over pi and squared distance.
            const float A = length(cross(light.v1, light.v2));
            weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist);
        }
    }

    prd->radiance += light.emission * weight;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/SDK/optixNVLink/optixNVLink.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Ray types; used as SBT offset/stride and miss-record index in optixTrace.
enum RayType
{
    RAY_TYPE_RADIANCE  = 0,  // path-tracing rays that gather light
    RAY_TYPE_OCCLUSION = 1,  // shadow rays toward a light sample
    RAY_TYPE_COUNT
};
// Area light described by a corner point and two edge vectors.
struct ParallelogramLight
{
    float3 corner;    // one corner of the parallelogram
    float3 v1, v2;    // edge vectors spanning the light surface
    float3 normal;    // light surface normal
    float3 emission;  // emitted radiance
};
// Launch parameters shared between host and device; uploaded to the
// __constant__ `params` symbol before each optixLaunch.
struct Params
{
    unsigned int subframe_index;       // number of subframes accumulated so far
    int2*        sample_index_buffer;  // per-launch-index pixel assignment (work distribution)
    float4*      sample_accum_buffer;  // running average per assigned sample
    uchar4*      result_buffer;        // final display image (indexed by pixel)
    unsigned int width;                // image width in pixels
    unsigned int height;               // image height in pixels
    unsigned int samples_per_launch;   // paths traced per pixel per launch
    unsigned int device_idx;           // index of the device running this launch

    float3 eye;  // camera position
    float3 U;    // camera basis vectors (U: right, V: up, W: view direction)
    float3 V;
    float3 W;

    ParallelogramLight     light;   // single area light of the scene
    OptixTraversableHandle handle;  // top-level traversable passed to optixTrace

    float device_color_scale; // to turn on/off multi-gpu pattern overlay
};
// SBT record data for the ray generation program.
// NOTE(review): not read by __raygen__rg in this sample's device code.
struct RayGenData
{
    float r, g, b;
};
// SBT record data for the miss program: the constant background color
// returned by __miss__radiance.
struct MissData
{
    float r, g, b;
};
// SBT record data for hit programs: per-material shading inputs.
struct HitGroupData
{
    float3  emission_color;  // emitted radiance of the material
    float3  diffuse_color;   // constant albedo, used when no texture is bound
    float4* vertices;        // triangle vertex positions (3 per primitive)
    float2* tex_coords;      // texture coordinates (3 per primitive)
    cudaTextureObject_t diffuse_texture;  // diffuse texture, or 0 if none
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.