/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

/** @file   trainable_buffer.cuh
 *  @author Thomas Müller, NVIDIA
 *  @brief  An implementation of a trainable N-channel buffer within the tcnn API.
 */

#pragma once

// NOTE(review): every angle-bracketed token in this file (template parameter
// lists, #include targets, template arguments) had been stripped by a faulty
// text-extraction step. They are reconstructed below from how the names are
// used in the method bodies (N_DIMS, RANK, T, tcnn::*, Eigen::Matrix) —
// confirm against the upstream instant-ngp source.
#include <neural-graphics-primitives/common.h>

#include <tiny-cuda-nn/common.h>
#include <tiny-cuda-nn/gpu_memory.h>
#include <tiny-cuda-nn/object.h>
#include <tiny-cuda-nn/random.h>

#include <Eigen/Dense>

#include <cstddef>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <utility>
#include <vector>

NGP_NAMESPACE_BEGIN

/**
 * A trainable buffer of N_DIMS channels over a RANK-dimensional grid.
 *
 * The class plugs into the tcnn differentiable-object API solely so that its
 * parameters can be managed/optimized by a tcnn trainer; inference(),
 * forward(), and backward() all throw, because the buffer's contents are
 * meant to be read and written externally (see the error messages below).
 *
 * @tparam N_DIMS Number of channels stored per grid cell.
 * @tparam RANK   Dimensionality of the grid (and thus of the input coordinates).
 * @tparam T      Parameter/gradient precision type used by the tcnn API.
 */
template <uint32_t N_DIMS, uint32_t RANK, typename T>
class TrainableBuffer : public tcnn::DifferentiableObject<float, T, T> {
	// Integer resolution of the grid: one extent per dimension (RANK entries).
	using ResVector = Eigen::Matrix<int, RANK, 1>;

public:
	TrainableBuffer(const ResVector& resolution) : m_resolution{resolution} {
		// One gradient weight per parameter.
		m_params_gradient_weight.resize(n_params());
	}

	virtual ~TrainableBuffer() { }

	// Unsupported: the buffer is not evaluated through the network API.
	void inference_mixed_precision_impl(cudaStream_t stream, const tcnn::GPUMatrixDynamic<float>& input, tcnn::GPUMatrixDynamic<T>& output, bool use_inference_matrices = true) override {
		throw std::runtime_error{"The trainable buffer does not support inference(). Its content is meant to be used externally."};
	}

	// Unsupported: see above.
	std::unique_ptr<tcnn::Context> forward_impl(cudaStream_t stream, const tcnn::GPUMatrixDynamic<float>& input, tcnn::GPUMatrixDynamic<T>* output = nullptr, bool use_inference_matrices = false, bool prepare_input_gradients = false) override {
		throw std::runtime_error{"The trainable buffer does not support forward(). Its content is meant to be used externally."};
	}

	// Unsupported: gradients are expected to be written into the buffer's
	// parameter-gradient storage externally.
	void backward_impl(
		cudaStream_t stream,
		const tcnn::Context& ctx,
		const tcnn::GPUMatrixDynamic<float>& input,
		const tcnn::GPUMatrixDynamic<T>& output,
		const tcnn::GPUMatrixDynamic<T>& dL_doutput,
		tcnn::GPUMatrixDynamic<float>* dL_dinput = nullptr,
		bool use_inference_matrices = false,
		tcnn::EGradientMode param_gradients_mode = tcnn::EGradientMode::Overwrite
	) override {
		throw std::runtime_error{"The trainable buffer does not support backward(). Its content is meant to be used externally."};
	}

	// No extra bookkeeping needed when the trainer hands out parameter memory.
	void set_params_impl(T* params, T* inference_params, T* gradients) override { }

	void initialize_params(tcnn::pcg32& rnd, float* params_full_precision, float scale = 1) override {
		// Initialize the buffer to zero from the GPU. (cudaMemset is byte-wise,
		// which is correct here precisely because the fill value is 0.)
		CUDA_CHECK_THROW(cudaMemset(params_full_precision, 0, n_params()*sizeof(float)));
	}

	// Total parameter count: N_DIMS channels for every cell of the grid.
	size_t n_params() const override {
		return m_resolution.prod() * N_DIMS;
	}

	uint32_t input_width() const override {
		return RANK;
	}

	uint32_t padded_output_width() const override {
		return N_DIMS;
	}

	uint32_t output_width() const override {
		return N_DIMS;
	}

	uint32_t required_input_alignment() const override {
		return 1; // No alignment required
	}

	// No matrix-multiplication layers — nothing to report to the trainer.
	std::vector<std::pair<uint32_t, uint32_t>> layer_sizes() const override {
		return {};
	}

	// Per-parameter gradient weights, resized in the constructor.
	T* gradient_weights() const {
		return m_params_gradient_weight.data();
	}

	tcnn::json hyperparams() const override {
		return {
			{"otype", "TrainableBuffer"},
		};
	}

private:
	ResVector m_resolution;
	tcnn::GPUMemory<T> m_params_gradient_weight;
};

NGP_NAMESPACE_END