#!/usr/bin/env python3
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright notice, this list of
#       conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright notice, this list of
#       conditions and the following disclaimer in the documentation and/or other materials
#       provided with the distribution.
#     * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
#       to endorse or promote products derived from this software without specific prior written
#       permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import tinycudann as tcnn

# Minimal reproduction script: a tiny fully-fused MLP mapping 3 inputs to
# 3 outputs, run on the GPU via the tiny-cuda-nn PyTorch extension.
mlp_config = {
    "otype": "FullyFusedMLP",
    "activation": "ReLU",
    "output_activation": "None",
    "n_neurons": 16,
    "n_hidden_layers": 2,
}
net = tcnn.Network(
    n_input_dims=3,
    n_output_dims=3,
    network_config=mlp_config,
).cuda()

# A single forward followed by a single backward works as expected.
batch = torch.rand(256, 3, device='cuda')
out = net(batch)
out.sum().backward()  # OK

# Two forward passes before one backward trigger the failure: the first
# forward's context appears to be discarded by the second call, so the
# combined backward cannot find it — TODO confirm against tiny-cuda-nn's
# context handling.
batch2 = torch.rand(256, 3, device='cuda')
out = net(batch)
out2 = net(batch2)
(out + out2).sum().backward()  # RuntimeError: Must call forward() before calling backward()
print("success!")