// NOTE(review): removed non-C++ page-scrape residue that preceded this file
// ("Spaces: Running on Zero", file size 4,354 bytes, revision 648cdec) —
// it was extraction junk, not source code, and would not compile.
#include <torch/extension.h>
#include "api.h"
#include <cstdint>
#include <vector>
/**
 * Encode a list of sparse voxel morton codes into a sparse voxel octree.
 * NOTE: The input codes must be sorted in ascending order.
 *
 * @param codes [N] int32 tensor containing the morton codes
 *              (3 bits per level; level 0 occupies the most-significant
 *              used bits — the code reads it with `data_ptr<int32_t>`, so the
 *              tensor dtype must be int32 even though the bit patterns are
 *              treated as unsigned)
 * @param depth The depth of the sparse voxel octree (must be >= 1)
 *
 * @return uint8 tensor containing the sparse voxel octree: one child-mask
 *         byte per node, emitted in depth-first order
 */
torch::Tensor encode_sparse_voxel_octree_cpu(
    const torch::Tensor& codes,
    const uint32_t depth
) {
    // depth is unsigned: depth == 0 would make `depth - 1` wrap to a huge
    // value below and allocate gigabytes, so reject it up front.
    TORCH_CHECK(depth >= 1, "encode_sparse_voxel_octree_cpu: depth must be >= 1");
    const size_t N_leaf = static_cast<size_t>(codes.size(0));
    // data_ptr<int32_t> enforces the int32 dtype; bit patterns are then
    // reinterpreted as unsigned morton codes.
    const int32_t* codes_data = codes.data_ptr<int32_t>();
    std::vector<uint8_t> svo;                   // output: one child mask per node
    std::vector<uint8_t> stack(depth - 1);      // octant path of the previous code
    std::vector<uint32_t> insert_stack(depth);  // octant path of the current code
    std::vector<uint32_t> stack_ptr(depth);     // svo index of the open node per level
    // Root node
    svo.push_back(0);
    stack_ptr[0] = 0;
    // Iterate over all codes and encode them into the SVO.
    for (size_t i = 0; i < N_leaf; i++) {  // size_t: avoid signed/unsigned mismatch
        const uint32_t code = static_cast<uint32_t>(codes_data[i]);
        // Convert code to insert stack (3 bits per level, level 0 first).
        for (uint32_t j = 0; j < depth; j++) {
            insert_stack[j] = (code >> (3 * (depth - 1 - j))) & 0x7;
        }
        // Find the first level where this code diverges from the previous one.
        uint32_t insert_from = 0;
        if (i > 0) {
            for (insert_from = 0; insert_from < depth - 1; insert_from++) {
                if (insert_stack[insert_from] != stack[insert_from]) {
                    break;
                }
            }
        }
        // Insert new nodes from insert_from to depth-1.
        for (uint32_t j = insert_from; j < depth; j++) {
            // The node at level insert_from already exists (shared prefix);
            // every deeper level opens a fresh node.
            if (j > insert_from) {
                svo.push_back(0);
                stack_ptr[j] = svo.size() - 1;
            }
            // Mark the octant taken at this level in the node's child mask.
            svo[stack_ptr[j]] |= (1 << insert_stack[j]);
            // Remember the path for the next code's divergence check.
            if (j < depth - 1) {
                stack[j] = insert_stack[j];
            }
        }
    }
    // Copy the byte stream into a tensor; clone() so it owns its memory
    // after the local vector is destroyed.
    torch::Tensor svo_tensor = torch::from_blob(
        svo.data(), {static_cast<int64_t>(svo.size())}, torch::kUInt8).clone();
    return svo_tensor;
}
/**
 * Depth-first walk of one SVO node, appending every morton code found in
 * its subtree to `codes`.
 *
 * @param svo   Flat array of child-mask bytes in depth-first order
 * @param depth Total depth of the octree
 * @param ptr   In/out cursor into `svo`; advanced past this subtree
 * @param stack Octant path from the root to the current node (one entry
 *              per level already descended)
 * @param codes Output list of decoded morton codes (ascending order)
 */
void decode_sparse_voxel_octree_cpu_recursive(
    const uint8_t* svo,
    const uint32_t depth,
    uint32_t& ptr,
    std::vector<uint8_t>& stack,
    std::vector<uint32_t>& codes
) {
    // Read this node's child mask and move the cursor past it.
    const uint8_t mask = svo[ptr++];
    const bool at_leaf_level = (stack.size() == depth - 1);
    if (at_leaf_level) {
        // Reassemble the morton prefix from the path taken so far
        // (3 bits per level, level 0 in the most-significant used bits).
        uint32_t prefix = 0;
        for (uint32_t level = 0; level + 1 < depth; level++) {
            prefix |= static_cast<uint32_t>(stack[level]) << (3 * (depth - 1 - level));
        }
        // Each set bit is one occupied voxel: low 3 bits hold its octant.
        for (uint8_t child = 0; child < 8; child++) {
            if (mask & (1 << child)) {
                codes.push_back((prefix & ~0x7) | child);
            }
        }
    } else {
        // Internal node: children are stored depth-first in bit order,
        // so visiting bits 0..7 keeps the output sorted ascending.
        for (uint8_t child = 0; child < 8; child++) {
            if (mask & (1 << child)) {
                stack.push_back(child);
                decode_sparse_voxel_octree_cpu_recursive(svo, depth, ptr, stack, codes);
                stack.pop_back();
            }
        }
    }
}
/**
 * Decode a sparse voxel octree into a list of sparse voxel morton codes.
 *
 * @param octree uint8 tensor containing the sparse voxel octree
 *               (depth-first child-mask bytes, as produced by the encoder)
 * @param depth The depth of the sparse voxel octree (must be >= 1)
 *
 * @return [N] int32 tensor containing the morton codes (uint32 bit
 *         patterns viewed as int32). The codes are sorted in ascending order.
 */
torch::Tensor decode_sparse_voxel_octree_cpu(
    const torch::Tensor& octree,
    const uint32_t depth
) {
    // Unsigned arithmetic below (`depth - 1`) requires depth >= 1.
    TORCH_CHECK(depth >= 1, "decode_sparse_voxel_octree_cpu: depth must be >= 1");
    const uint8_t* octree_data = octree.data_ptr<uint8_t>();
    std::vector<uint32_t> codes;
    std::vector<uint8_t> stack;
    // The traversal stack holds one octant per non-leaf level, i.e. up to
    // depth - 1 entries. The previous `reserve(depth - 2)` was off by one
    // and, worse, underflowed to ~4 billion for depth == 1 since depth is
    // unsigned — guard the subtraction.
    if (depth > 1) {
        stack.reserve(depth - 1);
    }
    uint32_t ptr = 0;
    // Decode the SVO into a list of codes via a depth-first walk from the root.
    decode_sparse_voxel_octree_cpu_recursive(octree_data, depth, ptr, stack, codes);
    // Copy the codes into a tensor; clone() so it owns its memory after the
    // local vector is destroyed.
    torch::Tensor codes_tensor = torch::from_blob(
        codes.data(), {static_cast<int64_t>(codes.size())}, torch::kInt32).clone();
    return codes_tensor;
}