file_path
stringlengths
3
280
file_language
stringclasses
66 values
content
stringlengths
1
1.04M
repo_name
stringlengths
5
92
repo_stars
int64
0
154k
repo_description
stringlengths
0
402
repo_primary_language
stringclasses
108 values
developer_username
stringlengths
1
25
developer_name
stringlengths
0
30
developer_company
stringlengths
0
82
lit/extern/spconv/include/tensorview/prettyprint.h
C/C++ Header
// Copyright Louis Delacroix 2010 - 2014. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // A pretty printing library for C++ // // Usage: // Include this header, and operator<< will "just work". #ifndef H_PRETTY_PRINT #define H_PRETTY_PRINT #include <cstddef> #include <iterator> #include <memory> #include <ostream> #include <set> #include <tuple> #include <type_traits> #include <unordered_set> #include <utility> #include <valarray> namespace pretty_print { namespace detail { // SFINAE type trait to detect whether T::const_iterator exists. struct sfinae_base { using yes = char; using no = yes[2]; }; template <typename T> struct has_const_iterator : private sfinae_base { private: template <typename C> static yes &test(typename C::const_iterator *); template <typename C> static no &test(...); public: static const bool value = sizeof(test<T>(nullptr)) == sizeof(yes); using type = T; }; template <typename T> struct has_begin_end : private sfinae_base { private: template <typename C> static yes & f(typename std::enable_if< std::is_same<decltype(static_cast<typename C::const_iterator (C::*)() const>(&C::begin)), typename C::const_iterator (C::*)() const>::value>::type *); template <typename C> static no &f(...); template <typename C> static yes & g(typename std::enable_if< std::is_same<decltype(static_cast<typename C::const_iterator (C::*)() const>(&C::end)), typename C::const_iterator (C::*)() const>::value, void>::type *); template <typename C> static no &g(...); public: static bool const beg_value = sizeof(f<T>(nullptr)) == sizeof(yes); static bool const end_value = sizeof(g<T>(nullptr)) == sizeof(yes); }; } // namespace detail // Holds the delimiter values for a specific character type template <typename TChar> struct delimiters_values { using char_type = TChar; const char_type *prefix; const char_type *delimiter; const char_type *postfix; }; // Defines the 
delimiter values for a specific container and character type template <typename T, typename TChar> struct delimiters { using type = delimiters_values<TChar>; static const type values; }; // Functor to print containers. You can use this directly if you want // to specificy a non-default delimiters type. The printing logic can // be customized by specializing the nested template. template <typename T, typename TChar = char, typename TCharTraits = ::std::char_traits<TChar>, typename TDelimiters = delimiters<T, TChar>> struct print_container_helper { using delimiters_type = TDelimiters; using ostream_type = std::basic_ostream<TChar, TCharTraits>; template <typename U> struct printer { static void print_body(const U &c, ostream_type &stream) { using std::begin; using std::end; auto it = begin(c); const auto the_end = end(c); if (it != the_end) { for (;;) { stream << *it; if (++it == the_end) break; if (delimiters_type::values.delimiter != NULL) stream << delimiters_type::values.delimiter; } } } }; print_container_helper(const T &container) : container_(container) {} inline void operator()(ostream_type &stream) const { if (delimiters_type::values.prefix != NULL) stream << delimiters_type::values.prefix; printer<T>::print_body(container_, stream); if (delimiters_type::values.postfix != NULL) stream << delimiters_type::values.postfix; } private: const T &container_; }; // Specialization for pairs template <typename T, typename TChar, typename TCharTraits, typename TDelimiters> template <typename T1, typename T2> struct print_container_helper<T, TChar, TCharTraits, TDelimiters>::printer<std::pair<T1, T2>> { using ostream_type = typename print_container_helper<T, TChar, TCharTraits, TDelimiters>::ostream_type; static void print_body(const std::pair<T1, T2> &c, ostream_type &stream) { stream << c.first; if (print_container_helper<T, TChar, TCharTraits, TDelimiters>::delimiters_type::values .delimiter != NULL) stream << print_container_helper<T, TChar, TCharTraits, 
TDelimiters>::delimiters_type::values .delimiter; stream << c.second; } }; // Specialization for tuples template <typename T, typename TChar, typename TCharTraits, typename TDelimiters> template <typename... Args> struct print_container_helper<T, TChar, TCharTraits, TDelimiters>::printer<std::tuple<Args...>> { using ostream_type = typename print_container_helper<T, TChar, TCharTraits, TDelimiters>::ostream_type; using element_type = std::tuple<Args...>; template <std::size_t I> struct Int {}; static void print_body(const element_type &c, ostream_type &stream) { tuple_print(c, stream, Int<0>()); } static void tuple_print(const element_type &, ostream_type &, Int<sizeof...(Args)>) {} static void tuple_print(const element_type &c, ostream_type &stream, typename std::conditional<sizeof...(Args) != 0, Int<0>, std::nullptr_t>::type) { stream << std::get<0>(c); tuple_print(c, stream, Int<1>()); } template <std::size_t N> static void tuple_print(const element_type &c, ostream_type &stream, Int<N>) { if (print_container_helper<T, TChar, TCharTraits, TDelimiters>::delimiters_type::values .delimiter != NULL) stream << print_container_helper<T, TChar, TCharTraits, TDelimiters>::delimiters_type::values .delimiter; stream << std::get<N>(c); tuple_print(c, stream, Int<N + 1>()); } }; // Prints a print_container_helper to the specified stream. 
template <typename T, typename TChar, typename TCharTraits, typename TDelimiters> inline std::basic_ostream<TChar, TCharTraits> &operator<<( std::basic_ostream<TChar, TCharTraits> &stream, const print_container_helper<T, TChar, TCharTraits, TDelimiters> &helper) { helper(stream); return stream; } // Basic is_container template; specialize to derive from std::true_type for all // desired container types template <typename T> struct is_container : public std::integral_constant<bool, detail::has_const_iterator<T>::value && detail::has_begin_end<T>::beg_value && detail::has_begin_end<T>::end_value> {}; template <typename T, std::size_t N> struct is_container<T[N]> : std::true_type {}; template <std::size_t N> struct is_container<char[N]> : std::false_type {}; template <typename T> struct is_container<std::valarray<T>> : std::true_type {}; template <typename T1, typename T2> struct is_container<std::pair<T1, T2>> : std::true_type {}; template <typename... Args> struct is_container<std::tuple<Args...>> : std::true_type {}; // Default delimiters template <typename T> struct delimiters<T, char> { static const delimiters_values<char> values; }; template <typename T> const delimiters_values<char> delimiters<T, char>::values = {"[", ", ", "]"}; template <typename T> struct delimiters<T, wchar_t> { static const delimiters_values<wchar_t> values; }; template <typename T> const delimiters_values<wchar_t> delimiters<T, wchar_t>::values = {L"[", L", ", L"]"}; // Delimiters for (multi)set and unordered_(multi)set template <typename T, typename TComp, typename TAllocator> struct delimiters<::std::set<T, TComp, TAllocator>, char> { static const delimiters_values<char> values; }; template <typename T, typename TComp, typename TAllocator> const delimiters_values<char> delimiters<::std::set<T, TComp, TAllocator>, char>::values = {"{", ", ", "}"}; template <typename T, typename TComp, typename TAllocator> struct delimiters<::std::set<T, TComp, TAllocator>, wchar_t> { static const 
delimiters_values<wchar_t> values; }; template <typename T, typename TComp, typename TAllocator> const delimiters_values<wchar_t> delimiters<::std::set<T, TComp, TAllocator>, wchar_t>::values = { L"{", L", ", L"}"}; template <typename T, typename TComp, typename TAllocator> struct delimiters<::std::multiset<T, TComp, TAllocator>, char> { static const delimiters_values<char> values; }; template <typename T, typename TComp, typename TAllocator> const delimiters_values<char> delimiters<::std::multiset<T, TComp, TAllocator>, char>::values = {"{", ", ", "}"}; template <typename T, typename TComp, typename TAllocator> struct delimiters<::std::multiset<T, TComp, TAllocator>, wchar_t> { static const delimiters_values<wchar_t> values; }; template <typename T, typename TComp, typename TAllocator> const delimiters_values<wchar_t> delimiters<::std::multiset<T, TComp, TAllocator>, wchar_t>::values = { L"{", L", ", L"}"}; template <typename T, typename THash, typename TEqual, typename TAllocator> struct delimiters<::std::unordered_set<T, THash, TEqual, TAllocator>, char> { static const delimiters_values<char> values; }; template <typename T, typename THash, typename TEqual, typename TAllocator> const delimiters_values<char> delimiters< ::std::unordered_set<T, THash, TEqual, TAllocator>, char>::values = { "{", ", ", "}"}; template <typename T, typename THash, typename TEqual, typename TAllocator> struct delimiters<::std::unordered_set<T, THash, TEqual, TAllocator>, wchar_t> { static const delimiters_values<wchar_t> values; }; template <typename T, typename THash, typename TEqual, typename TAllocator> const delimiters_values<wchar_t> delimiters< ::std::unordered_set<T, THash, TEqual, TAllocator>, wchar_t>::values = { L"{", L", ", L"}"}; template <typename T, typename THash, typename TEqual, typename TAllocator> struct delimiters<::std::unordered_multiset<T, THash, TEqual, TAllocator>, char> { static const delimiters_values<char> values; }; template <typename T, typename THash, 
typename TEqual, typename TAllocator> const delimiters_values<char> delimiters< ::std::unordered_multiset<T, THash, TEqual, TAllocator>, char>::values = { "{", ", ", "}"}; template <typename T, typename THash, typename TEqual, typename TAllocator> struct delimiters<::std::unordered_multiset<T, THash, TEqual, TAllocator>, wchar_t> { static const delimiters_values<wchar_t> values; }; template <typename T, typename THash, typename TEqual, typename TAllocator> const delimiters_values<wchar_t> delimiters<::std::unordered_multiset<T, THash, TEqual, TAllocator>, wchar_t>::values = {L"{", L", ", L"}"}; // Delimiters for pair and tuple template <typename T1, typename T2> struct delimiters<std::pair<T1, T2>, char> { static const delimiters_values<char> values; }; template <typename T1, typename T2> const delimiters_values<char> delimiters<std::pair<T1, T2>, char>::values = { "(", ", ", ")"}; template <typename T1, typename T2> struct delimiters<::std::pair<T1, T2>, wchar_t> { static const delimiters_values<wchar_t> values; }; template <typename T1, typename T2> const delimiters_values<wchar_t> delimiters<::std::pair<T1, T2>, wchar_t>::values = {L"(", L", ", L")"}; template <typename... Args> struct delimiters<std::tuple<Args...>, char> { static const delimiters_values<char> values; }; template <typename... Args> const delimiters_values<char> delimiters<std::tuple<Args...>, char>::values = { "(", ", ", ")"}; template <typename... Args> struct delimiters<::std::tuple<Args...>, wchar_t> { static const delimiters_values<wchar_t> values; }; template <typename... Args> const delimiters_values<wchar_t> delimiters<::std::tuple<Args...>, wchar_t>::values = {L"(", L", ", L")"}; // Type-erasing helper class for easy use of custom delimiters. // Requires TCharTraits = std::char_traits<TChar> and TChar = char or wchar_t, // and MyDelims needs to be defined for TChar. Usage: "cout << // pretty_print::custom_delims<MyDelims>(x)". 
struct custom_delims_base { virtual ~custom_delims_base() {} virtual std::ostream &stream(::std::ostream &) = 0; virtual std::wostream &stream(::std::wostream &) = 0; }; template <typename T, typename Delims> struct custom_delims_wrapper : custom_delims_base { custom_delims_wrapper(const T &t_) : t(t_) {} std::ostream &stream(std::ostream &s) { return s << print_container_helper<T, char, std::char_traits<char>, Delims>( t); } std::wostream &stream(std::wostream &s) { return s << print_container_helper<T, wchar_t, std::char_traits<wchar_t>, Delims>(t); } private: const T &t; }; template <typename Delims> struct custom_delims { template <typename Container> custom_delims(const Container &c) : base(new custom_delims_wrapper<Container, Delims>(c)) {} std::unique_ptr<custom_delims_base> base; }; template <typename TChar, typename TCharTraits, typename Delims> inline std::basic_ostream<TChar, TCharTraits> & operator<<(std::basic_ostream<TChar, TCharTraits> &s, const custom_delims<Delims> &p) { return p.base->stream(s); } // A wrapper for a C-style array given as pointer-plus-size. // Usage: std::cout << pretty_print_array(arr, n) << std::endl; template <typename T> struct array_wrapper_n { typedef const T *const_iterator; typedef T value_type; array_wrapper_n(const T *const a, size_t n) : _array(a), _n(n) {} inline const_iterator begin() const { return _array; } inline const_iterator end() const { return _array + _n; } private: const T *const _array; size_t _n; }; // A wrapper for hash-table based containers that offer local iterators to each // bucket. Usage: std::cout << bucket_print(m, 4) << std::endl; (Prints bucket // 5 of container m.) 
template <typename T> struct bucket_print_wrapper { typedef typename T::const_local_iterator const_iterator; typedef typename T::size_type size_type; const_iterator begin() const { return m_map.cbegin(n); } const_iterator end() const { return m_map.cend(n); } bucket_print_wrapper(const T &m, size_type bucket) : m_map(m), n(bucket) {} private: const T &m_map; const size_type n; }; } // namespace pretty_print // Global accessor functions for the convenience wrappers template <typename T> inline pretty_print::array_wrapper_n<T> pretty_print_array(const T *const a, size_t n) { return pretty_print::array_wrapper_n<T>(a, n); } template <typename T> pretty_print::bucket_print_wrapper<T> bucket_print(const T &m, typename T::size_type n) { return pretty_print::bucket_print_wrapper<T>(m, n); } // Main magic entry point: An overload snuck into namespace std. // Can we do better? namespace std { // Prints a container to the stream using default delimiters template <typename T, typename TChar, typename TCharTraits> inline typename enable_if<::pretty_print::is_container<T>::value, basic_ostream<TChar, TCharTraits> &>::type operator<<(basic_ostream<TChar, TCharTraits> &stream, const T &container) { return stream << ::pretty_print::print_container_helper<T, TChar, TCharTraits>( container); } } // namespace std #endif // H_PRETTY_PRINT
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tensorview/pybind_utils.h
C/C++ Header
// Copyright 2019-2020 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "tensor.h" #include "tensorview.h" #include <algorithm> #include <array> #include <iostream> #include <pybind11/functional.h> #include <pybind11/numpy.h> #include <pybind11/pybind11.h> #include <pybind11/stl.h> namespace py = pybind11; namespace tv { template <typename Tarr> bool is_c_style(const Tarr &arr) { return bool(arr.flags() & py::array::c_style); } template <typename T, int Rank = -1> TensorView<T, Rank> arrayt2tv(py::array_t<T> arr) { TV_ASSERT_INVALID_ARG(is_c_style(arr), "array must be c-contiguous array"); Shape shape; for (int i = 0; i < arr.ndim(); ++i) { shape.push_back(arr.shape(i)); } if (Rank >= 0) { TV_ASSERT_INVALID_ARG(shape.ndim() == Rank, "error"); } return TensorView<T, Rank>(arr.mutable_data(), shape); } template <typename T, int Rank = -1> TensorView<const T> carrayt2tv(py::array_t<T> arr) { TV_ASSERT_INVALID_ARG(is_c_style(arr), "array must be c-contiguous array"); Shape shape; for (int i = 0; i < arr.ndim(); ++i) { shape.push_back(arr.shape(i)); } if (Rank >= 0) { TV_ASSERT_INVALID_ARG(shape.ndim() == Rank, "error"); } return TensorView<const T, Rank>(arr.data(), shape); } template <typename Tarr> tv::DType get_array_tv_dtype(const Tarr &arr) { switch (arr.dtype().kind()) { case 'b': return tv::bool_; case 'i': { switch (arr.itemsize()) { case 1: return tv::int8; case 2: return tv::int16; case 4: return tv::int32; case 8: return tv::int64; 
default: break; } } case 'u': { switch (arr.itemsize()) { case 1: return tv::uint8; case 2: return tv::uint16; case 4: return tv::uint32; case 8: return tv::uint64; default: break; } } case 'f': { switch (arr.itemsize()) { case 2: return tv::float16; case 4: return tv::float32; case 8: return tv::float64; default: break; } } } TV_THROW_RT_ERR("unknown dtype", arr.dtype().kind(), arr.itemsize()); } template <typename Tarr> Tensor array2tensor(Tarr &arr) { TV_ASSERT_INVALID_ARG(is_c_style(arr), "array must be c-contiguous array"); TensorShape shape; for (int i = 0; i < arr.ndim(); ++i) { shape.push_back(arr.shape(i)); } return tv::from_blob(arr.mutable_data(), shape, get_array_tv_dtype(arr), -1); } template <typename T> Tensor arrayt2tensor(py::array_t<T> &arr) { TV_ASSERT_INVALID_ARG(is_c_style(arr), "array must be c-contiguous array"); TensorShape shape; for (int i = 0; i < arr.ndim(); ++i) { shape.push_back(arr.shape(i)); } return tv::from_blob(arr.mutable_data(), shape, tv::type_v<T>, -1); } template <typename TDType> py::dtype tv_dtype_to_py(TDType d) { switch (d) { case float32: return py::dtype("float32"); case float64: return py::dtype("float64"); case float16: return py::dtype("float16"); case int32: return py::dtype("int32"); case int16: return py::dtype("int16"); case int8: return py::dtype("int8"); case int64: return py::dtype("int64"); case uint32: return py::dtype("uint32"); case uint16: return py::dtype("uint16"); case uint8: return py::dtype("uint8"); case uint64: return py::dtype("uint64"); case bool_: return py::dtype("bool_"); default:; } TV_THROW_INVALID_ARG("unknown dtype", d); } // add template to define function in header template <typename Ttensor> py::array tensor2array(Ttensor &tensor) { // you cant call this function during GIL released. 
TV_ASSERT_INVALID_ARG(tensor.device() == -1, "must be cpu tensor"); auto shape = tensor.shape(); std::vector<int> shape_vec(shape.begin(), shape.end()); auto dtype = tv_dtype_to_py(tensor.dtype()); // construct py::array will copy content from ptr. // its expected because we can't transfer ownership from // c++ tv::Tensor to numpy array when c++ object is deleted. return py::array(dtype, shape_vec, {}, tensor.raw_data()); } } // namespace tv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tensorview/tensor.h
C/C++ Header
// Copyright 2019-2020 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* tv::Tensor is a lightweight header-only tensor container without template and annoying dependencies. no algorithm is implemented. it should only be used when you want a no-template simple container but dont want to link with libtorch. If you can use libtorch, dont use tv::Tensor. */ #pragma once #include "mp_helper.h" #include "tensorview.h" #include <cstring> #include <iomanip> #include <memory> #include <type_traits> #ifdef TV_CUDA #include <cuda_fp16.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #endif namespace tv { enum DType { float32, int32, int16, int8, float64, bool_, uint8, float16, int64, uint16, uint32, uint64 }; namespace detail { using dtype_collection_t = tv::mp_list_c<int, float32, int32, int16, int8, float64, bool_, uint8, float16, int64, uint16, uint32, uint64>; #ifdef TV_CUDA using all_tensor_types_t = std::tuple<float, double, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, bool>; #else using all_tensor_types_t = std::tuple<float, double, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, bool>; #endif template <typename T> class TensorStorage { public: TensorStorage(size_t size, int device = -1, bool managed = false, bool pinned = false) : mSize(size), device_(device), managed_(managed), pinned_(pinned) { if (size == 0) { mPtr = nullptr; } else { if (device == -1) { if (pinned_) { #ifdef TV_CUDA 
checkCudaErrors(cudaMallocHost(&mPtr, size * sizeof(T))); #else TV_THROW_INVALID_ARG("you need to define TV_CUDA to use pinned"); #endif } else { mPtr = new T[size]; } } else { #ifdef TV_CUDA // we should select device in external /* int deviceCount; cudaGetDeviceCount(&deviceCount); if (device >= deviceCount) { TV_THROW_INVALID_ARG("you provide device ", device, " but you only have ", deviceCount, " device."); } cudaSetDevice(device); */ if (managed) { checkCudaErrors(cudaMallocManaged(&this->mPtr, size * sizeof(T))); } else { checkCudaErrors(cudaMalloc(&mPtr, size * sizeof(T))); } #else TV_THROW_INVALID_ARG("don't compiled with cuda"); #endif } } } TensorStorage(T *ptr, size_t size, int device) : mSize(size), mPtr(ptr), from_blob_(true), device_(device) {} virtual ~TensorStorage() { if (empty()) { return; } if (from_blob_) { return; } if (device_ == -1) { if (pinned_) { #ifdef TV_CUDA cudaFreeHost(mPtr); #endif } else { delete[] mPtr; } } else { #ifdef TV_CUDA cudaFree(mPtr); #endif } }; inline size_t size() const { return mSize; } T *data() { return mPtr; } const T *data() const { return mPtr; } bool empty() const { return mPtr == nullptr || mSize == 0; } bool managed() const { return managed_; } bool pinned() const { return pinned_; } int device() const { return device_; } void zero_() { if (device_ == -1) { std::memset(data(), 0, mSize); // std::fill(data(), data() + mSize, 0); } else { #ifdef TV_CUDA checkCudaErrors(cudaMemset(data(), 0, mSize / sizeof(T))); #else TV_THROW_INVALID_ARG("don't compiled with cuda"); #endif } } private: size_t mSize = 0; T *mPtr = nullptr; bool from_blob_ = false; int device_ = -1; bool managed_ = false; bool pinned_ = false; }; template <typename T> size_t sizeof_dtype(T dtype) { switch (dtype) { case float32: return sizeof(float); case int8: return sizeof(int8_t); case int16: return sizeof(int16_t); case int32: return sizeof(int32_t); case float64: return sizeof(double); case int64: return sizeof(int64_t); case bool_: return 
sizeof(bool); case uint8: return sizeof(uint8_t); case uint16: return sizeof(uint16_t); case uint32: return sizeof(uint32_t); case uint64: return sizeof(uint64_t); case float16: return 2; default: TV_THROW_RT_ERR("unsupported dtype"); } return 0; } template <typename T> std::string typeString(T t) { switch (t) { case DType::bool_: return "bool"; case DType::float32: return "float32"; case DType::int8: return "int8"; case DType::int16: return "int16"; case DType::int32: return "int32"; case DType::float64: return "float64"; case DType::int64: return "int64"; case DType::uint8: return "uint8"; case DType::uint16: return "uint16"; case DType::uint32: return "uint32"; case DType::uint64: return "uint64"; case DType::float16: return "half"; default: return ""; } } template <typename T> struct TypeToDtypeTraits; template <> struct TypeToDtypeTraits<int32_t> { static constexpr DType dtype = int32; }; #ifdef TV_CUDA template <> struct TypeToDtypeTraits<__half> { static constexpr DType dtype = float16; }; #endif template <> struct TypeToDtypeTraits<float> { static constexpr DType dtype = float32; }; template <> struct TypeToDtypeTraits<double> { static constexpr DType dtype = float64; }; template <> struct TypeToDtypeTraits<int16_t> { static constexpr DType dtype = int16; }; template <> struct TypeToDtypeTraits<int8_t> { static constexpr DType dtype = int8; }; template <> struct TypeToDtypeTraits<int64_t> { static constexpr DType dtype = int64; }; template <> struct TypeToDtypeTraits<uint8_t> { static constexpr DType dtype = uint8; }; template <> struct TypeToDtypeTraits<uint16_t> { static constexpr DType dtype = uint16; }; template <> struct TypeToDtypeTraits<uint32_t> { static constexpr DType dtype = uint32; }; template <> struct TypeToDtypeTraits<uint64_t> { static constexpr DType dtype = uint64; }; template <> struct TypeToDtypeTraits<bool> { static constexpr DType dtype = bool_; }; template <> struct TypeToDtypeTraits<const int32_t> { static constexpr DType dtype = 
int32; }; #ifdef TV_CUDA template <> struct TypeToDtypeTraits<const __half> { static constexpr DType dtype = float16; }; #endif template <> struct TypeToDtypeTraits<const float> { static constexpr DType dtype = float32; }; template <> struct TypeToDtypeTraits<const double> { static constexpr DType dtype = float64; }; template <> struct TypeToDtypeTraits<const int16_t> { static constexpr DType dtype = int16; }; template <> struct TypeToDtypeTraits<const int8_t> { static constexpr DType dtype = int8; }; template <> struct TypeToDtypeTraits<const int64_t> { static constexpr DType dtype = int64; }; template <> struct TypeToDtypeTraits<const uint8_t> { static constexpr DType dtype = uint8; }; template <> struct TypeToDtypeTraits<const uint16_t> { static constexpr DType dtype = uint16; }; template <> struct TypeToDtypeTraits<const uint32_t> { static constexpr DType dtype = uint32; }; template <> struct TypeToDtypeTraits<const uint64_t> { static constexpr DType dtype = uint64; }; template <> struct TypeToDtypeTraits<const bool> { static constexpr DType dtype = bool_; }; } // namespace detail template <class T> constexpr DType type_v = detail::TypeToDtypeTraits<T>::dtype; template <class... Ts, typename F> bool dispatch_noexcept(DType t, F &&f) { static_assert(sizeof...(Ts) > 0, "you need to provide at least one type"); bool notFound = true; mp_for_each<mp_list<Ts...>>([=, &notFound, &f](auto I) { if (type_v<TV_DECLTYPE(I)> == t && notFound) { std::forward<F>(f)(TV_DECLTYPE(I)()); notFound = false; } }); return !notFound; } template <class... Ts, typename F> void dispatch(DType t, F &&f) { if (!dispatch_noexcept<Ts...>(t, std::forward<F>(f))) { std::stringstream ss; mp_for_each<mp_list<Ts...>>([=, &ss](auto I) { ss << detail::TypeToString<TV_DECLTYPE(I)>::value << " "; }); TV_THROW_RT_ERR("unknown type", detail::typeString(t), ", available:", ss.str()); } } template <typename T, T... 
Is, typename F> void dispatch_scalar(T idx, F &&f) { static_assert(sizeof...(Is) > 0, "you need to provide at least one candidate"); bool notFound = true; mp_for_each<mp_list_c<T, Is...>>([=, &notFound, &f](auto I) { if (T(I) == idx && notFound) { std::forward<F>(f)(I); notFound = false; } }); if (notFound) { std::stringstream ss; mp_for_each<mp_list_c<T, Is...>>([=, &ss](auto I) { ss << T(I) << " "; }); TV_THROW_RT_ERR("unknown value", idx, ", available:", ss.str()); } } template <int... Is, typename F> bool dispatch_int_noexcept(int idx, F &&f) { static_assert(sizeof...(Is) > 0, "you need to provide at least one candidate"); bool notFound = true; mp_for_each<mp_list_c<int, Is...>>([=, &notFound, &f](auto I) { if (TV_DECLTYPE(I)::value == idx && notFound) { std::forward<F>(f)(I); notFound = false; } }); return !notFound; } template <int... Is, typename F, class BinaryPredicate> bool dispatch_int_noexcept(int idx, BinaryPredicate p, F &&f) { static_assert(sizeof...(Is) > 0, "you need to provide at least one candidate"); bool notFound = true; mp_for_each<mp_list_c<int, Is...>>([=, &notFound, &f](auto I) { if (p(idx, TV_DECLTYPE(I)::value) && notFound) { std::forward<F>(f)(I); notFound = false; } }); return !notFound; } template <int... Is, typename F> void dispatch_int(int idx, F &&f) { if (!dispatch_int_noexcept<Is...>(idx, std::forward<F>(f))) { std::stringstream ss; mp_for_each<mp_list_c<int, Is...>>( [=, &ss](auto I) { ss << TV_DECLTYPE(I)::value << " "; }); TV_THROW_RT_ERR("unknown value", idx, ", available:", ss.str()); } } template <int... 
Is, typename F, class BinaryPredicate> void dispatch_int(int idx, BinaryPredicate p, F &&f) { // BinaryPredicate: BinaryPredicate(idx, candidate) if (!dispatch_int_noexcept<Is...>(idx, p, std::forward<F>(f))) { std::stringstream ss; mp_for_each<mp_list_c<int, Is...>>( [=, &ss](auto I) { ss << TV_DECLTYPE(I)::value << " "; }); TV_THROW_RT_ERR("unknown value", idx, ", available:", ss.str()); } } // Ts is pack of mp_list_c template <class... Ts, typename Iterator, typename F> bool dispatch_container_noexcept(Iterator begin, Iterator end, F &&f) { static_assert(sizeof...(Ts) > 0, "you need to provide at least one candidate"); bool notFound = true; mp_for_each<mp_list<Ts...>>([=, &notFound, &f](auto I) { using val_lst_t = TV_DECLTYPE(I); auto val_lst_size = mp_size<val_lst_t>::value; bool equal = true; std::size_t count = 0; auto iter = begin; mp_for_each<val_lst_t>([&](auto E) { if (iter == end || !equal) { return; } if (count >= val_lst_size) { TV_THROW_INVALID_ARG("iterator length invalid:", val_lst_size); } constexpr auto c = TV_DECLTYPE(E)::value; if (c != *iter) { equal = false; } ++count; std::advance(iter, 1); }); if (count != val_lst_size || iter != end) { equal = false; } if (equal && notFound) { std::forward<F>(f)(I); notFound = false; } }); return !notFound; } template <class... Ts, typename Iterator, typename F> void dispatch_container(Iterator begin, Iterator end, F &&f) { if (!dispatch_container_noexcept<Ts...>(begin, end, std::forward<F>(f))) { std::stringstream ss; ss << "unknown value ["; for (auto iter = begin; iter != end; std::advance(iter, 1)) { ss << *iter << ","; } ss << "], available: "; mp_for_each<mp_list<Ts...>>([=, &ss](auto I) { ss << "["; mp_for_each<TV_DECLTYPE(I)>( [=, &ss](auto E) { ss << TV_DECLTYPE(E)::value << ","; }); ss << "]"; }); TV_THROW_RT_ERR(ss.str()); } } /* template <int... 
Is, typename F> void dispatch_int(int idx, F &&f) { return dispatch_scalar<int, Is...>(idx, f); } */ template <class T> struct Dispatch; template <template <class...> class T, class... Args> struct Dispatch<T<Args...>> { template <typename F> inline void operator()(DType t, F &&f) { return dispatch<Args...>(t, std::forward<F>(f)); } }; template <class T> struct DispatchContainer; template <template <class...> class T, class... Args> struct DispatchContainer<T<Args...>> { template <typename Iterator, typename F> inline void operator()(Iterator begin, Iterator end, F &&f) { return dispatch_container<Args...>(begin, end, std::forward<F>(f)); } }; template <class T> struct DispatchContainerNoexcept; template <template <class...> class T, class... Args> struct DispatchContainerNoexcept<T<Args...>> { template <typename Iterator, typename F> inline bool operator()(Iterator begin, Iterator end, F &&f) { return dispatch_container_noexcept<Args...>(begin, end, std::forward<F>(f)); } }; template <class T> struct DispatchInt; // Args should be std::integral_constant<int, value> // you need to use type_container<std::integral_constant<int, value>...> // as template parameter of DispatchInt. // tv::mp_list_c is ok. template <template <class...> class T, class... 
Args> struct DispatchInt<T<Args...>> { template <typename F> inline void operator()(int t, F &&f) { return dispatch_int<Args::value...>(t, std::forward<F>(f)); } template <typename F, typename BinaryPredicate> inline void operator()(int t, BinaryPredicate p, F &&f) { return dispatch_int<Args::value...>(t, p, std::forward<F>(f)); } }; constexpr size_t kTensorMaxDim = 10; using TensorShape = ShapeBase<kTensorMaxDim, int64_t>; struct Tensor { Tensor() {} Tensor(TensorShape shape, TensorShape stride, DType dtype, int device = -1, bool pinned = false, bool managed = false) : dtype_(dtype) { TV_ASSERT_INVALID_ARG(!shape.empty(), "dont support empty shape"); storage_ = std::make_shared<detail::TensorStorage<uint8_t>>( shape.size() * detail::sizeof_dtype(dtype), device, managed, pinned); shape_ = shape; stride_ = stride; } Tensor(TensorShape shape, DType dtype, int device = -1, bool pinned = false, bool managed = false) : dtype_(dtype) { TV_ASSERT_INVALID_ARG(!shape.empty(), "dont support empty shape"); storage_ = std::make_shared<detail::TensorStorage<uint8_t>>( shape.size() * detail::sizeof_dtype(dtype), device, managed, pinned); shape_ = shape; stride_ = shape.stride_rowmajor(); } Tensor(void *ptr, TensorShape shape, TensorShape stride, DType dtype, int device = -1) : dtype_(dtype) { TV_ASSERT_INVALID_ARG(!shape.empty(), "dont support empty shape"); storage_ = std::make_shared<detail::TensorStorage<uint8_t>>( reinterpret_cast<uint8_t *>(ptr), shape.size() * detail::sizeof_dtype(dtype), device); shape_ = shape; stride_ = stride; } Tensor(void *ptr, TensorShape shape, DType dtype, int device = -1) : dtype_(dtype) { TV_ASSERT_INVALID_ARG(!shape.empty(), "dont support empty shape"); storage_ = std::make_shared<detail::TensorStorage<uint8_t>>( reinterpret_cast<uint8_t *>(ptr), shape.size() * detail::sizeof_dtype(dtype), device); shape_ = shape; stride_ = shape.stride_rowmajor(); } Tensor(const void *ptr, TensorShape shape, TensorShape stride, DType dtype, int device = -1) : 
dtype_(dtype), writeable_(false) { TV_ASSERT_INVALID_ARG(!shape.empty(), "dont support empty shape"); storage_ = std::make_shared<detail::TensorStorage<uint8_t>>( reinterpret_cast<uint8_t *>(const_cast<void *>(ptr)), shape.size() * detail::sizeof_dtype(dtype), device); shape_ = shape; stride_ = stride; } Tensor(const void *ptr, TensorShape shape, DType dtype, int device = -1) : dtype_(dtype), writeable_(false) { TV_ASSERT_INVALID_ARG(!shape.empty(), "dont support empty shape"); storage_ = std::make_shared<detail::TensorStorage<uint8_t>>( reinterpret_cast<uint8_t *>(const_cast<void *>(ptr)), shape.size() * detail::sizeof_dtype(dtype), device); shape_ = shape; stride_ = shape.stride_rowmajor(); } Tensor(std::initializer_list<int32_t> init) : Tensor({int(init.size())}, tv::int32) { std::copy(init.begin(), init.end(), data<int32_t>()); } Tensor(std::initializer_list<int64_t> init) : Tensor({int(init.size())}, tv::int64) { std::copy(init.begin(), init.end(), data<int64_t>()); } Tensor(std::initializer_list<float> init) : Tensor({int(init.size())}, tv::float32) { std::copy(init.begin(), init.end(), data<float>()); } Tensor(std::initializer_list<double> init) : Tensor({int(init.size())}, tv::float64) { std::copy(init.begin(), init.end(), data<double>()); } template <typename T, int Rank = -1, template <class> class PtrTraits = DefaultPtrTraits, typename Tindex = int, typename std::enable_if<(Rank > 0), int>::type = 0> TensorView<T, Rank, PtrTraits, Tindex> tview() { using tv_shape_t = typename TensorView<T, Rank, PtrTraits, Tindex>::tv_shape_t; writable_check(); static_assert(Rank == -1 || Rank > 0, "error"); TV_ASSERT_RT_ERR(dtype_ == type_v<T>, "error"); tv_shape_t shape(Rank), stride(Rank); for (int i = 0; i < Rank; ++i) { shape[i] = shape_[i]; stride[i] = stride_[i]; } return TensorView<T, Rank, PtrTraits, Tindex>( reinterpret_cast<T *>(data<T>()), shape, stride); } template <typename T, int Rank = -1, template <class> class PtrTraits = DefaultPtrTraits, typename 
Tindex = int, typename std::enable_if<Rank == -1, int>::type = 0> TensorView<T, Rank, PtrTraits, Tindex> tview() { writable_check(); static_assert(Rank == -1 || Rank > 0, "error"); TV_ASSERT_RT_ERR(dtype_ == type_v<T>, "error"); ShapeBase<TV_MAX_DIM, Tindex> shape(ndim()), stride(ndim()); for (size_t i = 0; i < ndim(); ++i) { shape[i] = shape_[i]; stride[i] = stride_[i]; } return TensorView<T, Rank, PtrTraits, Tindex>( reinterpret_cast<T *>(data<T>()), shape, stride); } template <typename T, int Rank = -1, template <class> class PtrTraits = DefaultPtrTraits, typename Tindex = int, typename std::enable_if<(Rank > 0), int>::type = 0> TensorView<const std::remove_const_t<T>, Rank, PtrTraits, Tindex> tview() const { static_assert(Rank == -1 || Rank > 0, "error"); if (Rank > 0) { TV_ASSERT_RT_ERR(Rank == ndim(), "error"); } TV_ASSERT_RT_ERR(dtype_ == type_v<T>, "error"); ShapeBase<Rank == -1 ? TV_MAX_DIM : Rank, Tindex> shape(Rank), stride(Rank); for (int i = 0; i < Rank; ++i) { shape[i] = shape_[i]; stride[i] = stride_[i]; } return TensorView<const std::remove_const_t<T>, Rank, PtrTraits, Tindex>( reinterpret_cast<const std::remove_const_t<T> *>(data<T>()), shape, stride); } template <typename T, int Rank = -1, template <class> class PtrTraits = DefaultPtrTraits, typename Tindex = int, typename std::enable_if<Rank == -1, int>::type = 0> TensorView<const std::remove_const_t<T>, Rank, PtrTraits, Tindex> tview() const { static_assert(Rank == -1 || Rank > 0, "error"); if (Rank > 0) { TV_ASSERT_RT_ERR(Rank == ndim(), "error"); } TV_ASSERT_RT_ERR(dtype_ == type_v<T>, "error"); ShapeBase<TV_MAX_DIM, Tindex> shape(ndim()), stride(ndim()); for (int i = 0; i < int(ndim()); ++i) { shape[i] = shape_[i]; stride[i] = stride_[i]; } return TensorView<const std::remove_const_t<T>, Rank, PtrTraits, Tindex>( reinterpret_cast<const std::remove_const_t<T> *>(data<T>()), shape, stride); } template <class... Inds> Tensor view(Inds... 
newShapes) const { static_assert(sizeof...(newShapes) > 0, "dont support empty for now"); TensorShape shape{int(newShapes)...}; bool found_minus_1 = false; for (size_t i = 0; i < shape.ndim(); ++i) { if (!found_minus_1) { if (shape[i] == -1) { shape[i] = 1; shape[i] = size() / shape.size(); found_minus_1 = true; } else { TV_ASSERT_INVALID_ARG(shape[i] > 0, "shape except -1 must larger than 0"); } } else { TV_ASSERT_INVALID_ARG(shape[i] > 0, "multiple -1 in your argument."); } } TV_ASSERT_RT_ERR(shape.size() == size(), "error"); Tensor res(*this); res.shape_ = shape; res.stride_ = shape.stride_rowmajor(); return res; } Tensor view(TensorShape shape) const { TV_ASSERT_RT_ERR(shape.size() == size(), "error"); Tensor res(*this); res.shape_ = shape; res.stride_ = shape.stride_rowmajor(); return res; } Tensor operator[](int64_t index) { TV_ASSERT_INVALID_ARG(ndim() > 1, "error"); if (index < 0) { index += dim(0); } TV_ASSERT_INVALID_ARG(index < dim(0), "error"); Tensor res = Tensor(); res.storage_ = storage_; res.shape_ = shape_.subshape(1); res.offset_ = offset_ + index * stride_[0]; res.stride_ = stride_.subshape(1); res.writeable_ = writeable_; return res; } Tensor squeeze() const { return view(shape_.squeeze()); } Tensor squeeze(int axis) const { if (axis < 0) { axis = ndim() + axis; } return view(shape_.squeeze(axis)); } Tensor unsqueeze(int axis) const { if (axis < 0) { axis = ndim() + axis; } return view(shape_.unsqueeze(axis)); } bool pinned() const { return storage_->pinned(); } Tensor slice_first_axis(int start, int end) const { TV_ASSERT_INVALID_ARG(contiguous_, "only support contiguous for now"); if (start < 0) { start = shape_[0] + start; } if (end < 0) { end = shape_[0] + end; } TV_ASSERT_INVALID_ARG(start < shape_[0], "start must small than dim 0"); TV_ASSERT_INVALID_ARG(start < end, "start must small than end"); size_t new_offset = start * shape_.prod(1) * itemsize(); Tensor res(*this); TensorShape newshape(shape_); newshape[0] = end - start; res.shape_ = 
newshape; res.stride_ = stride_; res.offset_ = new_offset; return res; } bool empty() const { return storage_->empty(); } DType dtype() const { return dtype_; } int device() const { return storage_->device(); } size_t ndim() const { return shape_.ndim(); } const TensorShape &shape() const { return shape_; } const TensorShape &sizes() const { return shape_; } const TensorShape &stride() const { return stride_; } int dim(int idx) const { if (idx < 0) { TV_ASSERT_RT_ERR(shape_.size() + idx < shape_.size(), idx, shape_); return shape_[shape_.size() + idx]; } else { TV_ASSERT_RT_ERR(idx < int(shape_.size()), idx, shape_); return shape_[idx]; } } const uint8_t *raw_data() const { return storage_->data() + offset_; } size_t raw_size() const { return size() * itemsize(); } size_t size() const { return shape_.size(); } size_t size(int64_t idx) const { return dim(idx); } size_t itemsize() const { return detail::sizeof_dtype(dtype_); } Tensor &zero_() { writable_check(); storage_->zero_(); return *this; } uint8_t *raw_data() { writable_check(); return storage_->data() + offset_; } template <typename T> Tensor &fill_(T value) { writable_check(); TV_ASSERT_RT_ERR(device() == -1, "error"); Dispatch<detail::all_tensor_types_t>()(dtype_, [&](auto I) { using Treal = TV_DECLTYPE(I); if (std::is_convertible<T, Treal>::value) { auto ptr = reinterpret_cast<Treal *>(raw_data()); std::fill(ptr, ptr + size(), Treal(value)); } else { TV_THROW_INVALID_ARG("not convertable from", type_s<T>, "to", type_s<Treal>); } }); return *this; } template <typename T> T *data() { TV_ASSERT_RT_ERR(dtype_ == type_v<T>, "error"); writable_check(); return reinterpret_cast<T *>(raw_data()); } template <typename T> const T *data() const { TV_ASSERT_RT_ERR(dtype_ == type_v<T>, "error"); return reinterpret_cast<const T *>(raw_data()); } template <typename T> T *data_ptr() { return data<T>(); } template <typename T> const T *data_ptr() const { return data<T>(); } void *data_ptr() { return reinterpret_cast<void 
*>(raw_data()); } const void *data_ptr() const { return reinterpret_cast<const void *>(raw_data()); } void copy_(const Tensor &tensor) { writable_check(); TV_ASSERT_INVALID_ARG(contiguous_, "only support contiguous for now"); TV_ASSERT_RT_ERR(!empty() && !tensor.empty(), "must not empty"); TV_ASSERT_RT_ERR(size() == tensor.size(), "must have same size"); TV_ASSERT_RT_ERR(dtype() == tensor.dtype(), "must have same dtype", detail::typeString(dtype()), detail::typeString(tensor.dtype())); if (device() == -1 && tensor.device() == -1) { #ifdef TV_CUDA host2host(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_)); #else std::copy(tensor.raw_data(), tensor.raw_data() + size() * detail::sizeof_dtype(dtype_), storage_->data()); #endif } #ifdef TV_CUDA else if (device() >= 0 && tensor.device() == -1) { host2dev(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_)); } else if (device() == -1 && tensor.device() >= 0) { dev2host(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_)); } else if (device() >= 0 && tensor.device() >= 0) { dev2dev(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_)); } #endif else { TV_THROW_RT_ERR("only support cpu tensor"); } } #ifdef TV_CUDA void copy_(const Tensor &tensor, cudaStream_t stream) { writable_check(); TV_ASSERT_INVALID_ARG(contiguous_, "only support contiguous for now"); TV_ASSERT_RT_ERR(!empty() && !tensor.empty(), "must not empty"); TV_ASSERT_RT_ERR(size() == tensor.size(), "must have same size"); TV_ASSERT_RT_ERR(dtype() == tensor.dtype(), "must have same dtype", detail::typeString(dtype()), detail::typeString(tensor.dtype())); if (device() == -1 && tensor.device() == -1) { host2host(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_), stream); } else if (device() >= 0 && tensor.device() == -1) { host2dev(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_), stream); } else if (device() == -1 && 
tensor.device() >= 0) { dev2host(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_), stream); } else if (device() >= 0 && tensor.device() >= 0) { dev2dev(storage_->data(), tensor.raw_data(), size() * detail::sizeof_dtype(dtype_), stream); } else { TV_THROW_RT_ERR("only support cpu tensor"); } } #endif Tensor cpu() const { if (storage_->device() == -1) { // cpu() should always copy tensor. return clone(); } Tensor res(shape_, stride_, dtype_, -1, storage_->managed()); res.copy_(*this); return res; } template <typename T> void copy_(const TensorView<T> &tensor, int device) { writable_check(); TV_ASSERT_INVALID_ARG(contiguous_, "only support contiguous for now"); Tensor src = from_blob(tensor, device); return copy_(src); } Tensor &operator=(const Tensor &tensor) { dtype_ = tensor.dtype_; storage_ = tensor.storage_; shape_ = tensor.shape_; writeable_ = tensor.writeable_; offset_ = tensor.offset_; stride_ = tensor.stride_; return *this; } Tensor(const Tensor &tensor) { dtype_ = tensor.dtype_; storage_ = tensor.storage_; shape_ = tensor.shape_; writeable_ = tensor.writeable_; offset_ = tensor.offset_; stride_ = tensor.stride_; } Tensor clone(bool pinned = false) const { TV_ASSERT_RT_ERR(!empty(), "clone a empty tensor"); TV_ASSERT_INVALID_ARG(contiguous_, "only support contiguous for now"); Tensor newtensor(shape_, stride_, dtype_, device(), pinned, storage_->managed()); newtensor.copy_(*this); return newtensor; } Tensor astype(DType dtype) { if (dtype == dtype_) { return clone(); } TV_ASSERT_INVALID_ARG(device() == -1, "only support cpu tensor"); TV_ASSERT_INVALID_ARG(!empty(), "can't be used in empty tensor"); TV_ASSERT_INVALID_ARG(contiguous_, "only support contiguous for now"); auto tensor = Tensor(); Dispatch<detail::all_tensor_types_t>()(dtype, [&](auto Idst) { using Tdst = TV_DECLTYPE(Idst); Dispatch<detail::all_tensor_types_t>()(this->dtype_, [&](auto Icur) { using Tcur = TV_DECLTYPE(Icur); if (std::is_convertible<Tcur, Tdst>::value) { 
auto ptr = this->data<Tcur>(); tensor = Tensor(this->shape_, this->stride_, dtype, this->device(), this->pinned(), this->storage_->managed()); std::copy(ptr, ptr + this->size(), tensor.data<Tdst>()); } else { TV_THROW_INVALID_ARG("not convertable from", type_s<Tcur>, "to", type_s<Tdst>); } }); }); return tensor; } template <class... Ts, typename F> inline void dispatch(F &&f) { return tv::dispatch<Ts...>(dtype_, std::forward<F>(f)); } protected: inline void writable_check() { TV_ASSERT_RT_ERR(writeable_, "you cant do non-const operation when not writable"); } DType dtype_; std::shared_ptr<detail::TensorStorage<uint8_t>> storage_; TensorShape shape_; size_t offset_ = 0; TensorShape stride_; private: bool writeable_ = true; bool contiguous_ = true; }; template <typename Os> Os &operator<<(Os &os, const Tensor &tensor) { TV_ASSERT_INVALID_ARG(tensor.device() == -1, "must be cpu tensor"); Dispatch<detail::all_tensor_types_t>()(tensor.dtype(), [&](auto I) { using T = TV_DECLTYPE(I); std::stringstream ss; if (std::is_same<T, float>::value || std::is_same<T, double>::value) { ss << std::setprecision(4); } os << tensor.tview<T, -1, DefaultPtrTraits, int64_t>().repr(ss); }); return os; } inline Tensor from_blob(void *ptr, TensorShape shape, DType dtype, int device) { return Tensor(ptr, shape, dtype, device); } inline Tensor from_blob(const void *ptr, TensorShape shape, DType dtype, int device) { return Tensor(ptr, shape, dtype, device); } } // namespace tv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tensorview/tensorview.h
C/C++ Header
// Copyright 2019-2020 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "common.h" #include "prettyprint.h" #include <algorithm> #include <cassert> #include <cstdlib> #include <iostream> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <vector> #ifdef TV_CUDA #include <cuda_runtime_api.h> #endif namespace tv { #if (defined(__clang__) && defined(__CUDA__)) || defined(__NVCC__) #define TV_HOST_DEVICE_INLINE __forceinline__ __device__ __host__ #define TV_DEVICE_INLINE __forceinline__ __device__ #define TV_HOST_DEVICE __device__ __host__ #define TV_ASSERT(expr) assert(expr) #elif defined(__CUDACC_RTC__) #define TV_ASSERT(expr) assert(expr) #define TV_HOST_DEVICE_INLINE __forceinline__ __device__ #define TV_DEVICE_INLINE __forceinline__ __device__ #define TV_HOST_DEVICE __device__ __host__ #else #define TV_ASSERT(x) assert(x) #define TV_HOST_DEVICE_INLINE inline #define TV_HOST_DEVICE #endif #define TV_REQUIRE(expr, ...) \ { \ if (!(expr)) { \ printf(__VA_ARGS__); \ assert(expr); \ } \ } #define TV_CHECK_CUDA_ERR() \ { \ auto __macro_err = cudaGetLastError(); \ if (__macro_err != cudaSuccess) { \ std::stringstream __macro_s; \ __macro_s << __FILE__ << " " << __LINE__ << "\n"; \ __macro_s << "cuda execution failed with error " << __macro_err; \ TV_BACKTRACE_PRINT(__macro_s); \ throw std::runtime_error(__macro_s.str()); \ } \ } #define TV_CHECK_CUDA_ERR_V2(...) 
\ { \ auto __macro_err = cudaGetLastError(); \ if (__macro_err != cudaSuccess) { \ std::stringstream __macro_s; \ __macro_s << __FILE__ << " " << __LINE__ << "\n"; \ __macro_s << "cuda execution failed with error " << __macro_err; \ __macro_s << " " << cudaGetErrorString(__macro_err) << "\n"; \ tv::sstream_print(__macro_s, __VA_ARGS__); \ TV_BACKTRACE_PRINT(__macro_s); \ throw std::runtime_error(__macro_s.str()); \ } \ } #ifdef TV_CUDA struct GPU { GPU(cudaStream_t s = 0) : mStream(s) {} virtual cudaStream_t getStream() const { return mStream; } cudaStream_t mStream = 0; }; #endif struct CPU {}; #ifndef TV_MAX_DIM #define TV_MAX_DIM 6 #endif template <typename T> struct DefaultPtrTraits { typedef T *type; }; #if defined(__CUDACC__) || defined(__HIPCC__) template <typename T> struct RestrictPtrTraits { typedef T *__restrict__ type; }; #endif /* template <typename T> constexpr size_t calc_align(size_t ndim) { if (ndim * sizeof(T) == 1) return 1; else if (ndim * sizeof(T) == 2) return 2; else if (ndim * sizeof(T) <= 4 && ndim * sizeof(T) > 2) return 4; else if (ndim * sizeof(T) <= 8 && ndim * sizeof(T) > 4) return 8; else if (ndim * sizeof(T) <= 16 && ndim * sizeof(T) > 8) return 16; else if (ndim * sizeof(T) <= 32 && ndim * sizeof(T) > 16) return 32; else return 64; } */ namespace detail { template <typename _InIter> using _RequireInputIter = typename std::enable_if<std::is_convertible< typename std::iterator_traits<_InIter>::iterator_category, std::input_iterator_tag>::value>::type; } template <typename T, size_t MaxDim = TV_MAX_DIM> struct /*alignas(calc_align<T>(MaxDim))*/ SimpleVector { public: TV_HOST_DEVICE_INLINE SimpleVector(){}; TV_HOST_DEVICE_INLINE SimpleVector(size_t count, T init = T()) : size_(count) { for (size_t i = 0; i < count; ++i) { array_[i] = init; } }; template <typename Iterator, typename = detail::_RequireInputIter<Iterator>> SimpleVector(Iterator first, Iterator last) { size_ = 0; for (; first != last; ++first) { if (size_ >= MaxDim) { 
TV_THROW_INVALID_ARG("iterator too long"); } array_[size_++] = *first; } }; TV_HOST_DEVICE_INLINE SimpleVector(std::initializer_list<T> q) { TV_ASSERT(q.size() <= MaxDim); size_ = 0; for (T s : q) { array_[size_++] = s; } size_ = q.size(); } SimpleVector(const std::vector<T> &arr) { TV_ASSERT(arr.size() <= MaxDim); for (size_t i = 0; i < arr.size(); ++i) { array_[i] = arr[i]; } size_ = arr.size(); } TV_HOST_DEVICE_INLINE SimpleVector(const SimpleVector<T, MaxDim> &arr) { TV_ASSERT(arr.size() <= MaxDim); for (size_t i = 0; i < arr.size(); ++i) { array_[i] = arr[i]; } size_ = arr.size(); } TV_HOST_DEVICE_INLINE T &operator[](int idx) { #ifdef TV_DEBUG TV_ASSERT(idx >= 0 && idx < size_); #endif return array_[idx]; } TV_HOST_DEVICE_INLINE const T &operator[](int idx) const { #ifdef TV_DEBUG TV_ASSERT(idx >= 0 && idx < size_); #endif return array_[idx]; } TV_HOST_DEVICE_INLINE void push_back(T s) { #ifdef TV_DEBUG TV_ASSERT(size_ < MaxDim); #endif array_[size_] = s; size_++; } TV_HOST_DEVICE_INLINE void pop_back() { #ifdef TV_DEBUG TV_ASSERT(size_ > 0); #endif size_--; } TV_HOST_DEVICE_INLINE size_t size() const { return size_; } TV_HOST_DEVICE_INLINE const T *data() const { return array_; } TV_HOST_DEVICE_INLINE T *data() { return array_; } TV_HOST_DEVICE_INLINE size_t empty() const { return size_ == 0; } typedef size_t size_type; class iterator { public: typedef iterator self_type; typedef T value_type; typedef T &reference; typedef T *pointer; typedef std::forward_iterator_tag iterator_category; typedef std::ptrdiff_t difference_type; TV_HOST_DEVICE_INLINE iterator(pointer ptr) : ptr_(ptr) {} TV_HOST_DEVICE_INLINE self_type operator++(int junk) { self_type i = *this; ptr_++; return i; } TV_HOST_DEVICE_INLINE self_type operator++() { ptr_++; return *this; } TV_HOST_DEVICE_INLINE reference operator*() { return *ptr_; } TV_HOST_DEVICE_INLINE pointer operator->() { return ptr_; } TV_HOST_DEVICE_INLINE bool operator==(const self_type &rhs) const { return ptr_ == rhs.ptr_; 
} TV_HOST_DEVICE_INLINE bool operator!=(const self_type &rhs) const { return ptr_ != rhs.ptr_; } private: pointer ptr_; }; class const_iterator { public: typedef const_iterator self_type; typedef T value_type; typedef const T &reference; typedef const T *pointer; typedef std::ptrdiff_t difference_type; typedef std::forward_iterator_tag iterator_category; TV_HOST_DEVICE_INLINE const_iterator(pointer ptr) : ptr_(ptr) {} TV_HOST_DEVICE_INLINE self_type operator++(int junk) { self_type i = *this; ptr_++; return i; } TV_HOST_DEVICE_INLINE self_type operator++() { ptr_++; return *this; } TV_HOST_DEVICE_INLINE reference operator*() { return *ptr_; } TV_HOST_DEVICE_INLINE pointer operator->() { return ptr_; } TV_HOST_DEVICE_INLINE bool operator==(const self_type &rhs) const { return ptr_ == rhs.ptr_; } TV_HOST_DEVICE_INLINE bool operator!=(const self_type &rhs) const { return ptr_ != rhs.ptr_; } private: pointer ptr_; }; TV_HOST_DEVICE_INLINE iterator begin() { return iterator(array_); } TV_HOST_DEVICE_INLINE iterator end() { return iterator(array_ + size_); } TV_HOST_DEVICE_INLINE const_iterator begin() const { return const_iterator(array_); } TV_HOST_DEVICE_INLINE const_iterator end() const { return const_iterator(array_ + size_); } TV_HOST_DEVICE_INLINE const_iterator cbegin() const { return const_iterator(array_); } TV_HOST_DEVICE_INLINE const_iterator cend() const { return const_iterator(array_ + size_); } protected: T array_[MaxDim]; size_t size_ = 0; }; template <typename T, size_t MaxDim> bool operator==(const SimpleVector<T, MaxDim> &lfs, const SimpleVector<T, MaxDim> &rfs) { if (lfs.size() != rfs.size()) return false; for (size_t i = 0; i < lfs.size(); ++i) { if (lfs[i] != rfs[i]) return false; } return true; } template <typename T, size_t MaxDim> bool operator!=(const SimpleVector<T, MaxDim> &lfs, const SimpleVector<T, MaxDim> &rfs) { return !(lfs == rfs); } struct Slice { template <class... Integers> TV_HOST_DEVICE_INLINE Slice(Integers... 
ints) { static_assert(sizeof...(ints) <= 3, "slice init must smaller than 3"); SimpleVector<int, 3> slices{int(ints)...}; slices_[0] = -1; slices_[1] = -1; slices_[2] = -1; for (size_t i = 0; i < slices.size(); ++i) { slices_[i] = slices[i]; } } TV_HOST_DEVICE_INLINE Slice() { slices_[0] = -1; slices_[1] = -1; slices_[2] = -1; } template <typename T> TV_HOST_DEVICE_INLINE Slice(std::initializer_list<T> slice) { slices_[0] = -1; slices_[1] = -1; slices_[2] = -1; TV_ASSERT(slice.size() <= 3); int idx = 0; for (T s : slice) { slices_[idx] = int(s); ++idx; } } TV_HOST_DEVICE_INLINE int &operator[](int idx) { #ifdef TV_DEBUG TV_ASSERT(idx >= 0 && idx < 3); #endif return slices_[idx]; } TV_HOST_DEVICE_INLINE const int &operator[](int idx) const { #ifdef TV_DEBUG TV_ASSERT(idx >= 0 && idx < 3); #endif return slices_[idx]; } protected: int slices_[3]; }; template <size_t MaxDim = TV_MAX_DIM, typename Tindex = int> struct ShapeBase : public SimpleVector<Tindex, MaxDim> { TV_HOST_DEVICE_INLINE ShapeBase() : SimpleVector<Tindex, MaxDim>(){}; TV_HOST_DEVICE_INLINE ShapeBase(std::initializer_list<Tindex> shape) : SimpleVector<Tindex, MaxDim>(shape) {} TV_HOST_DEVICE_INLINE ShapeBase(SimpleVector<Tindex, MaxDim> vec) : SimpleVector<Tindex, MaxDim>(vec) {} template <typename T, template <class...> class Container> ShapeBase(Container<T> shape) : SimpleVector<Tindex, MaxDim>(shape) {} TV_HOST_DEVICE_INLINE ShapeBase(const ShapeBase<MaxDim> &shape) : SimpleVector<Tindex, MaxDim>(shape) {} ShapeBase(const std::vector<Tindex> &arr) : SimpleVector<Tindex, MaxDim>(arr) {} ShapeBase<MaxDim, Tindex> & operator=(const ShapeBase<MaxDim, Tindex> &shape) = default; TV_HOST_DEVICE ShapeBase<MaxDim, Tindex> subshape(Tindex start, Tindex end) const { #ifdef TV_DEBUG TV_ASSERT(start >= 0 && end <= this->size_ && end > start); #endif ShapeBase<MaxDim, Tindex> shape; for (Tindex i = start; i < end; ++i) { shape.push_back(this->array_[i]); } return shape; } TV_HOST_DEVICE ShapeBase<MaxDim, Tindex> 
subshape(Tindex start) const { #ifdef TV_DEBUG TV_ASSERT(start >= 0 && start <= this->size_); #endif ShapeBase<MaxDim, Tindex> shape; for (size_t i = start; i < this->size_; ++i) { shape.push_back(this->array_[i]); } return shape; } TV_HOST_DEVICE size_t size() const { if (this->size_ == 0) return 0; size_t s = 1; for (int i = 0; i < int(this->size_); ++i) { s *= this->array_[i]; } return s; } TV_HOST_DEVICE_INLINE size_t ndim() const { return this->size_; } TV_HOST_DEVICE ShapeBase<MaxDim, Tindex> squeeze() const { ShapeBase<MaxDim, Tindex> shape; for (size_t i = 0; i < this->size_; ++i) { if (this->array_[i] != 1) shape.push_back(this->array_[i]); } if (shape.empty()) { // dont support empty shape for now shape.push_back(1); } return shape; } template <size_t MaxDim2 = MaxDim> TV_HOST_DEVICE ShapeBase<MaxDim2, Tindex> squeeze(int dim) const { static_assert(MaxDim2 >= MaxDim - 1, "error"); ShapeBase<MaxDim2, Tindex> shape; for (size_t i = 0; i < this->size_; ++i) { if (i != size_t(dim) || this->array_[i] != 1) shape.push_back(this->array_[i]); } return shape; } template <size_t MaxDim2 = MaxDim> TV_HOST_DEVICE ShapeBase<MaxDim2, Tindex> unsqueeze(int dim) const { static_assert(MaxDim2 >= MaxDim - 1, "error"); ShapeBase<MaxDim2, Tindex> shape; for (size_t i = 0; i < this->size_; ++i) { if (i == size_t(dim)) shape.push_back(1); shape.push_back(this->array_[i]); } return shape; } TV_HOST_DEVICE size_t prod(Tindex start = 0) const { size_t res = 1; for (size_t i = start; i < this->size_; ++i) { res *= this->array_[i]; } return res; } template <size_t MaxDim2 = MaxDim> TV_HOST_DEVICE ShapeBase<MaxDim2, Tindex> stride_rowmajor() { static_assert(MaxDim2 >= MaxDim, "error"); Tindex p = Tindex(1); ShapeBase<MaxDim2, Tindex> res(this->size_); for (Tindex i = this->size_ - 1; i >= 0; --i) { res[i] = p; p *= this->array_[i]; } return res; } }; using Shape = ShapeBase<TV_MAX_DIM, int>; template <class... 
Inds> TV_HOST_DEVICE_INLINE unsigned rowArrayIdx(std::vector<int> &shape, Inds... indexes) { unsigned offset = 0; unsigned m = 1; int indexes_vec[sizeof...(indexes)] = {indexes...}; #ifdef TV_DEBUG TV_ASSERT(sizeof...(indexes) == shape.size()); #endif #if defined(__CUDA_ARCH__) #pragma unroll #endif for (int i = sizeof...(indexes) - 1; i >= 0; --i) { offset += m * indexes_vec[i]; m *= shape[i]; } return offset; } TV_HOST_DEVICE_INLINE unsigned rowArrayIdx(std::vector<int> &shape, std::vector<int> &indexes_vec) { unsigned offset = 0; unsigned m = 1; for (int i = shape.size() - 1; i >= 0; --i) { offset += m * indexes_vec[i]; m *= shape[i]; } return offset; } template <class... Inds> TV_HOST_DEVICE_INLINE unsigned rowArrayIdx(const Shape &shape, Inds... indexes) { unsigned offset = 0; unsigned m = 1; int indexes_vec[sizeof...(indexes)] = {indexes...}; #if defined(__CUDA_ARCH__) #pragma unroll #endif for (int i = sizeof...(indexes) - 1; i >= 0; --i) { offset += m * indexes_vec[i]; m *= shape[i]; } return offset; } TV_HOST_DEVICE_INLINE unsigned rowArrayIdx(const Shape &shape, const Shape &indexes_vec) { unsigned offset = 0; unsigned m = 1; for (int i = indexes_vec.ndim() - 1; i >= 0; --i) { offset += m * indexes_vec[i]; m *= shape[i]; } return offset; } template <typename Index, unsigned NDim> TV_HOST_DEVICE_INLINE unsigned rowArrayIdx(const Index *indexes, const Index *shape) { unsigned offset = 0; unsigned m = 1; #if defined(__CUDA_ARCH__) #pragma unroll #endif for (int i = NDim - 1; i >= 0; --i) { offset += m * indexes[i]; m *= shape[i]; } return offset; } template <typename Index, unsigned NDim> TV_HOST_DEVICE_INLINE Index rowArrayIdxInv(Index index, Index *output, const Index *shape) { #pragma unroll for (int i = NDim - 1; i >= 0; --i) { output[i] = index % shape[i]; index -= output[i]; index /= shape[i]; } return index; } template <typename Index> TV_HOST_DEVICE Index rowArrayIdxInv(Index index, Index *output, const Index *shape, int ndim) { for (int i = ndim - 
1; i >= 0; --i) { output[i] = index % shape[i]; index -= output[i]; index /= shape[i]; } return index; } template <int N> struct ArrayIndexRowMajorReverse { template <typename TShape, typename T, class... Ts> TV_HOST_DEVICE_INLINE static unsigned run(const TShape *shape, T index, Ts... inds) { return index + shape[N - 1] * ArrayIndexRowMajorReverse<N - 1>::run(shape, inds...); } template <typename T, class... Ts> TV_HOST_DEVICE_INLINE static unsigned runShape(const Shape &shape, T index, Ts... inds) { return index + shape[N - 1] * ArrayIndexRowMajorReverse<N - 1>::run(shape, inds...); } }; template <> struct ArrayIndexRowMajorReverse<1> { template <typename TShape, typename T> TV_HOST_DEVICE_INLINE static unsigned run(const TShape *shape, T idx) { return idx; } template <typename T> TV_HOST_DEVICE_INLINE static unsigned runShape(const Shape &shape, T idx) { return idx; } }; template <int N, int Ndim> struct ArrayIndexRowMajor { // this array index provide almost same compiled code. compile it in // https://godbolt.org/ for more details. template <typename TShape, typename Tinit, typename T, class... Ts> TV_HOST_DEVICE_INLINE static unsigned run(const TShape *shape, Tinit start, T index, Ts... inds) { return ArrayIndexRowMajor<N - 1, Ndim>::run( shape, (index + start) * shape[Ndim - N + 1], inds...); } template <typename Tinit, typename T, class... Ts> TV_HOST_DEVICE_INLINE static unsigned runShape(const Shape &shape, Tinit start, T index, Ts... 
inds) { return ArrayIndexRowMajor<N - 1, Ndim>::runShape( shape, (index + start) * shape[Ndim - N + 1], inds...); } template <typename TShape, typename Tinit> TV_HOST_DEVICE_INLINE static unsigned runPtrs(const TShape *indexes, const TShape *shape, Tinit start) { return ArrayIndexRowMajor<N - 1, Ndim>::runPtrs( indexes, shape, (indexes[Ndim - N] + start) * shape[Ndim - N + 1]); } }; template <int Ndim> struct ArrayIndexRowMajor<1, Ndim> { template <typename TShape, typename Tinit, typename T> TV_HOST_DEVICE_INLINE static unsigned run(const TShape *shape, Tinit start, T idx) { return start + idx; } template <typename Tinit, typename T> TV_HOST_DEVICE_INLINE static unsigned runShape(const Shape &shape, Tinit start, T idx) { return start + idx; } template <typename TShape, typename Tinit> TV_HOST_DEVICE_INLINE static unsigned runPtrs(const TShape *indexes, const TShape *shape, Tinit start) { return start + indexes[Ndim - 1]; } }; template <> struct ArrayIndexRowMajor<0, 0> { template <typename TShape, typename Tinit> TV_HOST_DEVICE_INLINE static unsigned run(const TShape *shape, Tinit start) { return 0; } template <typename Tinit> TV_HOST_DEVICE_INLINE static unsigned runShape(const Shape &shape, Tinit start) { return 0; } template <typename TShape, typename Tinit> TV_HOST_DEVICE_INLINE static unsigned runPtrs(const TShape *indexes, const TShape *shape, Tinit start) { return 0; } }; template <int N, int Ndim> struct ArrayIndexStride { // this array index provide almost same compiled code. compile it in // https://godbolt.org/ for more details. template <typename TShape, typename Tinit, typename T, class... Ts> TV_HOST_DEVICE_INLINE static unsigned run(const TShape *stride, Tinit start, T index, Ts... 
inds) { return ArrayIndexStride<N - 1, Ndim>::run( stride, start + index * stride[Ndim - N + 1], inds...); } }; template <int Ndim> struct ArrayIndexStride<1, Ndim> { template <typename TShape, typename Tinit, typename T> TV_HOST_DEVICE_INLINE static unsigned run(const TShape *stride, Tinit start, T idx) { return start + idx * stride[Ndim - 1]; } }; #if __cplusplus >= 201703L template <size_t... N, class T, class... Ts> TV_HOST_DEVICE_INLINE T array_index_stride(const T *stride, Ts... ids) { return ((stride[N] * std::get<N>(std::forward_as_tuple(ids...))) + ...); } #endif namespace detail { template <typename T> struct TypeToString; template <> struct TypeToString<bool> { static constexpr const char *value = "bool"; }; template <> struct TypeToString<const bool> { static constexpr const char *value = "bool"; }; template <> struct TypeToString<int32_t> { static constexpr const char *value = "int32"; }; template <> struct TypeToString<float> { static constexpr const char *value = "float"; }; template <> struct TypeToString<double> { static constexpr const char *value = "double"; }; template <> struct TypeToString<int16_t> { static constexpr const char *value = "int16"; }; template <> struct TypeToString<int8_t> { static constexpr const char *value = "int8"; }; template <> struct TypeToString<int64_t> { static constexpr const char *value = "int64"; }; template <> struct TypeToString<uint8_t> { static constexpr const char *value = "uint8"; }; template <> struct TypeToString<uint16_t> { static constexpr const char *value = "uint16"; }; template <> struct TypeToString<uint32_t> { static constexpr const char *value = "uint32"; }; template <> struct TypeToString<uint64_t> { static constexpr const char *value = "uint64"; }; template <> struct TypeToString<const int32_t> { static constexpr const char *value = "int32"; }; template <> struct TypeToString<const float> { static constexpr const char *value = "float"; }; template <> struct TypeToString<const double> { static 
constexpr const char *value = "double"; }; template <> struct TypeToString<const int16_t> { static constexpr const char *value = "int16"; }; template <> struct TypeToString<const int8_t> { static constexpr const char *value = "int8"; }; template <> struct TypeToString<const int64_t> { static constexpr const char *value = "int64"; }; template <> struct TypeToString<const uint8_t> { static constexpr const char *value = "uint8"; }; template <> struct TypeToString<const uint16_t> { static constexpr const char *value = "uint16"; }; template <> struct TypeToString<const uint32_t> { static constexpr const char *value = "uint32"; }; template <> struct TypeToString<const uint64_t> { static constexpr const char *value = "uint64"; }; } // namespace detail template <typename T> constexpr const char *type_s = detail::TypeToString<T>::value; namespace detail { template <typename T, int Rank, template <class> class PtrTraits = DefaultPtrTraits, typename Tindex = int> struct TensorAccesserBase { static constexpr int rank_value = Rank; using ptr_t = typename PtrTraits<T>::type; static_assert(Rank > 0, "error"); explicit TV_HOST_DEVICE_INLINE TensorAccesserBase(ptr_t ptr, const Tindex *stride_ptr) : ptr_(ptr), stride_ptr_(stride_ptr) {} TV_HOST_DEVICE_INLINE ptr_t data() { return ptr_; } TV_HOST_DEVICE_INLINE const ptr_t data() const { return ptr_; } template <class... Inds> TV_HOST_DEVICE_INLINE T &operator()(Inds... inds) { static_assert(sizeof...(inds) == Rank, "error"); return ptr_[ArrayIndexStride<Rank, Rank>::run(stride_ptr_, 0, inds...)]; } template <class... Inds> TV_HOST_DEVICE_INLINE const T &operator()(Inds... 
inds) const { static_assert(sizeof...(inds) == Rank, "error"); return ptr_[ArrayIndexStride<Rank, Rank>::run(stride_ptr_, 0, inds...)]; } protected: ptr_t ptr_; const Tindex *stride_ptr_; }; } // namespace detail template <typename T, int Rank, template <class> class PtrTraits = DefaultPtrTraits, typename Tindex = int> struct TensorAccesser : public detail::TensorAccesserBase<T, Rank, PtrTraits, Tindex> { using ptr_t = typename PtrTraits<T>::type; static_assert(Rank > 0, "error"); explicit TV_HOST_DEVICE_INLINE TensorAccesser(ptr_t ptr, const Tindex *stride_ptr) : detail::TensorAccesserBase<T, Rank, PtrTraits, Tindex>(ptr, stride_ptr) {} TV_HOST_DEVICE_INLINE TensorAccesser<T, Rank - 1, PtrTraits, Tindex> operator[](int i) { return TensorAccesser<T, Rank - 1, PtrTraits, Tindex>( this->ptr_ + this->stride_ptr_[0] * i, this->stride_ptr_ + 1); } TV_HOST_DEVICE_INLINE TensorAccesser<T, Rank - 1, PtrTraits, Tindex> operator[](int i) const { return TensorAccesser<T, Rank - 1, PtrTraits, Tindex>( this->ptr_ + this->stride_ptr_[0] * i, this->stride_ptr_ + 1); } }; template <typename T, template <class> class PtrTraits, typename Tindex> struct TensorAccesser<T, 1, PtrTraits, Tindex> : public detail::TensorAccesserBase<T, 1, PtrTraits, Tindex> { using ptr_t = typename PtrTraits<T>::type; explicit TV_HOST_DEVICE_INLINE TensorAccesser(ptr_t ptr, const Tindex *stride_ptr) : detail::TensorAccesserBase<T, 1, PtrTraits, Tindex>(ptr, stride_ptr) {} TV_HOST_DEVICE_INLINE T &operator[](int i) { return this->ptr_[this->stride_ptr_[0] * i]; } TV_HOST_DEVICE_INLINE T &operator[](int i) const { return this->ptr_[this->stride_ptr_[0] * i]; } }; template <typename T, int Rank = -1, template <class> class PtrTraits = DefaultPtrTraits, typename Tindex = int> struct TensorView { static constexpr int rank_value = Rank; using ptr_t = typename PtrTraits<T>::type; using tv_shape_t = ShapeBase<Rank == -1 ? 
TV_MAX_DIM : Rank, Tindex>; using no_cv_type = typename std::remove_cv<T>::type; static_assert(Rank == -1 || Rank > 0, "error"); TV_HOST_DEVICE_INLINE TensorView() {} explicit TV_HOST_DEVICE_INLINE TensorView(ptr_t ptr, tv_shape_t shape) : ptr_(ptr), shape_(shape), stride_(shape.stride_rowmajor()) {} explicit TV_HOST_DEVICE_INLINE TensorView(ptr_t ptr, tv_shape_t shape, tv_shape_t stride) : ptr_(ptr), shape_(shape), stride_(stride) {} operator TensorView<const no_cv_type, Rank, PtrTraits, Tindex>() { return TensorView<const no_cv_type, Rank, PtrTraits, Tindex>(ptr_, shape_); } // conversion function template <class... Inds> TV_HOST_DEVICE_INLINE T &operator()(Inds... inds) { static_assert(Rank == -1 || sizeof...(inds) == Rank, "error"); #if defined TV_DEBUG int idxes[sizeof...(Inds)]{int(inds)...}; TV_REQUIRE(sizeof...(inds) == shape_.ndim(), "you provide %d indexes, but dim is %d\n", sizeof...(inds), shape_.ndim()); for (int i = 0; i < sizeof...(inds); ++i) { TV_REQUIRE(idxes[i] >= 0 && idxes[i] < shape_[i], "index-%d(%d) out-of-range: [0, %d)\n", i, idxes[i], shape_[i]); } #endif constexpr int Ndim = sizeof...(Inds); return ptr_[ArrayIndexRowMajor<Ndim, Ndim>::runShape(shape_, 0, inds...)]; } template <class... Inds> TV_HOST_DEVICE_INLINE const T &operator()(Inds... 
inds) const { static_assert(Rank == -1 || sizeof...(inds) == Rank, "error"); #if defined TV_DEBUG int idxes[sizeof...(Inds)]{int(inds)...}; TV_REQUIRE(sizeof...(inds) == shape_.ndim(), "you provide %d indexes, but dim is %d\n", sizeof...(inds), shape_.ndim()); for (int i = 0; i < sizeof...(inds); ++i) { TV_REQUIRE(idxes[i] >= 0 && idxes[i] < shape_[i], "index-%d(%d) out-of-range: [0, %d)\n", i, idxes[i], shape_[i]); } #endif constexpr int Ndim = sizeof...(Inds); return ptr_[ArrayIndexRowMajor<Ndim, Ndim>::runShape(shape_, 0, inds...)]; } TV_HOST_DEVICE_INLINE T &operator()() { static_assert(Rank == -1 || 0 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(ptr_ != nullptr, "you want get value but the view is empty.%s", "\n"); TV_REQUIRE(shape_.ndim() == 0, "you provide 0 indexes, but dim is %ld\n", shape_.ndim()); #endif return ptr_[0]; } TV_HOST_DEVICE_INLINE const T &operator()() const { static_assert(Rank == -1 || 0 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(ptr_ != nullptr, "you want get value but the view is empty.%s", "\n"); TV_REQUIRE(shape_.ndim() == 0, "you provide 0 indexes, but dim is %ld\n", shape_.ndim()); #endif return ptr_[0]; } template <class T1> TV_HOST_DEVICE_INLINE T &operator()(T1 i1) { static_assert(Rank == -1 || 1 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 1, "you provide 1 indexes, but dim is %ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, i1, shape_[0]); #endif return ptr_[i1]; } template <class T1, class T2> TV_HOST_DEVICE_INLINE T &operator()(T1 i1, T2 i2) { static_assert(Rank == -1 || 2 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 2, "you provide 2 indexes, but dim is %ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, int(i1), shape_[0]); TV_REQUIRE(i2 >= 0 && i2 < shape_[1], "index-%d(%d) out-of-range: [0, %d)\n", 1, int(i2), shape_[1]); #endif return ptr_[i1 * shape_[1] + 
i2]; } template <class T1, class T2, class T3> TV_HOST_DEVICE_INLINE T &operator()(T1 i1, T2 i2, T3 i3) { static_assert(Rank == -1 || 3 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 3, "you provide 3 indexes, but dim is %ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, int(i1), shape_[0]); TV_REQUIRE(i2 >= 0 && i2 < shape_[1], "index-%d(%d) out-of-range: [0, %d)\n", 1, int(i2), shape_[1]); TV_REQUIRE(i3 >= 0 && i3 < shape_[2], "index-%d(%d) out-of-range: [0, %d)\n", 2, int(i3), shape_[2]); #endif return ptr_[(i1 * shape_[1] + i2) * shape_[2] + i3]; } template <class T1, class T2, class T3, class T4> TV_HOST_DEVICE_INLINE T &operator()(T1 i1, T2 i2, T3 i3, T4 i4) { static_assert(Rank == -1 || 4 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 4, "you provide 4 indexes, but dim is %ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, int(i1), shape_[0]); TV_REQUIRE(i2 >= 0 && i2 < shape_[1], "index-%d(%d) out-of-range: [0, %d)\n", 1, int(i2), shape_[1]); TV_REQUIRE(i3 >= 0 && i3 < shape_[2], "index-%d(%d) out-of-range: [0, %d)\n", 2, int(i3), shape_[2]); TV_REQUIRE(i4 >= 0 && i4 < shape_[3], "index-%d(%d) out-of-range: [0, %d)\n", 3, int(i4), shape_[3]); #endif return ptr_[((i1 * shape_[1] + i2) * shape_[2] + i3) * shape_[3] + i4]; } template <class T1> TV_HOST_DEVICE_INLINE const T &operator()(T1 i1) const { static_assert(Rank == -1 || 1 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 1, "you provide 1 indexes, but dim is %ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, int(i1), shape_[0]); #endif return ptr_[i1]; } template <class T1, class T2> TV_HOST_DEVICE_INLINE const T &operator()(T1 i1, T2 i2) const { static_assert(Rank == -1 || 2 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 2, "you provide 2 indexes, but dim is 
%ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, int(i1), shape_[0]); TV_REQUIRE(i2 >= 0 && i2 < shape_[1], "index-%d(%d) out-of-range: [0, %d)\n", 1, int(i2), shape_[1]); #endif return ptr_[i1 * shape_[1] + i2]; } template <class T1, class T2, class T3> TV_HOST_DEVICE_INLINE const T &operator()(T1 i1, T2 i2, T3 i3) const { static_assert(Rank == -1 || 3 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 3, "you provide 3 indexes, but dim is %ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, int(i1), shape_[0]); TV_REQUIRE(i2 >= 0 && i2 < shape_[1], "index-%d(%d) out-of-range: [0, %d)\n", 1, int(i2), shape_[1]); TV_REQUIRE(i3 >= 0 && i3 < shape_[2], "index-%d(%d) out-of-range: [0, %d)\n", 2, int(i3), shape_[2]); #endif return ptr_[(i1 * shape_[1] + i2) * shape_[2] + i3]; } template <class T1, class T2, class T3, class T4> TV_HOST_DEVICE_INLINE const T &operator()(T1 i1, T2 i2, T3 i3, T4 i4) const { static_assert(Rank == -1 || 4 == Rank, "error"); #if defined TV_DEBUG TV_REQUIRE(shape_.ndim() == 4, "you provide 4 indexes, but dim is %ld\n", shape_.ndim()); TV_REQUIRE(i1 >= 0 && i1 < shape_[0], "index-%d(%d) out-of-range: [0, %d)\n", 0, int(i1), shape_[0]); TV_REQUIRE(i2 >= 0 && i2 < shape_[1], "index-%d(%d) out-of-range: [0, %d)\n", 1, int(i2), shape_[1]); TV_REQUIRE(i3 >= 0 && i3 < shape_[2], "index-%d(%d) out-of-range: [0, %d)\n", 2, int(i3), shape_[2]); TV_REQUIRE(i4 >= 0 && i4 < shape_[3], "index-%d(%d) out-of-range: [0, %d)\n", 3, int(i4), shape_[3]); #endif return ptr_[((i1 * shape_[1] + i2) * shape_[2] + i3) * shape_[3] + i4]; } TV_HOST_DEVICE_INLINE T &operator[](int idx) { #ifdef TV_DEBUG TV_REQUIRE(idx >= 0 && idx < size(), "index(%d) out-of-range: [0, %ld)\n", int(idx), size()); #endif return ptr_[idx]; } TV_HOST_DEVICE_INLINE const T &operator[](int idx) const { #ifdef TV_DEBUG TV_REQUIRE(idx >= 0 && idx < size(), "index(%d) 
out-of-range: [0, %ld)\n", int(idx), size()); #endif return ptr_[idx]; } TV_HOST_DEVICE_INLINE TensorAccesser<T, Rank - 1, PtrTraits, Tindex> accessor(Tindex idx) { static_assert(Rank > 1, "for Rank == 1, use accessor() or just use []"); return TensorAccesser<T, Rank - 1, PtrTraits, Tindex>( ptr_ + stride_[0] * idx, stride_.data() + 1); } TV_HOST_DEVICE_INLINE TensorAccesser<T, Rank, PtrTraits, Tindex> accessor() { static_assert(Rank > 0, "rank must higher than zero"); return TensorAccesser<T, Rank, PtrTraits, Tindex>(ptr_, stride_.data()); } TV_HOST_DEVICE_INLINE TensorAccesser<T, Rank - 1, PtrTraits, Tindex> accessor(Tindex idx) const { static_assert(Rank > 1, "for Rank == 1, use accessor() or just use []"); return TensorAccesser<T, Rank - 1, PtrTraits, Tindex>( ptr_ + stride_[0] * idx, stride_.data() + 1); } TV_HOST_DEVICE_INLINE TensorAccesser<T, Rank, PtrTraits, Tindex> accessor() const { static_assert(Rank > 0, "error"); return TensorAccesser<T, Rank, PtrTraits, Tindex>( ptr_, stride_.data(), "rank must higher than zero"); } TV_HOST_DEVICE_INLINE bool empty() const { return ptr_ == nullptr; } TV_HOST_DEVICE_INLINE ptr_t data() { return ptr_; } TV_HOST_DEVICE_INLINE const ptr_t data() const { return ptr_; } TV_HOST_DEVICE_INLINE const tv_shape_t &shape() const { return shape_; } TV_HOST_DEVICE_INLINE const tv_shape_t &stride() const { return stride_; } TV_HOST_DEVICE_INLINE int dim(int idx) const { return shape_[idx]; } TV_HOST_DEVICE_INLINE int ndim() const { return shape_.ndim(); } template <class... Inds> TV_HOST_DEVICE_INLINE TensorView<T, Rank == -1 ? -1 : sizeof...(Inds), PtrTraits, Tindex> view(Inds... newShapes) const { ShapeBase<Rank == -1 ? TV_MAX_DIM : sizeof...(Inds), Tindex> shapes{ int(newShapes)...}; for (size_t i = 0; i < sizeof...(newShapes); ++i) { if (shapes[i] == -1) { shapes[i] = 1; shapes[i] = size() / shapes.size(); break; } } TV_ASSERT(shapes.size() == size()); return TensorView < T, Rank == -1 ? 
-1 : sizeof...(Inds), PtrTraits, Tindex > (ptr_, shapes);
  }
  // Reshape to a runtime shape; the element count must be preserved.
  TV_HOST_DEVICE_INLINE TensorView<T, -1, PtrTraits, Tindex>
  view(Shape shapes) const {
    TV_ASSERT(shapes.size() == size());
    return TensorView<T, -1, PtrTraits, Tindex>(ptr_, shapes);
  }
  // Remove all size-1 dimensions; result has dynamic rank.
  TV_HOST_DEVICE_INLINE TensorView<T, -1, PtrTraits, Tindex> squeeze() const {
    return TensorView<T, -1, PtrTraits, Tindex>(ptr_, shape_.squeeze());
  }
  // Remove dimension `dim` (presumably expected to have extent 1 — behavior
  // for other extents is delegated to Shape::squeeze; confirm there).
  TV_HOST_DEVICE_INLINE
      TensorView<T, Rank == -1 ? -1 : Rank - 1, PtrTraits, Tindex>
      squeeze(int dim) const {
    return TensorView < T, Rank == -1 ? -1 : Rank - 1, PtrTraits,
           Tindex >
               (ptr_,
                shape_.squeeze < Rank == -1 ? TV_MAX_DIM : Rank - 1 > (dim));
  }
  // Total number of elements.
  TV_HOST_DEVICE_INLINE size_t size() const { return shape_.size(); }
  // View of the sub-tensor at the leading indices (id, ints...); the
  // remaining trailing dimensions are kept.
  template <class... Integers>
  TV_HOST_DEVICE_INLINE TensorView<T, -1, PtrTraits, Tindex>
  subview(int id, Integers... ints) {
    tv_shape_t start = {id, ints...};
    for (int i = 1 + sizeof...(ints); i < ndim(); ++i) {
      start.push_back(0);
    }
    return TensorView<T, Rank, PtrTraits, Tindex>(
        ptr_ + rowArrayIdx(shape_, start),
        shape_.subshape(sizeof...(ints) + 1));
  }
  // Const overload of subview(id, ints...).
  template <class... Integers>
  TV_HOST_DEVICE_INLINE TensorView<T, -1, PtrTraits, Tindex>
  subview(int id, Integers... ints) const {
    tv_shape_t start = {id, ints...};
    for (int i = 1 + sizeof...(ints); i < ndim(); ++i) {
      start.push_back(0);
    }
    return TensorView<T, Rank, PtrTraits, Tindex>(
        ptr_ + rowArrayIdx(shape_, start),
        shape_.subshape(sizeof...(ints) + 1));
  }
  // Runtime-index variant of subview.
  TV_HOST_DEVICE_INLINE TensorView<T, -1, PtrTraits, Tindex>
  subview(SimpleVector<int> ids) const {
    Shape start = ids;
    for (int i = ids.size(); i < ndim(); ++i) {
      start.push_back(0);
    }
    return TensorView<T, Rank, PtrTraits, Tindex>(
        ptr_ + rowArrayIdx(shape_, start), shape_.subshape(ids.size()));
  }
  // Render the tensor into stream `ss` in a numpy-like nested-bracket
  // layout and return the accumulated string. `Os` is expected to be an
  // ostringstream-like type (it must provide operator<< and str()).
  template <typename Os> std::string repr(Os &ss) const {
    if (empty())
      return "";
    if (shape_.ndim() == 0) {
      // Scalar (rank-0) tensor: print the single value.
      ss << "Tensor[" << type_s<T> << "]" << std::endl;
      ss << *ptr_;
      return ss.str();
    }
    // prev holds the multi-index of the previously printed element
    // (all -1 before the first element).
    SimpleVector<int64_t, TV_MAX_DIM> prev(ndim(), -1);
    SimpleVector<int64_t, TV_MAX_DIM> nd_index(ndim());
    SimpleVector<int64_t, TV_MAX_DIM> _shape;
    for (auto s : shape()) {
      _shape.push_back(s);
    }
    ss << "Tensor[" << type_s<T> << "]: shape=" << shape()
       << ", stride=" << stride() << std::endl;
    auto ndimValue = ndim();
    for (int64_t i = 0; i < int64_t(size()); ++i) {
      // Convert the flat row-major index i into a multi-index.
      rowArrayIdxInv(i, nd_index.data(), _shape.data(), ndimValue);
      bool newline = false;
      int end_count = 0;
      // Close one "]" for every dimension that wrapped back to 0.
      for (int j = 0; j < ndimValue; ++j) {
        if (nd_index[j] != prev[j] && nd_index[j] == 0 && prev[j] != 0 &&
            prev[j] != -1) {
          ss << "]";
          ++end_count;
          newline = true;
        }
      }
      if (prev[0] == -1) {
        // First element: pretend all dimensions just closed so that the
        // indentation below comes out as zero.
        end_count = ndimValue;
      }
      if (newline) {
        ss << "\n";
      }
      // Open one "[" for every dimension that restarted at 0.
      int starts_count = 0;
      for (int j = 0; j < ndimValue; ++j) {
        if (nd_index[j] != prev[j] && nd_index[j] == 0 && prev[j] != 0) {
          ++starts_count;
        }
      }
      if (starts_count > 0) {
        for (int j = 0; j < ndimValue - end_count; ++j) {
          ss << " ";
        }
        for (int j = 0; j < starts_count; ++j) {
          ss << "[";
        }
      }
      if (std::is_same<T, uint8_t>::value ||
          std::is_same<T, const uint8_t>::value) {
        // uint8_t streams as a character; force numeric output.
        ss << unsigned((*this)[i]);
      } else {
        ss << (*this)[i];
      }
      if (nd_index[ndimValue - 1] != _shape[ndimValue - 1] - 1) {
        ss << ",";
      }
      for (int j = 0; j < ndimValue; ++j) {
        prev[j] = nd_index[j];
      }
    }
    // Close all remaining brackets.
    for (int j = 0; j < ndimValue; ++j) {
      ss << "]";
    }
    return ss.str();
  }
  // Convenience overload using a local ostringstream.
  std::string repr() const {
    std::ostringstream ss;
    return repr(ss);
  }

protected:
  // Normalize an integer index or a Slice into a Slice.
  template <typename T1> TV_HOST_DEVICE_INLINE Slice to_slice(T1 s) const {
    return Slice{int(s), -1, -1};
  }
  TV_HOST_DEVICE_INLINE Slice to_slice(Slice s) const { return Slice(s); }

  ptr_t ptr_ = nullptr; // non-owning data pointer
  tv_shape_t shape_;
  tv_shape_t stride_;
};

// Wrap a std::vector as a 1-D TensorView (non-owning).
template <typename T> TensorView<T> vector2tv(std::vector<T> &arr) {
  return TensorView<T>(arr.data(), {arr.size()});
}
// Wrap a std::vector as a TensorView with an explicit shape; shape.prod()
// must equal the vector length.
template <typename T>
TensorView<T> vector2tv(std::vector<T> &arr, Shape shape) {
  TV_ASSERT_INVALID_ARG(shape.prod() == arr.size(), "error");
  return TensorView<T>(arr.data(), shape);
}
// Const-element variant.
template <typename T> TensorView<const T> vector2tv(const std::vector<T> &arr) {
  return TensorView<const T>(arr.data(), {arr.size()});
}
// Stream a TensorView via its repr().
template <typename Os, typename T, int Rank, template <class> class PtrTraits,
          typename Tindex>
Os &operator<<(Os &os, const TensorView<T, Rank, PtrTraits, Tindex> &dt) {
  os << dt.repr();
  return os;
}
template <typename Os, typename T, int Rank, template <class> class PtrTraits,
          typename Tindex>
Os &operator<<(Os &os,
               const TensorView<const T, Rank, PtrTraits, Tindex> &dt) {
  os << dt.repr();
  return os;
}
namespace detail {
// printf conversion specifier for each element type; used by the
// device-side printTensorView below.
template <typename T> struct TypePrintfFormat;
template <> struct TypePrintfFormat<float> {
  static constexpr const char *value = "%.2f";
};
template <> struct TypePrintfFormat<double> {
  static constexpr const char *value = "%.2f";
};
template <> struct TypePrintfFormat<int8_t> {
  static constexpr const char *value = "%d";
};
template <> struct TypePrintfFormat<int16_t> {
  static constexpr const char *value = "%d";
};
template <> struct TypePrintfFormat<int32_t> {
  static constexpr const char *value = "%d";
};
template <> struct TypePrintfFormat<uint8_t> {
  static constexpr const char *value = "%u";
};
template <> struct TypePrintfFormat<uint16_t> {
  static constexpr const char *value = "%u";
};
template <> struct
TypePrintfFormat<uint32_t> { static constexpr const char *value = "%u"; }; template <> struct TypePrintfFormat<int64_t> { static constexpr const char *value = "%ld"; }; template <> struct TypePrintfFormat<uint64_t> { static constexpr const char *value = "%lu"; }; template <> struct TypePrintfFormat<bool> { static constexpr const char *value = "%d"; }; template <typename T> constexpr const char *type_printf_format_v = TypePrintfFormat<T>::value; }; // namespace detail template <typename T, int Rank, template <class> class PtrTraits, typename Tindex> TV_HOST_DEVICE void printTensorView(const TensorView<T, Rank, PtrTraits, Tindex> &tensor, const char *format) { // used to print tensor in cuda kernel. if (tensor.empty()) return; if (tensor.ndim() == 0) { printf(format, tensor()); printf("\n"); return; } SimpleVector<int64_t, TV_MAX_DIM> prev(tensor.ndim(), -1); SimpleVector<int64_t, TV_MAX_DIM> nd_index(tensor.ndim()); SimpleVector<int64_t, TV_MAX_DIM> shape(tensor.shape()); auto ndim = tensor.ndim(); for (int64_t i = 0; i < tensor.size(); ++i) { rowArrayIdxInv(i, nd_index.data(), shape.data(), ndim); bool newline = false; int end_count = 0; for (int j = 0; j < ndim; ++j) { if (nd_index[j] != prev[j] && nd_index[j] == 0 && prev[j] != 0 && prev[j] != -1) { printf("]"); ++end_count; newline = true; } } if (prev[0] == -1) { end_count = ndim; } if (newline) { printf("\n"); } int starts_count = 0; for (int j = 0; j < ndim; ++j) { if (nd_index[j] != prev[j] && nd_index[j] == 0 && prev[j] != 0) { ++starts_count; } } if (starts_count > 0) { for (int j = 0; j < ndim - end_count; ++j) { printf(" "); } for (int j = 0; j < starts_count; ++j) { printf("]"); } } printf(format, tensor[i]); if (nd_index[ndim - 1] != shape[ndim - 1] - 1) { printf(","); } for (int j = 0; j < ndim; ++j) { prev[j] = nd_index[j]; } } for (int j = 0; j < ndim; ++j) { printf("]"); } printf("\n"); } template <typename T, int Rank, template <class> class PtrTraits, typename Tindex> TV_HOST_DEVICE void 
printTensorView(TensorView<T, Rank, PtrTraits, Tindex> tensor) {
  // Convenience overload: derive the printf format from the element type.
  using Traw = typename std::remove_const<T>::type;
  return printTensorView(tensor, detail::type_printf_format_v<Traw>);
}
// Print a raw pointer interpreted with the given shape.
template <typename T>
TV_HOST_DEVICE void printTensorView(const T *ptr, Shape shape) {
  using Traw = typename std::remove_const<T>::type;
  return printTensorView(TensorView<const T>(ptr, shape),
                         detail::type_printf_format_v<Traw>);
}
// Same, with an explicit printf format.
template <typename T>
TV_HOST_DEVICE void printTensorView(const T *ptr, Shape shape,
                                    const char *format) {
  return printTensorView(TensorView<const T>(ptr, shape), format);
}

#ifdef TV_CUDA

#ifdef __DRIVER_TYPES_H__
#ifndef DEVICE_RESET
#define DEVICE_RESET cudaDeviceReset();
#endif
#else
#ifndef DEVICE_RESET
#define DEVICE_RESET
#endif
#endif

// Abort the process with a diagnostic if a CUDA API call returned an error.
template <typename T>
void check(T result, char const *const func, const char *const file,
           int const line) {
  if (result) {
    fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n", file, line,
            static_cast<unsigned int>(result), func);
    DEVICE_RESET // Make sure we call CUDA Device Reset before exiting
    exit(EXIT_FAILURE);
  }
}

#define checkCudaErrors(val) tv::check((val), #val, __FILE__, __LINE__)

// --- async host -> device copies ---------------------------------------
template <typename T>
void host2dev(T *dst, const T *src, size_t size, cudaStream_t s = 0) {
  checkCudaErrors(
      cudaMemcpyAsync(dst, src, size * sizeof(T), cudaMemcpyHostToDevice, s));
}
// TensorView overloads copy min(dst.size(), src.size()) elements.
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void host2dev(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
              const TensorView<const T, Rank, PtrTraits2, Tindex2> src,
              cudaStream_t s = 0) {
  host2dev(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void host2dev(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
              const TensorView<T, Rank, PtrTraits2, Tindex2> src,
              cudaStream_t s = 0) {
  host2dev(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}

// --- synchronous host -> device copies ---------------------------------
template <typename T> void host2dev_sync(T *dst, const T *src, size_t size) {
  checkCudaErrors(
      cudaMemcpy(dst, src, size * sizeof(T), cudaMemcpyHostToDevice));
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void host2dev_sync(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
                   const TensorView<const T, Rank, PtrTraits2, Tindex2> src) {
  host2dev_sync(dst.data(), src.data(), std::min(dst.size(), src.size()));
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void host2dev_sync(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
                   const TensorView<T, Rank, PtrTraits2, Tindex2> src) {
  host2dev_sync(dst.data(), src.data(), std::min(dst.size(), src.size()));
}

// --- async device -> host copies ---------------------------------------
template <typename T>
void dev2host(T *dst, const T *src, size_t size, cudaStream_t s = 0) {
  checkCudaErrors(
      cudaMemcpyAsync(dst, src, size * sizeof(T), cudaMemcpyDeviceToHost, s));
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void dev2host(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
              const TensorView<const T, Rank, PtrTraits2, Tindex2> src,
              cudaStream_t s = 0) {
  dev2host(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void dev2host(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
              const TensorView<T, Rank, PtrTraits2, Tindex2> src,
              cudaStream_t s = 0) {
  dev2host(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}

// --- async device -> device copies -------------------------------------
template <typename T>
void dev2dev(T *dst, const T *src, size_t size, cudaStream_t s = 0) {
  checkCudaErrors(cudaMemcpyAsync(dst, src, size * sizeof(T),
                                  cudaMemcpyDeviceToDevice, s));
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void dev2dev(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
             const TensorView<const T, Rank, PtrTraits2, Tindex2> src,
             cudaStream_t s = 0) {
  dev2dev(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void dev2dev(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
             const TensorView<T, Rank, PtrTraits2, Tindex2> src,
             cudaStream_t s = 0) {
  dev2dev(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}

// --- async host -> host copies -----------------------------------------
template <typename T>
void host2host(T *dst, const T *src, size_t size, cudaStream_t s = 0) {
  checkCudaErrors(
      cudaMemcpyAsync(dst, src, size * sizeof(T), cudaMemcpyHostToHost, s));
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void host2host(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
               const TensorView<const T, Rank, PtrTraits2, Tindex2> src,
               cudaStream_t s = 0) {
  host2host(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}
template <typename T, int Rank, template <class> class PtrTraits1,
          template <class> class PtrTraits2, typename Tindex1,
          typename Tindex2>
void host2host(TensorView<T, Rank, PtrTraits1, Tindex1> dst,
               const TensorView<T, Rank, PtrTraits2, Tindex2> src,
               cudaStream_t s = 0) {
  host2host(dst.data(), src.data(), std::min(dst.size(), src.size()), s);
}

// Zero-fill device memory (synchronous, and stream-async variant).
template <typename T, int Rank, template <class> class PtrTraits,
          typename Tindex>
void zero_dev(TensorView<T, Rank, PtrTraits, Tindex> tensor) {
  checkCudaErrors(cudaMemset(tensor.data(), 0, tensor.size() * sizeof(T)));
}
template <typename T, int Rank, template <class> class PtrTraits,
          typename Tindex>
void zero_dev(TensorView<T, Rank, PtrTraits, Tindex> tensor, cudaStream_t s) {
  checkCudaErrors(
      cudaMemsetAsync(tensor.data(), 0, tensor.size() * sizeof(T), s));
}
// Zero-fill host memory with std::fill.
// NOTE(review): zero_host uses no CUDA API but sits inside the TV_CUDA
// guard — presumably intentional since it pairs with zero_dev; confirm.
template <typename T, int Rank, template <class> class PtrTraits,
          typename Tindex>
void zero_host(TensorView<T, Rank, PtrTraits, Tindex> tensor) {
  std::fill(tensor.data(), tensor.data() + tensor.size(), 0);
}
#endif
} // namespace tv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tensorview/tools.h
C/C++ Header
// Copyright 2019-2020 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <chrono>
#ifdef TV_CUDA
#include <cuda_runtime_api.h>
#endif
#include <iostream>

namespace tv {

#ifdef TV_CUDA
// Wall-clock timer that synchronizes the CUDA device before every sample,
// so that asynchronously launched kernels are included in the measurement.
// TimeT selects the reported resolution (default: microseconds).
template <typename TimeT = std::chrono::microseconds>
struct CudaContextTimer {
  CudaContextTimer() {
    cudaDeviceSynchronize();
    mCurTime = std::chrono::steady_clock::now();
  }
  // Return the time elapsed since construction or the previous report(),
  // and restart the interval.
  typename TimeT::rep report() {
    cudaDeviceSynchronize();
    // Fix: sample the clock once and reuse it as the new start point.
    // Previously now() was called a second time when resetting mCurTime,
    // silently dropping the interval between the two samples so that
    // consecutive report() calls did not tile the timeline exactly.
    auto now = std::chrono::steady_clock::now();
    auto res = std::chrono::duration_cast<TimeT>(now - mCurTime).count();
    mCurTime = now;
    return res;
  }

private:
  std::chrono::time_point<std::chrono::steady_clock> mCurTime;
};
#endif

// CPU-only interval timer; same contract as CudaContextTimer but without
// device synchronization.
template <typename TimeT = std::chrono::microseconds> struct CPUTimer {
  CPUTimer() { mCurTime = std::chrono::steady_clock::now(); }
  // Return the time elapsed since construction or the previous report(),
  // and restart the interval (single clock sample, see above).
  typename TimeT::rep report() {
    auto now = std::chrono::steady_clock::now();
    auto res = std::chrono::duration_cast<TimeT>(now - mCurTime).count();
    mCurTime = now;
    return res;
  }

private:
  std::chrono::time_point<std::chrono::steady_clock> mCurTime;
};

} // namespace tv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tensorview/torch_utils.h
C/C++ Header
// Copyright 2019-2020 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Bridging helpers between torch::Tensor and tv::TensorView:
// dtype mapping, dtype-based dispatch, and zero-copy view conversion.
#pragma once
#include "mp_helper.h"
#include <tensorview/tensorview.h>

#include <ATen/ATen.h>
#include <torch/script.h>
#ifdef TV_CUDA
#include <ATen/cuda/CUDAContext.h>
#endif
namespace tv {

#ifdef TV_CUDA
// GPU context that reuses PyTorch's current CUDA stream, so tv kernels are
// ordered with torch ops on the same stream.
struct TorchGPU : public tv::GPU {
  virtual cudaStream_t getStream() const override {
    return at::cuda::getCurrentCUDAStream();
  }
};
#endif
namespace detail {
// Map a C++ element type to the corresponding torch dtype constant.
template <typename T> struct TypeToTorchDtypeTraits;
template <> struct TypeToTorchDtypeTraits<int32_t> {
  static constexpr decltype(torch::kInt32) value = torch::kInt32;
};
template <> struct TypeToTorchDtypeTraits<int16_t> {
  static constexpr decltype(torch::kInt32) value = torch::kInt16;
};
template <> struct TypeToTorchDtypeTraits<int8_t> {
  static constexpr decltype(torch::kInt8) value = torch::kInt8;
};
template <> struct TypeToTorchDtypeTraits<int64_t> {
  static constexpr decltype(torch::kInt32) value = torch::kInt64;
};
template <> struct TypeToTorchDtypeTraits<uint8_t> {
  static constexpr decltype(torch::kInt32) value = torch::kUInt8;
};
template <> struct TypeToTorchDtypeTraits<bool> {
  static constexpr decltype(torch::kInt32) value = torch::kBool;
};
template <> struct TypeToTorchDtypeTraits<float> {
  static constexpr decltype(torch::kInt32) value = torch::kFloat32;
};
template <> struct TypeToTorchDtypeTraits<double> {
  static constexpr decltype(torch::kInt32) value = torch::kFloat64;
};
template <> struct TypeToTorchDtypeTraits<at::Half> {
  static constexpr decltype(torch::kInt32) value = torch::kHalf;
};
// Every C++ type with a torch dtype mapping above.
using all_torch_types_t = std::tuple<float, double, int8_t, int16_t, int32_t,
                                     int64_t, uint8_t, bool, at::Half>;
} // namespace detail
template <typename T>
constexpr decltype(torch::kInt32) torch_type_v =
    detail::TypeToTorchDtypeTraits<T>::value;

// Invoke f with a default-constructed value of whichever of Ts... matches
// the runtime ScalarType t; throws if none matches.
template <class... Ts, typename F> void dispatch_torch(at::ScalarType t, F &&f) {
  static_assert(sizeof...(Ts) > 0, "you need to provide at least one type");
  bool notFound = true;
  tv::mp_for_each<mp_list<Ts...>>([=, &notFound, &f](auto I) {
    if (detail::TypeToTorchDtypeTraits<TV_DECLTYPE(I)>::value == t) {
      std::forward<F>(f)(TV_DECLTYPE(I)());
      notFound = false;
    }
  });
  if (notFound) {
    // Build the list of supported type names for the error message.
    std::stringstream ss;
    tv::mp_for_each<mp_list<Ts...>>([=, &ss](auto I) {
      ss << tv::detail::TypeToString<TV_DECLTYPE(I)>::value << " ";
    });
    TV_THROW_RT_ERR("unknown type", t, ", available:", ss.str());
  }
}

// Functor form of dispatch_torch taking its candidate types from a
// type-list-like template (e.g. the std::tuple all_torch_types_t).
template <class T> struct DispatchTorch;
template <template <class...> class T, class... Args>
struct DispatchTorch<T<Args...>> {
  template <typename F> inline void operator()(at::ScalarType t, F &&f) {
    return dispatch_torch<Args...>(t, std::forward<F>(f));
  }
};

// Assert that the tensor's runtime dtype matches the compile-time type T.
template <typename T> void check_torch_dtype(const torch::Tensor &tensor) {
  DispatchTorch<detail::all_torch_types_t>()(tensor.scalar_type(), [&](auto I) {
    using Ttensor = TV_DECLTYPE(I);
    constexpr bool val = std::is_same<std::remove_cv_t<T>, Ttensor>::value;
    TV_ASSERT_RT_ERR(val, "error");
  });
}

// Zero-copy view of a torch tensor as a tv::TensorView. Checks dtype and
// (for static Rank) the dimensionality; the view shares the tensor's memory.
template <typename T, int Rank = -1,
          template <class> class PtrTraits = DefaultPtrTraits,
          typename Tindex = int>
TensorView<T, Rank, PtrTraits, Tindex> torch2tv(const torch::Tensor &tensor) {
  using tv_shape_t =
      typename TensorView<T, Rank, PtrTraits, Tindex>::tv_shape_t;
  check_torch_dtype<T>(tensor);
  // TODO stride
  if (Rank > 0) {
    TV_ASSERT_INVALID_ARG(tensor.dim() == Rank, "error");
  }
  tv_shape_t shape;
  for (auto i : tensor.sizes()) {
    shape.push_back(i);
  }
  return tv::TensorView<T, Rank, PtrTraits, Tindex>(
      tensor.data_ptr<std::remove_const_t<T>>(), shape);
}

// View rows [start, end) of axis 0 without copying, via from_blob.
// NOTE(review): the returned tensor does not own the storage — the source
// tensor must outlive it; confirm callers guarantee this.
template <typename T>
torch::Tensor torch_slice_first_axis(torch::Tensor tensor, T start, T end) {
  // only torch >= 1.5 have tensor slice.
  torch::Tensor res;
  auto tensor_shape = tensor.sizes();
  std::vector<int64_t> shape(tensor_shape.begin(), tensor_shape.end());
  shape[0] = end - start;
  uint8_t *ptr = reinterpret_cast<uint8_t *>(tensor.data_ptr());
  res = torch::from_blob(ptr + start * tensor.stride(0) * tensor.itemsize(),
                         torch::IntArrayRef(shape), tensor.options());
  return res;
}

namespace detail {
// Human-readable name used in dispatch error messages.
template <> struct TypeToString<at::Half> {
  static constexpr const char *value = "half";
};
} // namespace detail

} // namespace tv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/torch_utils.h
C/C++ Header
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Legacy torch <-> tv::TensorView helpers. A newer variant of this header
// lives in tensorview/torch_utils.h; this one keeps a switch-based dtype
// check and supports fewer dtypes.
#pragma once
#include <tensorview/mp_helper.h>
#include <tensorview/tensorview.h>

#include <ATen/ATen.h>
#include <torch/script.h>
#ifdef TV_CUDA
#include <ATen/cuda/CUDAContext.h>
#endif
namespace tv {

#ifdef TV_CUDA
// GPU context that reuses PyTorch's current CUDA stream.
struct TorchGPU : public tv::GPU {
  virtual cudaStream_t getStream() const override {
    return at::cuda::getCurrentCUDAStream();
  }
};
#endif

// Assert that the tensor's runtime dtype matches the compile-time type T.
// Supported dtypes: double, float, int, half, long.
template <typename T> void check_torch_dtype(const torch::Tensor &tensor) {
  switch (tensor.scalar_type()) {
  case at::ScalarType::Double: {
    auto val = std::is_same<std::remove_const_t<T>, double>::value;
    TV_ASSERT_RT_ERR(val, "error");
    break;
  }
  case at::ScalarType::Float: {
    auto val = std::is_same<std::remove_const_t<T>, float>::value;
    TV_ASSERT_RT_ERR(val, "error");
    break;
  }
  case at::ScalarType::Int: {
    auto val = std::is_same<std::remove_const_t<T>, int>::value;
    TV_ASSERT_RT_ERR(val, "error");
    break;
  }
  case at::ScalarType::Half: {
    auto val = std::is_same<std::remove_const_t<T>, at::Half>::value;
    TV_ASSERT_RT_ERR(val, "error");
    break;
  }
  case at::ScalarType::Long: {
    auto val = std::is_same<std::remove_const_t<T>, long>::value;
    TV_ASSERT_RT_ERR(val, "error");
    break;
  }
  default:
    TV_ASSERT_RT_ERR(false, "error");
  }
}
namespace detail {
// Map a C++ element type to the corresponding torch dtype constant.
template <typename T> struct TypeToTorchDtypeTraits;
template <> struct TypeToTorchDtypeTraits<int32_t> {
  static constexpr decltype(torch::kInt32) value = torch::kInt32;
};
template <> struct TypeToTorchDtypeTraits<int64_t> {
  static constexpr decltype(torch::kInt32) value = torch::kInt64;
};
template <> struct TypeToTorchDtypeTraits<float> {
  static constexpr decltype(torch::kInt32) value = torch::kFloat32;
};
template <> struct TypeToTorchDtypeTraits<double> {
  static constexpr decltype(torch::kInt32) value = torch::kFloat64;
};
template <> struct TypeToTorchDtypeTraits<at::Half> {
  static constexpr decltype(torch::kInt32) value = torch::kHalf;
};
} // namespace detail
template <typename T>
constexpr decltype(torch::kInt32) torch_type_v =
    detail::TypeToTorchDtypeTraits<T>::value;

// Zero-copy, dynamic-rank view of a torch tensor; shares the tensor's memory.
template <typename T> tv::TensorView<T> torch2tv(const torch::Tensor &tensor) {
  check_torch_dtype<T>(tensor);
  tv::Shape shape;
  for (auto i : tensor.sizes()) {
    shape.push_back(i);
  }
  return tv::TensorView<T>(tensor.data_ptr<std::remove_const_t<T>>(), shape);
}

namespace detail {
// Human-readable name used in dispatch error messages.
template <> struct TypeToString<at::Half> {
  static constexpr const char *value = "half";
};
} // namespace detail

// Invoke f with a default-constructed value of whichever of Ts... matches
// the runtime ScalarType t; throws if none matches.
// NOTE(review): the `spconv::tv::mp_for_each` / `spconv::mp_list`
// qualification differs from the sibling header tensorview/torch_utils.h,
// which uses `tv::mp_for_each<mp_list<...>>` — verify these names resolve
// (this function may be dead code kept only for reference).
template <class... Ts, typename F> void dispatch_torch(at::ScalarType t, F &&f) {
  static_assert(sizeof...(Ts) > 0, "you need to provide at least one type");
  bool notFound = true;
  spconv::tv::mp_for_each<spconv::mp_list<Ts...>>([=, &notFound, &f](auto I) {
    if (torch_type_v<decltype(I)> == t) {
      std::forward<F>(f)(decltype(I)());
      notFound = false;
    }
  });
  if (notFound) {
    // Build the list of supported type names for the error message.
    std::stringstream ss;
    spconv::tv::mp_for_each<spconv::mp_list<Ts...>>([=, &ss](auto I) {
      ss << tv::detail::TypeToString<decltype(I)>::value << " ";
    });
    TV_THROW_RT_ERR("unknown type", t, ", available: ", ss.str());
  }
}
} // namespace tv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tsl/robin_growth_policy.h
C/C++ Header
/**
 * MIT License
 *
 * Copyright (c) 2017 Tessil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef TSL_ROBIN_GROWTH_POLICY_H
#define TSL_ROBIN_GROWTH_POLICY_H

#include <algorithm>
#include <array>
#include <climits>
#include <cmath>
#include <cstddef>
#include <iterator>
#include <limits>
#include <ratio>
#include <stdexcept>

#ifdef TSL_DEBUG
#define tsl_rh_assert(expr) assert(expr)
#else
#define tsl_rh_assert(expr) (static_cast<void>(0))
#endif

/**
 * If exceptions are enabled, throw the exception passed in parameter, otherwise
 * call std::terminate.
 */
#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) ||                     \
     (defined(_MSC_VER) && defined(_CPPUNWIND))) &&                            \
    !defined(TSL_NO_EXCEPTIONS)
#define TSL_RH_THROW_OR_TERMINATE(ex, msg) throw ex(msg)
#else
#ifdef NDEBUG
#define TSL_RH_THROW_OR_TERMINATE(ex, msg) std::terminate()
#else
#include <cstdio>
#define TSL_RH_THROW_OR_TERMINATE(ex, msg)                                     \
  do {                                                                         \
    std::fprintf(stderr, msg);                                                 \
    std::terminate();                                                          \
  } while (0)
#endif
#endif

#if defined(__GNUC__) || defined(__clang__)
#define TSL_RH_LIKELY(exp) (__builtin_expect(!!(exp), true))
#else
#define TSL_RH_LIKELY(exp) (exp)
#endif

namespace tsl {
namespace rh {

/**
 * Grow the hash table by a factor of GrowthFactor keeping the bucket count to a
 * power of two. It allows the table to use a mask operation instead of a modulo
 * operation to map a hash to a bucket.
 *
 * GrowthFactor must be a power of two >= 2.
 */
// (review fix: the error-message typo "maxmimum" is corrected to "maximum"
// throughout this header.)
template <std::size_t GrowthFactor> class power_of_two_growth_policy {
public:
  /**
   * Called on the hash table creation and on rehash. The number of buckets for
   * the table is passed in parameter. This number is a minimum, the policy may
   * update this value with a higher value if needed (but not lower).
   *
   * If 0 is given, min_bucket_count_in_out must still be 0 after the policy
   * creation and bucket_for_hash must always return 0 in this case.
   */
  explicit power_of_two_growth_policy(std::size_t &min_bucket_count_in_out) {
    if (min_bucket_count_in_out > max_bucket_count()) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    if (min_bucket_count_in_out > 0) {
      min_bucket_count_in_out =
          round_up_to_power_of_two(min_bucket_count_in_out);
      m_mask = min_bucket_count_in_out - 1;
    } else {
      m_mask = 0;
    }
  }

  /**
   * Return the bucket [0, bucket_count()) to which the hash belongs.
   * If bucket_count() is 0, it must always return 0.
   */
  std::size_t bucket_for_hash(std::size_t hash) const noexcept {
    return hash & m_mask;
  }

  /**
   * Return the number of buckets that should be used on next growth.
   */
  std::size_t next_bucket_count() const {
    if ((m_mask + 1) > max_bucket_count() / GrowthFactor) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    return (m_mask + 1) * GrowthFactor;
  }

  /**
   * Return the maximum number of buckets supported by the policy.
   */
  std::size_t max_bucket_count() const {
    // Largest power of two.
    return (std::numeric_limits<std::size_t>::max() / 2) + 1;
  }

  /**
   * Reset the growth policy as if it was created with a bucket count of 0.
   * After a clear, the policy must always return 0 when bucket_for_hash is
   * called.
   */
  void clear() noexcept { m_mask = 0; }

private:
  static std::size_t round_up_to_power_of_two(std::size_t value) {
    if (is_power_of_two(value)) {
      return value;
    }

    if (value == 0) {
      return 1;
    }

    --value;
    for (std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) {
      value |= value >> i;
    }

    return value + 1;
  }

  static constexpr bool is_power_of_two(std::size_t value) {
    return value != 0 && (value & (value - 1)) == 0;
  }

protected:
  static_assert(is_power_of_two(GrowthFactor) && GrowthFactor >= 2,
                "GrowthFactor must be a power of two >= 2.");

  std::size_t m_mask;
};

/**
 * Grow the hash table by GrowthFactor::num / GrowthFactor::den and use a modulo
 * to map a hash to a bucket. Slower but it can be useful if you want a slower
 * growth.
 */
template <class GrowthFactor = std::ratio<3, 2>> class mod_growth_policy {
public:
  explicit mod_growth_policy(std::size_t &min_bucket_count_in_out) {
    if (min_bucket_count_in_out > max_bucket_count()) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    if (min_bucket_count_in_out > 0) {
      m_mod = min_bucket_count_in_out;
    } else {
      m_mod = 1;
    }
  }

  std::size_t bucket_for_hash(std::size_t hash) const noexcept {
    return hash % m_mod;
  }

  std::size_t next_bucket_count() const {
    if (m_mod == max_bucket_count()) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    const double next_bucket_count =
        std::ceil(double(m_mod) * REHASH_SIZE_MULTIPLICATION_FACTOR);
    if (!std::isnormal(next_bucket_count)) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    if (next_bucket_count > double(max_bucket_count())) {
      return max_bucket_count();
    } else {
      return std::size_t(next_bucket_count);
    }
  }

  std::size_t max_bucket_count() const { return MAX_BUCKET_COUNT; }

  void clear() noexcept { m_mod = 1; }

private:
  static constexpr double REHASH_SIZE_MULTIPLICATION_FACTOR =
      1.0 * GrowthFactor::num / GrowthFactor::den;
  static const std::size_t MAX_BUCKET_COUNT =
      std::size_t(double(std::numeric_limits<std::size_t>::max() /
                         REHASH_SIZE_MULTIPLICATION_FACTOR));

  static_assert(REHASH_SIZE_MULTIPLICATION_FACTOR >= 1.1,
                "Growth factor should be >= 1.1.");

  std::size_t m_mod;
};

namespace detail {

static constexpr const std::array<std::size_t, 40> PRIMES = {
    {1ul,         5ul,         17ul,       29ul,       37ul,
     53ul,        67ul,        79ul,       97ul,       131ul,
     193ul,       257ul,       389ul,      521ul,      769ul,
     1031ul,      1543ul,      2053ul,     3079ul,     6151ul,
     12289ul,     24593ul,     49157ul,    98317ul,    196613ul,
     393241ul,    786433ul,    1572869ul,  3145739ul,  6291469ul,
     12582917ul,  25165843ul,  50331653ul, 100663319ul, 201326611ul,
     402653189ul, 805306457ul, 1610612741ul, 3221225473ul, 4294967291ul}};

template <unsigned int IPrime> static constexpr std::size_t
mod(std::size_t hash) {
  return hash % PRIMES[IPrime];
}

// MOD_PRIME[iprime](hash) returns hash % PRIMES[iprime]. This table allows for
// faster modulo as the compiler can optimize the modulo code better with a
// constant known at the compilation.
static constexpr const std::array<std::size_t (*)(std::size_t), 40> MOD_PRIME =
    {{&mod<0>,  &mod<1>,  &mod<2>,  &mod<3>,  &mod<4>,  &mod<5>,  &mod<6>,
      &mod<7>,  &mod<8>,  &mod<9>,  &mod<10>, &mod<11>, &mod<12>, &mod<13>,
      &mod<14>, &mod<15>, &mod<16>, &mod<17>, &mod<18>, &mod<19>, &mod<20>,
      &mod<21>, &mod<22>, &mod<23>, &mod<24>, &mod<25>, &mod<26>, &mod<27>,
      &mod<28>, &mod<29>, &mod<30>, &mod<31>, &mod<32>, &mod<33>, &mod<34>,
      &mod<35>, &mod<36>, &mod<37>, &mod<38>, &mod<39>}};

} // namespace detail

/**
 * Grow the hash table by using prime numbers as bucket count. Slower than
 * tsl::rh::power_of_two_growth_policy in general but will probably distribute
 * the values around better in the buckets with a poor hash function.
 *
 * To allow the compiler to optimize the modulo operation, a lookup table is
 * used with constant primes numbers.
 *
 * With a switch the code would look like:
 * \code
 * switch(iprime) { // iprime is the current prime of the hash table
 *     case 0: hash % 5ul;
 *             break;
 *     case 1: hash % 17ul;
 *             break;
 *     case 2: hash % 29ul;
 *             break;
 *     ...
 * }
 * \endcode
 *
 * Due to the constant variable in the modulo the compiler is able to optimize
 * the operation by a series of multiplications, subtractions and shifts.
 *
 * The 'hash % 5' could become something like 'hash - (hash * 0xCCCCCCCD) >> 34)
 * * 5' in a 64 bits environment.
 */
class prime_growth_policy {
public:
  explicit prime_growth_policy(std::size_t &min_bucket_count_in_out) {
    auto it_prime = std::lower_bound(
        detail::PRIMES.begin(), detail::PRIMES.end(), min_bucket_count_in_out);
    if (it_prime == detail::PRIMES.end()) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    m_iprime = static_cast<unsigned int>(
        std::distance(detail::PRIMES.begin(), it_prime));
    if (min_bucket_count_in_out > 0) {
      min_bucket_count_in_out = *it_prime;
    } else {
      min_bucket_count_in_out = 0;
    }
  }

  std::size_t bucket_for_hash(std::size_t hash) const noexcept {
    return detail::MOD_PRIME[m_iprime](hash);
  }

  std::size_t next_bucket_count() const {
    if (m_iprime + 1 >= detail::PRIMES.size()) {
      TSL_RH_THROW_OR_TERMINATE(std::length_error,
                                "The hash table exceeds its maximum size.");
    }

    return detail::PRIMES[m_iprime + 1];
  }

  std::size_t max_bucket_count() const { return detail::PRIMES.back(); }

  void clear() noexcept { m_iprime = 0; }

private:
  unsigned int m_iprime;

  static_assert(std::numeric_limits<decltype(m_iprime)>::max() >=
                    detail::PRIMES.size(),
                "The type of m_iprime is not big enough.");
};

} // namespace rh
} // namespace tsl

#endif
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tsl/robin_hash.h
C/C++ Header
/** * MIT License * * Copyright (c) 2017 Tessil * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/
#ifndef TSL_ROBIN_HASH_H
#define TSL_ROBIN_HASH_H

#include "robin_growth_policy.h"

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <iterator>
#include <limits>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

namespace tsl {

namespace detail_robin_hash {

// C++11 substitute for std::void_t (C++17): maps any type to void so it can
// be used for SFINAE detection below.
template <typename T> struct make_void { using type = void; };

// Detects whether T exposes a nested `is_transparent` type, i.e. whether a
// hash/equality functor supports heterogeneous (transparent) lookup.
template <typename T, typename = void>
struct has_is_transparent : std::false_type {};

template <typename T>
struct has_is_transparent<T,
                          typename make_void<typename T::is_transparent>::type>
    : std::true_type {};

// True only for tsl::rh::power_of_two_growth_policy; the hash table uses this
// to enable mask-based bucket indexing and stored-hash reuse on rehash.
template <typename U> struct is_power_of_two_policy : std::false_type {};

template <std::size_t GrowthFactor>
struct is_power_of_two_policy<tsl::rh::power_of_two_growth_policy<GrowthFactor>>
    : std::true_type {};

// Only available in C++17, we need to be compatible with C++11
template <class T> const T &clamp(const T &v, const T &lo, const T &hi) {
  return std::min(hi, std::max(lo, v));
}

// Type used when a (possibly 64-bit) hash is stored inside a bucket entry;
// hashes wider than 32 bits are truncated to this type.
using truncated_hash_type = std::uint_least32_t;

/**
 * Helper class that stores a truncated hash if StoreHash is true and nothing
 * otherwise.
 */
template <bool StoreHash> class bucket_entry_hash {
public:
  // No hash stored: every hash compares equal so lookup falls through to
  // the full key comparison.
  bool bucket_hash_equal(std::size_t /*hash*/) const noexcept { return true; }

  truncated_hash_type truncated_hash() const noexcept { return 0; }

protected:
  void set_hash(truncated_hash_type /*hash*/) noexcept {}
};

template <> class bucket_entry_hash<true> {
public:
  // Compare against the stored truncated hash to cheaply reject mismatching
  // keys before the (potentially expensive) KeyEqual call.
  bool bucket_hash_equal(std::size_t hash) const noexcept {
    return m_hash == truncated_hash_type(hash);
  }

  truncated_hash_type truncated_hash() const noexcept { return m_hash; }

protected:
  void set_hash(truncated_hash_type hash) noexcept {
    m_hash = truncated_hash_type(hash);
  }

private:
  truncated_hash_type m_hash;
};

/**
 * Each bucket entry has:
 * - A value of type `ValueType`.
 * - An integer to store how far the value of the bucket, if any, is from its
 * ideal bucket (ex: if the current bucket 5 has the value 'foo' and
 * `hash('foo') % nb_buckets` == 3, `dist_from_ideal_bucket()` will return 2 as
 * the current value of the bucket is two buckets away from its ideal bucket) If
 * there is no value in the bucket (i.e. `empty()` is true)
 * `dist_from_ideal_bucket()` will be < 0.
 * - A marker which tells us if the bucket is the last bucket of the bucket
 * array (useful for the iterator of the hash table).
 * - If `StoreHash` is true, 32 bits of the hash of the value, if any, are also
 * stored in the bucket. If the size of the hash is more than 32 bits, it is
 * truncated. We don't store the full hash as storing the hash is a potential
 * opportunity to use the unused space due to the alignment of the bucket_entry
 * structure. We can thus potentially store the hash without any extra space
 * (which would not be possible with 64 bits of the hash).
 */
template <typename ValueType, bool StoreHash>
class bucket_entry : public bucket_entry_hash<StoreHash> {
  using bucket_hash = bucket_entry_hash<StoreHash>;

public:
  using value_type = ValueType;
  using distance_type = std::int_least16_t;

  bucket_entry() noexcept
      : bucket_hash(),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(false) {
    tsl_rh_assert(empty());
  }

  bucket_entry(bool last_bucket) noexcept
      : bucket_hash(),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(last_bucket) {
    tsl_rh_assert(empty());
  }

  // Copy: starts as an empty bucket, then placement-news a copy of the
  // value only if `other` actually holds one.
  bucket_entry(const bucket_entry &other) noexcept(
      std::is_nothrow_copy_constructible<value_type>::value)
      : bucket_hash(other),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(other.m_last_bucket) {
    if (!other.empty()) {
      ::new (static_cast<void *>(std::addressof(m_value)))
          value_type(other.value());
      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
    }
  }

  /**
   * Never really used, but still necessary as we must call resize on an empty
   * `std::vector<bucket_entry>`. and we need to support move-only types. See
   * robin_hash constructor for details.
   */
  bucket_entry(bucket_entry &&other) noexcept(
      std::is_nothrow_move_constructible<value_type>::value)
      : bucket_hash(std::move(other)),
        m_dist_from_ideal_bucket(EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET),
        m_last_bucket(other.m_last_bucket) {
    if (!other.empty()) {
      ::new (static_cast<void *>(std::addressof(m_value)))
          value_type(std::move(other.value()));
      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
    }
  }

  // Copy-assign: destroy the current value (if any) first, then copy-construct
  // into the raw storage; distance/last-bucket markers are copied afterwards.
  bucket_entry &operator=(const bucket_entry &other) noexcept(
      std::is_nothrow_copy_constructible<value_type>::value) {
    if (this != &other) {
      clear();

      bucket_hash::operator=(other);
      if (!other.empty()) {
        ::new (static_cast<void *>(std::addressof(m_value)))
            value_type(other.value());
      }

      m_dist_from_ideal_bucket = other.m_dist_from_ideal_bucket;
      m_last_bucket = other.m_last_bucket;
    }

    return *this;
  }

  bucket_entry &operator=(bucket_entry &&) = delete;

  ~bucket_entry() noexcept { clear(); }

  // Destroy the held value (if any) and mark the bucket empty; the
  // last-bucket marker is deliberately left untouched.
  void clear() noexcept {
    if (!empty()) {
      destroy_value();
      m_dist_from_ideal_bucket = EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
    }
  }

  bool empty() const noexcept {
    return m_dist_from_ideal_bucket == EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET;
  }

  value_type &value() noexcept {
    tsl_rh_assert(!empty());
    return *reinterpret_cast<value_type *>(std::addressof(m_value));
  }

  const value_type &value() const noexcept {
    tsl_rh_assert(!empty());
    return *reinterpret_cast<const value_type *>(std::addressof(m_value));
  }

  distance_type dist_from_ideal_bucket() const noexcept {
    return m_dist_from_ideal_bucket;
  }

  bool last_bucket() const noexcept { return m_last_bucket; }

  void set_as_last_bucket() noexcept { m_last_bucket = true; }

  // Construct a value in-place in an empty bucket with the given probe
  // distance and (possibly unused) truncated hash.
  template <typename... Args>
  void set_value_of_empty_bucket(distance_type dist_from_ideal_bucket,
                                 truncated_hash_type hash,
                                 Args &&... value_type_args) {
    tsl_rh_assert(dist_from_ideal_bucket >= 0);
    tsl_rh_assert(empty());

    ::new (static_cast<void *>(std::addressof(m_value)))
        value_type(std::forward<Args>(value_type_args)...);
    this->set_hash(hash);
    m_dist_from_ideal_bucket = dist_from_ideal_bucket;

    tsl_rh_assert(!empty());
  }

  // Robin-hood swap: exchange the incoming value/distance/hash with the ones
  // currently stored in this (non-empty) bucket; the displaced triple is
  // returned through the reference parameters so probing can continue.
  void swap_with_value_in_bucket(distance_type &dist_from_ideal_bucket,
                                 truncated_hash_type &hash, value_type &value) {
    tsl_rh_assert(!empty());

    using std::swap;
    swap(value, this->value());
    swap(dist_from_ideal_bucket, m_dist_from_ideal_bucket);

    // Avoid warning of unused variable if StoreHash is false
    (void)hash;
    if (StoreHash) {
      const truncated_hash_type tmp_hash = this->truncated_hash();
      this->set_hash(hash);
      hash = tmp_hash;
    }
  }

  static truncated_hash_type truncate_hash(std::size_t hash) noexcept {
    return truncated_hash_type(hash);
  }

private:
  void destroy_value() noexcept {
    tsl_rh_assert(!empty());
    value().~value_type();
  }

private:
  // Raw, suitably aligned storage for value_type; constructed/destroyed
  // manually with placement new / explicit destructor calls.
  using storage = typename std::aligned_storage<sizeof(value_type),
                                                alignof(value_type)>::type;

  // Sentinel probe distance marking an empty bucket (valid distances are >= 0).
  static const distance_type EMPTY_MARKER_DIST_FROM_IDEAL_BUCKET = -1;

  distance_type m_dist_from_ideal_bucket;
  bool m_last_bucket;
  storage m_value;
};

/**
 * Internal common class used by `robin_map` and `robin_set`.
 *
 * ValueType is what will be stored by `robin_hash` (usually `std::pair<Key, T>`
 * for map and `Key` for set).
 *
 * `KeySelect` should be a `FunctionObject` which takes a `ValueType` in
 * parameter and returns a reference to the key.
 *
 * `ValueSelect` should be a `FunctionObject` which takes a `ValueType` in
 * parameter and returns a reference to the value. `ValueSelect` should be void
 * if there is no value (in a set for example).
 *
 * The strong exception guarantee only holds if the expression
 * `std::is_nothrow_swappable<ValueType>::value &&
 * std::is_nothrow_move_constructible<ValueType>::value` is true.
 *
 * Behaviour is undefined if the destructor of `ValueType` throws.
*/ template <class ValueType, class KeySelect, class ValueSelect, class Hash, class KeyEqual, class Allocator, bool StoreHash, class GrowthPolicy> class robin_hash : private Hash, private KeyEqual, private GrowthPolicy { private: template <typename U> using has_mapped_type = typename std::integral_constant<bool, !std::is_same<U, void>::value>; static_assert( noexcept(std::declval<GrowthPolicy>().bucket_for_hash(std::size_t(0))), "GrowthPolicy::bucket_for_hash must be noexcept."); static_assert(noexcept(std::declval<GrowthPolicy>().clear()), "GrowthPolicy::clear must be noexcept."); public: template <bool IsConst> class robin_iterator; using key_type = typename KeySelect::key_type; using value_type = ValueType; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using hasher = Hash; using key_equal = KeyEqual; using allocator_type = Allocator; using reference = value_type &; using const_reference = const value_type &; using pointer = value_type *; using const_pointer = const value_type *; using iterator = robin_iterator<false>; using const_iterator = robin_iterator<true>; private: /** * Either store the hash because we are asked by the `StoreHash` template * parameter or store the hash because it doesn't cost us anything in size and * can be used to speed up rehash. */ static constexpr bool STORE_HASH = StoreHash || ((sizeof(tsl::detail_robin_hash::bucket_entry<value_type, true>) == sizeof(tsl::detail_robin_hash::bucket_entry<value_type, false>)) && (sizeof(std::size_t) == sizeof(truncated_hash_type) || is_power_of_two_policy<GrowthPolicy>::value) && // Don't store the hash for primitive types with default hash. (!std::is_arithmetic<key_type>::value || !std::is_same<Hash, std::hash<key_type>>::value)); /** * Only use the stored hash on lookup if we are explictly asked. We are not * sure how slow the KeyEqual operation is. An extra comparison may slow * things down with a fast KeyEqual. 
*/ static constexpr bool USE_STORED_HASH_ON_LOOKUP = StoreHash; /** * We can only use the hash on rehash if the size of the hash type is the same * as the stored one or if we use a power of two modulo. In the case of the * power of two modulo, we just mask the least significant bytes, we just have * to check that the truncated_hash_type didn't truncated more bytes. */ static bool USE_STORED_HASH_ON_REHASH(size_type bucket_count) { (void)bucket_count; if (STORE_HASH && sizeof(std::size_t) == sizeof(truncated_hash_type)) { return true; } else if (STORE_HASH && is_power_of_two_policy<GrowthPolicy>::value) { tsl_rh_assert(bucket_count > 0); return (bucket_count - 1) <= std::numeric_limits<truncated_hash_type>::max(); } else { return false; } } using bucket_entry = tsl::detail_robin_hash::bucket_entry<value_type, STORE_HASH>; using distance_type = typename bucket_entry::distance_type; using buckets_allocator = typename std::allocator_traits< allocator_type>::template rebind_alloc<bucket_entry>; using buckets_container_type = std::vector<bucket_entry, buckets_allocator>; public: /** * The 'operator*()' and 'operator->()' methods return a const reference and * const pointer respectively to the stored value type. * * In case of a map, to get a mutable reference to the value associated to a * key (the '.second' in the stored pair), you have to call 'value()'. * * The main reason for this is that if we returned a `std::pair<Key, T>&` * instead of a `const std::pair<Key, T>&`, the user may modify the key which * will put the map in a undefined state. 
*/ template <bool IsConst> class robin_iterator { friend class robin_hash; private: using bucket_entry_ptr = typename std::conditional<IsConst, const bucket_entry *, bucket_entry *>::type; robin_iterator(bucket_entry_ptr bucket) noexcept : m_bucket(bucket) {} public: using iterator_category = std::forward_iterator_tag; using value_type = const typename robin_hash::value_type; using difference_type = std::ptrdiff_t; using reference = value_type &; using pointer = value_type *; robin_iterator() noexcept {} // Copy constructor from iterator to const_iterator. template <bool TIsConst = IsConst, typename std::enable_if<TIsConst>::type * = nullptr> robin_iterator(const robin_iterator<!TIsConst> &other) noexcept : m_bucket(other.m_bucket) {} robin_iterator(const robin_iterator &other) = default; robin_iterator(robin_iterator &&other) = default; robin_iterator &operator=(const robin_iterator &other) = default; robin_iterator &operator=(robin_iterator &&other) = default; const typename robin_hash::key_type &key() const { return KeySelect()(m_bucket->value()); } template <class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value && IsConst>::type * = nullptr> const typename U::value_type &value() const { return U()(m_bucket->value()); } template <class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value && !IsConst>::type * = nullptr> typename U::value_type &value() { return U()(m_bucket->value()); } reference operator*() const { return m_bucket->value(); } pointer operator->() const { return std::addressof(m_bucket->value()); } robin_iterator &operator++() { while (true) { if (m_bucket->last_bucket()) { ++m_bucket; return *this; } ++m_bucket; if (!m_bucket->empty()) { return *this; } } } robin_iterator operator++(int) { robin_iterator tmp(*this); ++*this; return tmp; } friend bool operator==(const robin_iterator &lhs, const robin_iterator &rhs) { return lhs.m_bucket == rhs.m_bucket; } friend bool operator!=(const robin_iterator &lhs, const 
robin_iterator &rhs) { return !(lhs == rhs); } private: bucket_entry_ptr m_bucket; }; public: #if defined(__cplusplus) && __cplusplus >= 201402L robin_hash(size_type bucket_count, const Hash &hash, const KeyEqual &equal, const Allocator &alloc, float min_load_factor = DEFAULT_MIN_LOAD_FACTOR, float max_load_factor = DEFAULT_MAX_LOAD_FACTOR) : Hash(hash), KeyEqual(equal), GrowthPolicy(bucket_count), m_buckets_data( [&]() { if (bucket_count > max_bucket_count()) { TSL_RH_THROW_OR_TERMINATE( std::length_error, "The map exceeds its maximum bucket count."); } return bucket_count; }(), alloc), m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr() : m_buckets_data.data()), m_bucket_count(bucket_count), m_nb_elements(0), m_grow_on_next_insert(false), m_try_skrink_on_next_insert(false) { if (m_bucket_count > 0) { tsl_rh_assert(!m_buckets_data.empty()); m_buckets_data.back().set_as_last_bucket(); } this->min_load_factor(min_load_factor); this->max_load_factor(max_load_factor); } #else /** * C++11 doesn't support the creation of a std::vector with a custom allocator * and 'count' default-inserted elements. The needed contructor `explicit * vector(size_type count, const Allocator& alloc = Allocator());` is only * available in C++14 and later. We thus must resize after using the * `vector(const Allocator& alloc)` constructor. * * We can't use `vector(size_type count, const T& value, const Allocator& * alloc)` as it requires the value T to be copyable. 
*/ robin_hash(size_type bucket_count, const Hash &hash, const KeyEqual &equal, const Allocator &alloc, float min_load_factor = DEFAULT_MIN_LOAD_FACTOR, float max_load_factor = DEFAULT_MAX_LOAD_FACTOR) : Hash(hash), KeyEqual(equal), GrowthPolicy(bucket_count), m_buckets_data(alloc), m_buckets(static_empty_bucket_ptr()), m_bucket_count(bucket_count), m_nb_elements(0), m_grow_on_next_insert(false), m_try_skrink_on_next_insert(false) { if (bucket_count > max_bucket_count()) { TSL_RH_THROW_OR_TERMINATE(std::length_error, "The map exceeds its maxmimum bucket count."); } if (m_bucket_count > 0) { m_buckets_data.resize(m_bucket_count); m_buckets = m_buckets_data.data(); tsl_rh_assert(!m_buckets_data.empty()); m_buckets_data.back().set_as_last_bucket(); } this->min_load_factor(min_load_factor); this->max_load_factor(max_load_factor); } #endif robin_hash(const robin_hash &other) : Hash(other), KeyEqual(other), GrowthPolicy(other), m_buckets_data(other.m_buckets_data), m_buckets(m_buckets_data.empty() ? static_empty_bucket_ptr() : m_buckets_data.data()), m_bucket_count(other.m_bucket_count), m_nb_elements(other.m_nb_elements), m_load_threshold(other.m_load_threshold), m_max_load_factor(other.m_max_load_factor), m_grow_on_next_insert(other.m_grow_on_next_insert), m_min_load_factor(other.m_min_load_factor), m_try_skrink_on_next_insert(other.m_try_skrink_on_next_insert) {} robin_hash(robin_hash &&other) noexcept( std::is_nothrow_move_constructible< Hash>::value &&std::is_nothrow_move_constructible<KeyEqual>::value &&std::is_nothrow_move_constructible<GrowthPolicy>::value && std::is_nothrow_move_constructible<buckets_container_type>::value) : Hash(std::move(static_cast<Hash &>(other))), KeyEqual(std::move(static_cast<KeyEqual &>(other))), GrowthPolicy(std::move(static_cast<GrowthPolicy &>(other))), m_buckets_data(std::move(other.m_buckets_data)), m_buckets(m_buckets_data.empty() ? 
static_empty_bucket_ptr() : m_buckets_data.data()), m_bucket_count(other.m_bucket_count), m_nb_elements(other.m_nb_elements), m_load_threshold(other.m_load_threshold), m_max_load_factor(other.m_max_load_factor), m_grow_on_next_insert(other.m_grow_on_next_insert), m_min_load_factor(other.m_min_load_factor), m_try_skrink_on_next_insert(other.m_try_skrink_on_next_insert) { other.GrowthPolicy::clear(); other.m_buckets_data.clear(); other.m_buckets = static_empty_bucket_ptr(); other.m_bucket_count = 0; other.m_nb_elements = 0; other.m_load_threshold = 0; other.m_grow_on_next_insert = false; other.m_try_skrink_on_next_insert = false; } robin_hash &operator=(const robin_hash &other) { if (&other != this) { Hash::operator=(other); KeyEqual::operator=(other); GrowthPolicy::operator=(other); m_buckets_data = other.m_buckets_data; m_buckets = m_buckets_data.empty() ? static_empty_bucket_ptr() : m_buckets_data.data(); m_bucket_count = other.m_bucket_count; m_nb_elements = other.m_nb_elements; m_load_threshold = other.m_load_threshold; m_max_load_factor = other.m_max_load_factor; m_grow_on_next_insert = other.m_grow_on_next_insert; m_min_load_factor = other.m_min_load_factor; m_try_skrink_on_next_insert = other.m_try_skrink_on_next_insert; } return *this; } robin_hash &operator=(robin_hash &&other) { other.swap(*this); other.clear(); return *this; } allocator_type get_allocator() const { return m_buckets_data.get_allocator(); } /* * Iterators */ iterator begin() noexcept { std::size_t i = 0; while (i < m_bucket_count && m_buckets[i].empty()) { i++; } return iterator(m_buckets + i); } const_iterator begin() const noexcept { return cbegin(); } const_iterator cbegin() const noexcept { std::size_t i = 0; while (i < m_bucket_count && m_buckets[i].empty()) { i++; } return const_iterator(m_buckets + i); } iterator end() noexcept { return iterator(m_buckets + m_bucket_count); } const_iterator end() const noexcept { return cend(); } const_iterator cend() const noexcept { return 
const_iterator(m_buckets + m_bucket_count); } /* * Capacity */ bool empty() const noexcept { return m_nb_elements == 0; } size_type size() const noexcept { return m_nb_elements; } size_type max_size() const noexcept { return m_buckets_data.max_size(); } /* * Modifiers */ void clear() noexcept { for (auto &bucket : m_buckets_data) { bucket.clear(); } m_nb_elements = 0; m_grow_on_next_insert = false; } template <typename P> std::pair<iterator, bool> insert(P &&value) { return insert_impl(KeySelect()(value), std::forward<P>(value)); } template <typename P> iterator insert_hint(const_iterator hint, P &&value) { if (hint != cend() && compare_keys(KeySelect()(*hint), KeySelect()(value))) { return mutable_iterator(hint); } return insert(std::forward<P>(value)).first; } template <class InputIt> void insert(InputIt first, InputIt last) { if (std::is_base_of< std::forward_iterator_tag, typename std::iterator_traits<InputIt>::iterator_category>::value) { const auto nb_elements_insert = std::distance(first, last); const size_type nb_free_buckets = m_load_threshold - size(); tsl_rh_assert(m_load_threshold >= size()); if (nb_elements_insert > 0 && nb_free_buckets < size_type(nb_elements_insert)) { reserve(size() + size_type(nb_elements_insert)); } } for (; first != last; ++first) { insert(*first); } } template <class K, class M> std::pair<iterator, bool> insert_or_assign(K &&key, M &&obj) { auto it = try_emplace(std::forward<K>(key), std::forward<M>(obj)); if (!it.second) { it.first.value() = std::forward<M>(obj); } return it; } template <class K, class M> iterator insert_or_assign(const_iterator hint, K &&key, M &&obj) { if (hint != cend() && compare_keys(KeySelect()(*hint), key)) { auto it = mutable_iterator(hint); it.value() = std::forward<M>(obj); return it; } return insert_or_assign(std::forward<K>(key), std::forward<M>(obj)).first; } template <class... Args> std::pair<iterator, bool> emplace(Args &&... 
args) { return insert(value_type(std::forward<Args>(args)...)); } template <class... Args> iterator emplace_hint(const_iterator hint, Args &&... args) { return insert_hint(hint, value_type(std::forward<Args>(args)...)); } template <class K, class... Args> std::pair<iterator, bool> try_emplace(K &&key, Args &&... args) { return insert_impl(key, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(key)), std::forward_as_tuple(std::forward<Args>(args)...)); } template <class K, class... Args> iterator try_emplace_hint(const_iterator hint, K &&key, Args &&... args) { if (hint != cend() && compare_keys(KeySelect()(*hint), key)) { return mutable_iterator(hint); } return try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first; } /** * Here to avoid `template<class K> size_type erase(const K& key)` being used * when we use an `iterator` instead of a `const_iterator`. */ iterator erase(iterator pos) { erase_from_bucket(pos); /** * Erase bucket used a backward shift after clearing the bucket. * Check if there is a new value in the bucket, if not get the next * non-empty. */ if (pos.m_bucket->empty()) { ++pos; } m_try_skrink_on_next_insert = true; return pos; } iterator erase(const_iterator pos) { return erase(mutable_iterator(pos)); } iterator erase(const_iterator first, const_iterator last) { if (first == last) { return mutable_iterator(first); } auto first_mutable = mutable_iterator(first); auto last_mutable = mutable_iterator(last); for (auto it = first_mutable.m_bucket; it != last_mutable.m_bucket; ++it) { if (!it->empty()) { it->clear(); m_nb_elements--; } } if (last_mutable == end()) { return end(); } /* * Backward shift on the values which come after the deleted values. * We try to move the values closer to their ideal bucket. 
*/ std::size_t icloser_bucket = static_cast<std::size_t>(first_mutable.m_bucket - m_buckets); std::size_t ito_move_closer_value = static_cast<std::size_t>(last_mutable.m_bucket - m_buckets); tsl_rh_assert(ito_move_closer_value > icloser_bucket); const std::size_t ireturn_bucket = ito_move_closer_value - std::min( ito_move_closer_value - icloser_bucket, std::size_t( m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); while (ito_move_closer_value < m_bucket_count && m_buckets[ito_move_closer_value].dist_from_ideal_bucket() > 0) { icloser_bucket = ito_move_closer_value - std::min( ito_move_closer_value - icloser_bucket, std::size_t( m_buckets[ito_move_closer_value].dist_from_ideal_bucket())); tsl_rh_assert(m_buckets[icloser_bucket].empty()); const distance_type new_distance = distance_type( m_buckets[ito_move_closer_value].dist_from_ideal_bucket() - (ito_move_closer_value - icloser_bucket)); m_buckets[icloser_bucket].set_value_of_empty_bucket( new_distance, m_buckets[ito_move_closer_value].truncated_hash(), std::move(m_buckets[ito_move_closer_value].value())); m_buckets[ito_move_closer_value].clear(); ++icloser_bucket; ++ito_move_closer_value; } m_try_skrink_on_next_insert = true; return iterator(m_buckets + ireturn_bucket); } template <class K> size_type erase(const K &key) { return erase(key, hash_key(key)); } template <class K> size_type erase(const K &key, std::size_t hash) { auto it = find(key, hash); if (it != end()) { erase_from_bucket(it); m_try_skrink_on_next_insert = true; return 1; } else { return 0; } } void swap(robin_hash &other) { using std::swap; swap(static_cast<Hash &>(*this), static_cast<Hash &>(other)); swap(static_cast<KeyEqual &>(*this), static_cast<KeyEqual &>(other)); swap(static_cast<GrowthPolicy &>(*this), static_cast<GrowthPolicy &>(other)); swap(m_buckets_data, other.m_buckets_data); swap(m_buckets, other.m_buckets); swap(m_bucket_count, other.m_bucket_count); swap(m_nb_elements, other.m_nb_elements); swap(m_load_threshold, 
other.m_load_threshold); swap(m_max_load_factor, other.m_max_load_factor); swap(m_grow_on_next_insert, other.m_grow_on_next_insert); swap(m_min_load_factor, other.m_min_load_factor); swap(m_try_skrink_on_next_insert, other.m_try_skrink_on_next_insert); } /* * Lookup */ template < class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type * = nullptr> typename U::value_type &at(const K &key) { return at(key, hash_key(key)); } template < class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type * = nullptr> typename U::value_type &at(const K &key, std::size_t hash) { return const_cast<typename U::value_type &>( static_cast<const robin_hash *>(this)->at(key, hash)); } template < class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type * = nullptr> const typename U::value_type &at(const K &key) const { return at(key, hash_key(key)); } template < class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type * = nullptr> const typename U::value_type &at(const K &key, std::size_t hash) const { auto it = find(key, hash); if (it != cend()) { return it.value(); } else { TSL_RH_THROW_OR_TERMINATE(std::out_of_range, "Couldn't find key."); } } template < class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type * = nullptr> typename U::value_type &operator[](K &&key) { return try_emplace(std::forward<K>(key)).first.value(); } template <class K> size_type count(const K &key) const { return count(key, hash_key(key)); } template <class K> size_type count(const K &key, std::size_t hash) const { if (find(key, hash) != cend()) { return 1; } else { return 0; } } template <class K> iterator find(const K &key) { return find_impl(key, hash_key(key)); } template <class K> iterator find(const K &key, std::size_t hash) { return find_impl(key, hash); } template <class K> const_iterator find(const K &key) const { return find_impl(key, 
hash_key(key)); } template <class K> const_iterator find(const K &key, std::size_t hash) const { return find_impl(key, hash); } template <class K> std::pair<iterator, iterator> equal_range(const K &key) { return equal_range(key, hash_key(key)); } template <class K> std::pair<iterator, iterator> equal_range(const K &key, std::size_t hash) { iterator it = find(key, hash); return std::make_pair(it, (it == end()) ? it : std::next(it)); } template <class K> std::pair<const_iterator, const_iterator> equal_range(const K &key) const { return equal_range(key, hash_key(key)); } template <class K> std::pair<const_iterator, const_iterator> equal_range(const K &key, std::size_t hash) const { const_iterator it = find(key, hash); return std::make_pair(it, (it == cend()) ? it : std::next(it)); } /* * Bucket interface */ size_type bucket_count() const { return m_bucket_count; } size_type max_bucket_count() const { return std::min(GrowthPolicy::max_bucket_count(), m_buckets_data.max_size()); } /* * Hash policy */ float load_factor() const { if (bucket_count() == 0) { return 0; } return float(m_nb_elements) / float(bucket_count()); } float min_load_factor() const { return m_min_load_factor; } float max_load_factor() const { return m_max_load_factor; } void min_load_factor(float ml) { m_min_load_factor = clamp(ml, float(MINIMUM_MIN_LOAD_FACTOR), float(MAXIMUM_MIN_LOAD_FACTOR)); } void max_load_factor(float ml) { m_max_load_factor = clamp(ml, float(MINIMUM_MAX_LOAD_FACTOR), float(MAXIMUM_MAX_LOAD_FACTOR)); m_load_threshold = size_type(float(bucket_count()) * m_max_load_factor); } void rehash(size_type count) { count = std::max(count, size_type(std::ceil(float(size()) / max_load_factor()))); rehash_impl(count); } void reserve(size_type count) { rehash(size_type(std::ceil(float(count) / max_load_factor()))); } /* * Observers */ hasher hash_function() const { return static_cast<const Hash &>(*this); } key_equal key_eq() const { return static_cast<const KeyEqual &>(*this); } /* * Other */ 
iterator mutable_iterator(const_iterator pos) { return iterator(const_cast<bucket_entry *>(pos.m_bucket)); } private: template <class K> std::size_t hash_key(const K &key) const { return Hash::operator()(key); } template <class K1, class K2> bool compare_keys(const K1 &key1, const K2 &key2) const { return KeyEqual::operator()(key1, key2); } std::size_t bucket_for_hash(std::size_t hash) const { const std::size_t bucket = GrowthPolicy::bucket_for_hash(hash); tsl_rh_assert(bucket < m_bucket_count || (bucket == 0 && m_bucket_count == 0)); return bucket; } template <class U = GrowthPolicy, typename std::enable_if<is_power_of_two_policy<U>::value>::type * = nullptr> std::size_t next_bucket(std::size_t index) const noexcept { tsl_rh_assert(index < bucket_count()); return (index + 1) & this->m_mask; } template <class U = GrowthPolicy, typename std::enable_if<!is_power_of_two_policy<U>::value>::type * = nullptr> std::size_t next_bucket(std::size_t index) const noexcept { tsl_rh_assert(index < bucket_count()); index++; return (index != bucket_count()) ? 
index : 0; } template <class K> iterator find_impl(const K &key, std::size_t hash) { return mutable_iterator( static_cast<const robin_hash *>(this)->find(key, hash)); } template <class K> const_iterator find_impl(const K &key, std::size_t hash) const { std::size_t ibucket = bucket_for_hash(hash); distance_type dist_from_ideal_bucket = 0; while (dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { if (TSL_RH_LIKELY( (!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) && compare_keys(KeySelect()(m_buckets[ibucket].value()), key))) { return const_iterator(m_buckets + ibucket); } ibucket = next_bucket(ibucket); dist_from_ideal_bucket++; } return cend(); } void erase_from_bucket(iterator pos) { pos.m_bucket->clear(); m_nb_elements--; /** * Backward shift, swap the empty bucket, previous_ibucket, with the values * on its right, ibucket, until we cross another empty bucket or if the * other bucket has a distance_from_ideal_bucket == 0. * * We try to move the values closer to their ideal bucket. */ std::size_t previous_ibucket = static_cast<std::size_t>(pos.m_bucket - m_buckets); std::size_t ibucket = next_bucket(previous_ibucket); while (m_buckets[ibucket].dist_from_ideal_bucket() > 0) { tsl_rh_assert(m_buckets[previous_ibucket].empty()); const distance_type new_distance = distance_type(m_buckets[ibucket].dist_from_ideal_bucket() - 1); m_buckets[previous_ibucket].set_value_of_empty_bucket( new_distance, m_buckets[ibucket].truncated_hash(), std::move(m_buckets[ibucket].value())); m_buckets[ibucket].clear(); previous_ibucket = ibucket; ibucket = next_bucket(ibucket); } } template <class K, class... Args> std::pair<iterator, bool> insert_impl(const K &key, Args &&... 
value_type_args) { const std::size_t hash = hash_key(key); std::size_t ibucket = bucket_for_hash(hash); distance_type dist_from_ideal_bucket = 0; while (dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { if ((!USE_STORED_HASH_ON_LOOKUP || m_buckets[ibucket].bucket_hash_equal(hash)) && compare_keys(KeySelect()(m_buckets[ibucket].value()), key)) { return std::make_pair(iterator(m_buckets + ibucket), false); } ibucket = next_bucket(ibucket); dist_from_ideal_bucket++; } if (rehash_on_extreme_load()) { ibucket = bucket_for_hash(hash); dist_from_ideal_bucket = 0; while (dist_from_ideal_bucket <= m_buckets[ibucket].dist_from_ideal_bucket()) { ibucket = next_bucket(ibucket); dist_from_ideal_bucket++; } } if (m_buckets[ibucket].empty()) { m_buckets[ibucket].set_value_of_empty_bucket( dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), std::forward<Args>(value_type_args)...); } else { insert_value(ibucket, dist_from_ideal_bucket, bucket_entry::truncate_hash(hash), std::forward<Args>(value_type_args)...); } m_nb_elements++; /* * The value will be inserted in ibucket in any case, either because it was * empty or by stealing the bucket (robin hood). */ return std::make_pair(iterator(m_buckets + ibucket), true); } template <class... Args> void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, truncated_hash_type hash, Args &&... value_type_args) { value_type value(std::forward<Args>(value_type_args)...); insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); } void insert_value(std::size_t ibucket, distance_type dist_from_ideal_bucket, truncated_hash_type hash, value_type &&value) { insert_value_impl(ibucket, dist_from_ideal_bucket, hash, value); } /* * We don't use `value_type&& value` as last argument due to a bug in MSVC * when `value_type` is a pointer, The compiler is not able to see the * difference between `std::string*` and `std::string*&&` resulting in compile * error. 
* * The `value` will be in a moved state at the end of the function. */ void insert_value_impl(std::size_t ibucket, distance_type dist_from_ideal_bucket, truncated_hash_type hash, value_type &value) { m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); ibucket = next_bucket(ibucket); dist_from_ideal_bucket++; while (!m_buckets[ibucket].empty()) { if (dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) { if (dist_from_ideal_bucket >= REHASH_ON_HIGH_NB_PROBES__NPROBES && load_factor() >= REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR) { /** * The number of probes is really high, rehash the map on the next * insert. Difficult to do now as rehash may throw an exception. */ m_grow_on_next_insert = true; } m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); } ibucket = next_bucket(ibucket); dist_from_ideal_bucket++; } m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value)); } void rehash_impl(size_type count) { robin_hash new_table(count, static_cast<Hash &>(*this), static_cast<KeyEqual &>(*this), get_allocator(), m_min_load_factor, m_max_load_factor); const bool use_stored_hash = USE_STORED_HASH_ON_REHASH(new_table.bucket_count()); for (auto &bucket : m_buckets_data) { if (bucket.empty()) { continue; } const std::size_t hash = use_stored_hash ? 
bucket.truncated_hash() : new_table.hash_key(KeySelect()(bucket.value())); new_table.insert_value_on_rehash(new_table.bucket_for_hash(hash), 0, bucket_entry::truncate_hash(hash), std::move(bucket.value())); } new_table.m_nb_elements = m_nb_elements; new_table.swap(*this); } void insert_value_on_rehash(std::size_t ibucket, distance_type dist_from_ideal_bucket, truncated_hash_type hash, value_type &&value) { while (true) { if (dist_from_ideal_bucket > m_buckets[ibucket].dist_from_ideal_bucket()) { if (m_buckets[ibucket].empty()) { m_buckets[ibucket].set_value_of_empty_bucket(dist_from_ideal_bucket, hash, std::move(value)); return; } else { m_buckets[ibucket].swap_with_value_in_bucket(dist_from_ideal_bucket, hash, value); } } dist_from_ideal_bucket++; ibucket = next_bucket(ibucket); } } /** * Grow the table if m_grow_on_next_insert is true or we reached the * max_load_factor. Shrink the table if m_try_skrink_on_next_insert is true * (an erase occured) and we're below the min_load_factor. * * Return true if the table has been rehashed. 
*/ bool rehash_on_extreme_load() { if (m_grow_on_next_insert || size() >= m_load_threshold) { rehash_impl(GrowthPolicy::next_bucket_count()); m_grow_on_next_insert = false; return true; } if (m_try_skrink_on_next_insert) { m_try_skrink_on_next_insert = false; if (m_min_load_factor != 0.0f && load_factor() < m_min_load_factor) { reserve(size() + 1); return true; } } return false; } public: static const size_type DEFAULT_INIT_BUCKETS_SIZE = 0; static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.5f; static constexpr float MINIMUM_MAX_LOAD_FACTOR = 0.2f; static constexpr float MAXIMUM_MAX_LOAD_FACTOR = 0.95f; static constexpr float DEFAULT_MIN_LOAD_FACTOR = 0.0f; static constexpr float MINIMUM_MIN_LOAD_FACTOR = 0.0f; static constexpr float MAXIMUM_MIN_LOAD_FACTOR = 0.15f; static_assert(MINIMUM_MAX_LOAD_FACTOR < MAXIMUM_MAX_LOAD_FACTOR, "MINIMUM_MAX_LOAD_FACTOR should be < MAXIMUM_MAX_LOAD_FACTOR"); static_assert(MINIMUM_MIN_LOAD_FACTOR < MAXIMUM_MIN_LOAD_FACTOR, "MINIMUM_MIN_LOAD_FACTOR should be < MAXIMUM_MIN_LOAD_FACTOR"); static_assert(MAXIMUM_MIN_LOAD_FACTOR < MINIMUM_MAX_LOAD_FACTOR, "MAXIMUM_MIN_LOAD_FACTOR should be < MINIMUM_MAX_LOAD_FACTOR"); private: static const distance_type REHASH_ON_HIGH_NB_PROBES__NPROBES = 128; static constexpr float REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR = 0.15f; /** * Return an always valid pointer to an static empty bucket_entry with * last_bucket() == true. */ bucket_entry *static_empty_bucket_ptr() { static bucket_entry empty_bucket(true); return &empty_bucket; } private: buckets_container_type m_buckets_data; /** * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points * to static_empty_bucket_ptr. This variable is useful to avoid the cost of * checking if m_buckets_data is empty when trying to find an element. * * TODO Remove m_buckets_data and only use a pointer instead of a * pointer+vector to save some space in the robin_hash object. Manage the * Allocator manually. 
*/ bucket_entry *m_buckets; /** * Used a lot in find, avoid the call to m_buckets_data.size() which is a bit * slower. */ size_type m_bucket_count; size_type m_nb_elements; size_type m_load_threshold; float m_max_load_factor; bool m_grow_on_next_insert; float m_min_load_factor; /** * We can't shrink down the map on erase operations as the erase methods need * to return the next iterator. Shrinking the map would invalidate all the * iterators and we could not return the next iterator in a meaningful way, On * erase, we thus just indicate on erase that we should try to shrink the hash * table on the next insert if we go below the min_load_factor. */ bool m_try_skrink_on_next_insert; }; } // namespace detail_robin_hash } // namespace tsl #endif
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/tsl/robin_map.h
C/C++ Header
/** * MIT License * * Copyright (c) 2017 Tessil * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef TSL_ROBIN_MAP_H #define TSL_ROBIN_MAP_H #include "robin_hash.h" #include <cstddef> #include <functional> #include <initializer_list> #include <memory> #include <type_traits> #include <utility> namespace tsl { /** * Implementation of a hash map using open-adressing and the robin hood hashing * algorithm with backward shift deletion. * * For operations modifying the hash map (insert, erase, rehash, ...), the * strong exception guarantee is only guaranteed when the expression * `std::is_nothrow_swappable<std::pair<Key, T>>::value && * std::is_nothrow_move_constructible<std::pair<Key, T>>::value` is true, * otherwise if an exception is thrown during the swap or the move, the hash map * may end up in a undefined state. 
Per the standard a `Key` or `T` with a * noexcept copy constructor and no move constructor also satisfies the * `std::is_nothrow_move_constructible<std::pair<Key, T>>::value` criterion (and * will thus guarantee the strong exception for the map). * * When `StoreHash` is true, 32 bits of the hash are stored alongside the * values. It can improve the performance during lookups if the `KeyEqual` * function takes time (if it engenders a cache-miss for example) as we then * compare the stored hashes before comparing the keys. When * `tsl::rh::power_of_two_growth_policy` is used as `GrowthPolicy`, it may also * speed-up the rehash process as we can avoid to recalculate the hash. When it * is detected that storing the hash will not incur any memory penality due to * alignement (i.e. `sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, * true>) == sizeof(tsl::detail_robin_hash::bucket_entry<ValueType, false>)`) * and `tsl::rh::power_of_two_growth_policy` is used, the hash will be stored * even if `StoreHash` is false so that we can speed-up the rehash (but it will * not be used on lookups unless `StoreHash` is true). * * `GrowthPolicy` defines how the map grows and consequently how a hash value is * mapped to a bucket. By default the map uses * `tsl::rh::power_of_two_growth_policy`. This policy keeps the number of * buckets to a power of two and uses a mask to map the hash to a bucket instead * of the slow modulo. Other growth policies are available and you may define * your own growth policy, check `tsl::rh::power_of_two_growth_policy` for the * interface. * * `std::pair<Key, T>` must be swappable. * * `Key` and `T` must be copy and/or move constructible. * * If the destructor of `Key` or `T` throws an exception, the behaviour of the * class is undefined. * * Iterators invalidation: * - clear, operator=, reserve, rehash: always invalidate the iterators. * - insert, emplace, emplace_hint, operator[]: if there is an effective * insert, invalidate the iterators. 
* - erase: always invalidate the iterators. */ template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>, class Allocator = std::allocator<std::pair<Key, T>>, bool StoreHash = false, class GrowthPolicy = tsl::rh::power_of_two_growth_policy<2>> class robin_map { private: template <typename U> using has_is_transparent = tsl::detail_robin_hash::has_is_transparent<U>; class KeySelect { public: using key_type = Key; const key_type & operator()(const std::pair<Key, T> &key_value) const noexcept { return key_value.first; } key_type &operator()(std::pair<Key, T> &key_value) noexcept { return key_value.first; } }; class ValueSelect { public: using value_type = T; const value_type & operator()(const std::pair<Key, T> &key_value) const noexcept { return key_value.second; } value_type &operator()(std::pair<Key, T> &key_value) noexcept { return key_value.second; } }; using ht = detail_robin_hash::robin_hash<std::pair<Key, T>, KeySelect, ValueSelect, Hash, KeyEqual, Allocator, StoreHash, GrowthPolicy>; public: using key_type = typename ht::key_type; using mapped_type = T; using value_type = typename ht::value_type; using size_type = typename ht::size_type; using difference_type = typename ht::difference_type; using hasher = typename ht::hasher; using key_equal = typename ht::key_equal; using allocator_type = typename ht::allocator_type; using reference = typename ht::reference; using const_reference = typename ht::const_reference; using pointer = typename ht::pointer; using const_pointer = typename ht::const_pointer; using iterator = typename ht::iterator; using const_iterator = typename ht::const_iterator; public: /* * Constructors */ robin_map() : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE) {} explicit robin_map(size_type bucket_count, const Hash &hash = Hash(), const KeyEqual &equal = KeyEqual(), const Allocator &alloc = Allocator()) : m_ht(bucket_count, hash, equal, alloc) {} robin_map(size_type bucket_count, const Allocator &alloc) : 
robin_map(bucket_count, Hash(), KeyEqual(), alloc) {} robin_map(size_type bucket_count, const Hash &hash, const Allocator &alloc) : robin_map(bucket_count, hash, KeyEqual(), alloc) {} explicit robin_map(const Allocator &alloc) : robin_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {} template <class InputIt> robin_map(InputIt first, InputIt last, size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, const Hash &hash = Hash(), const KeyEqual &equal = KeyEqual(), const Allocator &alloc = Allocator()) : robin_map(bucket_count, hash, equal, alloc) { insert(first, last); } template <class InputIt> robin_map(InputIt first, InputIt last, size_type bucket_count, const Allocator &alloc) : robin_map(first, last, bucket_count, Hash(), KeyEqual(), alloc) {} template <class InputIt> robin_map(InputIt first, InputIt last, size_type bucket_count, const Hash &hash, const Allocator &alloc) : robin_map(first, last, bucket_count, hash, KeyEqual(), alloc) {} robin_map(std::initializer_list<value_type> init, size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE, const Hash &hash = Hash(), const KeyEqual &equal = KeyEqual(), const Allocator &alloc = Allocator()) : robin_map(init.begin(), init.end(), bucket_count, hash, equal, alloc) {} robin_map(std::initializer_list<value_type> init, size_type bucket_count, const Allocator &alloc) : robin_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc) {} robin_map(std::initializer_list<value_type> init, size_type bucket_count, const Hash &hash, const Allocator &alloc) : robin_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc) {} robin_map &operator=(std::initializer_list<value_type> ilist) { m_ht.clear(); m_ht.reserve(ilist.size()); m_ht.insert(ilist.begin(), ilist.end()); return *this; } allocator_type get_allocator() const { return m_ht.get_allocator(); } /* * Iterators */ iterator begin() noexcept { return m_ht.begin(); } const_iterator begin() const noexcept { return m_ht.begin(); } const_iterator cbegin() 
const noexcept { return m_ht.cbegin(); } iterator end() noexcept { return m_ht.end(); } const_iterator end() const noexcept { return m_ht.end(); } const_iterator cend() const noexcept { return m_ht.cend(); } /* * Capacity */ bool empty() const noexcept { return m_ht.empty(); } size_type size() const noexcept { return m_ht.size(); } size_type max_size() const noexcept { return m_ht.max_size(); } /* * Modifiers */ void clear() noexcept { m_ht.clear(); } std::pair<iterator, bool> insert(const value_type &value) { return m_ht.insert(value); } template <class P, typename std::enable_if<std::is_constructible< value_type, P &&>::value>::type * = nullptr> std::pair<iterator, bool> insert(P &&value) { return m_ht.emplace(std::forward<P>(value)); } std::pair<iterator, bool> insert(value_type &&value) { return m_ht.insert(std::move(value)); } iterator insert(const_iterator hint, const value_type &value) { return m_ht.insert_hint(hint, value); } template <class P, typename std::enable_if<std::is_constructible< value_type, P &&>::value>::type * = nullptr> iterator insert(const_iterator hint, P &&value) { return m_ht.emplace_hint(hint, std::forward<P>(value)); } iterator insert(const_iterator hint, value_type &&value) { return m_ht.insert_hint(hint, std::move(value)); } template <class InputIt> void insert(InputIt first, InputIt last) { m_ht.insert(first, last); } void insert(std::initializer_list<value_type> ilist) { m_ht.insert(ilist.begin(), ilist.end()); } template <class M> std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) { return m_ht.insert_or_assign(k, std::forward<M>(obj)); } template <class M> std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) { return m_ht.insert_or_assign(std::move(k), std::forward<M>(obj)); } template <class M> iterator insert_or_assign(const_iterator hint, const key_type &k, M &&obj) { return m_ht.insert_or_assign(hint, k, std::forward<M>(obj)); } template <class M> iterator insert_or_assign(const_iterator hint, 
key_type &&k, M &&obj) { return m_ht.insert_or_assign(hint, std::move(k), std::forward<M>(obj)); } /** * Due to the way elements are stored, emplace will need to move or copy the * key-value once. The method is equivalent to * insert(value_type(std::forward<Args>(args)...)); * * Mainly here for compatibility with the std::unordered_map interface. */ template <class... Args> std::pair<iterator, bool> emplace(Args &&... args) { return m_ht.emplace(std::forward<Args>(args)...); } /** * Due to the way elements are stored, emplace_hint will need to move or copy * the key-value once. The method is equivalent to insert(hint, * value_type(std::forward<Args>(args)...)); * * Mainly here for compatibility with the std::unordered_map interface. */ template <class... Args> iterator emplace_hint(const_iterator hint, Args &&... args) { return m_ht.emplace_hint(hint, std::forward<Args>(args)...); } template <class... Args> std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) { return m_ht.try_emplace(k, std::forward<Args>(args)...); } template <class... Args> std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) { return m_ht.try_emplace(std::move(k), std::forward<Args>(args)...); } template <class... Args> iterator try_emplace(const_iterator hint, const key_type &k, Args &&... args) { return m_ht.try_emplace_hint(hint, k, std::forward<Args>(args)...); } template <class... Args> iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) { return m_ht.try_emplace_hint(hint, std::move(k), std::forward<Args>(args)...); } iterator erase(iterator pos) { return m_ht.erase(pos); } iterator erase(const_iterator pos) { return m_ht.erase(pos); } iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); } size_type erase(const key_type &key) { return m_ht.erase(key); } /** * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). 
Usefull to speed-up * the lookup to the value if you already have the hash. */ size_type erase(const key_type &key, std::size_t precalculated_hash) { return m_ht.erase(key, precalculated_hash); } /** * This overload only participates in the overload resolution if the typedef * KeyEqual::is_transparent exists. If so, K must be hashable and comparable * to Key. */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> size_type erase(const K &key) { return m_ht.erase(key); } /** * @copydoc erase(const K& key) * * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup to the value if you already have the hash. */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> size_type erase(const K &key, std::size_t precalculated_hash) { return m_ht.erase(key, precalculated_hash); } void swap(robin_map &other) { other.m_ht.swap(m_ht); } /* * Lookup */ T &at(const Key &key) { return m_ht.at(key); } /** * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ T &at(const Key &key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); } const T &at(const Key &key) const { return m_ht.at(key); } /** * @copydoc at(const Key& key, std::size_t precalculated_hash) */ const T &at(const Key &key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); } /** * This overload only participates in the overload resolution if the typedef * KeyEqual::is_transparent exists. If so, K must be hashable and comparable * to Key. 
*/ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> T &at(const K &key) { return m_ht.at(key); } /** * @copydoc at(const K& key) * * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> T &at(const K &key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); } /** * @copydoc at(const K& key) */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> const T &at(const K &key) const { return m_ht.at(key); } /** * @copydoc at(const K& key, std::size_t precalculated_hash) */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> const T &at(const K &key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); } T &operator[](const Key &key) { return m_ht[key]; } T &operator[](Key &&key) { return m_ht[std::move(key)]; } size_type count(const Key &key) const { return m_ht.count(key); } /** * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ size_type count(const Key &key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } /** * This overload only participates in the overload resolution if the typedef * KeyEqual::is_transparent exists. If so, K must be hashable and comparable * to Key. 
*/ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> size_type count(const K &key) const { return m_ht.count(key); } /** * @copydoc count(const K& key) const * * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> size_type count(const K &key, std::size_t precalculated_hash) const { return m_ht.count(key, precalculated_hash); } iterator find(const Key &key) { return m_ht.find(key); } /** * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ iterator find(const Key &key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } const_iterator find(const Key &key) const { return m_ht.find(key); } /** * @copydoc find(const Key& key, std::size_t precalculated_hash) */ const_iterator find(const Key &key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); } /** * This overload only participates in the overload resolution if the typedef * KeyEqual::is_transparent exists. If so, K must be hashable and comparable * to Key. */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> iterator find(const K &key) { return m_ht.find(key); } /** * @copydoc find(const K& key) * * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. 
*/ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> iterator find(const K &key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); } /** * @copydoc find(const K& key) */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> const_iterator find(const K &key) const { return m_ht.find(key); } /** * @copydoc find(const K& key) * * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> const_iterator find(const K &key, std::size_t precalculated_hash) const { return m_ht.find(key, precalculated_hash); } std::pair<iterator, iterator> equal_range(const Key &key) { return m_ht.equal_range(key); } /** * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ std::pair<iterator, iterator> equal_range(const Key &key, std::size_t precalculated_hash) { return m_ht.equal_range(key, precalculated_hash); } std::pair<const_iterator, const_iterator> equal_range(const Key &key) const { return m_ht.equal_range(key); } /** * @copydoc equal_range(const Key& key, std::size_t precalculated_hash) */ std::pair<const_iterator, const_iterator> equal_range(const Key &key, std::size_t precalculated_hash) const { return m_ht.equal_range(key, precalculated_hash); } /** * This overload only participates in the overload resolution if the typedef * KeyEqual::is_transparent exists. If so, K must be hashable and comparable * to Key. 
*/ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> std::pair<iterator, iterator> equal_range(const K &key) { return m_ht.equal_range(key); } /** * @copydoc equal_range(const K& key) * * Use the hash value 'precalculated_hash' instead of hashing the key. The * hash value should be the same as hash_function()(key). Usefull to speed-up * the lookup if you already have the hash. */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> std::pair<iterator, iterator> equal_range(const K &key, std::size_t precalculated_hash) { return m_ht.equal_range(key, precalculated_hash); } /** * @copydoc equal_range(const K& key) */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> std::pair<const_iterator, const_iterator> equal_range(const K &key) const { return m_ht.equal_range(key); } /** * @copydoc equal_range(const K& key, std::size_t precalculated_hash) */ template < class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type * = nullptr> std::pair<const_iterator, const_iterator> equal_range(const K &key, std::size_t precalculated_hash) const { return m_ht.equal_range(key, precalculated_hash); } /* * Bucket interface */ size_type bucket_count() const { return m_ht.bucket_count(); } size_type max_bucket_count() const { return m_ht.max_bucket_count(); } /* * Hash policy */ float load_factor() const { return m_ht.load_factor(); } float min_load_factor() const { return m_ht.min_load_factor(); } float max_load_factor() const { return m_ht.max_load_factor(); } /** * Set the `min_load_factor` to `ml`. When the `load_factor` of the map goes * below `min_load_factor` after some erase operations, the map will be * shrunk when an insertion occurs. The erase method itself never shrinks * the map. 
* * The default value of `min_load_factor` is 0.0f, the map never shrinks by * default. */ void min_load_factor(float ml) { m_ht.min_load_factor(ml); } void max_load_factor(float ml) { m_ht.max_load_factor(ml); } void rehash(size_type count) { m_ht.rehash(count); } void reserve(size_type count) { m_ht.reserve(count); } /* * Observers */ hasher hash_function() const { return m_ht.hash_function(); } key_equal key_eq() const { return m_ht.key_eq(); } /* * Other */ /** * Convert a const_iterator to an iterator. */ iterator mutable_iterator(const_iterator pos) { return m_ht.mutable_iterator(pos); } friend bool operator==(const robin_map &lhs, const robin_map &rhs) { if (lhs.size() != rhs.size()) { return false; } for (const auto &element_lhs : lhs) { const auto it_element_rhs = rhs.find(element_lhs.first); if (it_element_rhs == rhs.cend() || element_lhs.second != it_element_rhs->second) { return false; } } return true; } friend bool operator!=(const robin_map &lhs, const robin_map &rhs) { return !operator==(lhs, rhs); } friend void swap(robin_map &lhs, robin_map &rhs) { lhs.swap(rhs); } private: ht m_ht; }; /** * Same as `tsl::robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash, * tsl::rh::prime_growth_policy>`. */ template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>, class Allocator = std::allocator<std::pair<Key, T>>, bool StoreHash = false> using robin_pg_map = robin_map<Key, T, Hash, KeyEqual, Allocator, StoreHash, tsl::rh::prime_growth_policy>; } // end namespace tsl #endif
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/include/utility/timer.h
C/C++ Header
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <chrono> #ifdef TV_CUDA #include <cuda_runtime_api.h> #endif #include <iostream> namespace spconv { #ifdef TV_CUDA template <typename TimeT = std::chrono::microseconds> struct CudaContextTimer { CudaContextTimer() { cudaDeviceSynchronize(); mCurTime = std::chrono::steady_clock::now(); } typename TimeT::rep report() { cudaDeviceSynchronize(); auto duration = std::chrono::duration_cast<TimeT>( std::chrono::steady_clock::now() - mCurTime); auto res = duration.count(); mCurTime = std::chrono::steady_clock::now(); return res; } private: std::chrono::time_point<std::chrono::steady_clock> mCurTime; }; #endif template <typename TimeT = std::chrono::microseconds> struct CPUTimer { CPUTimer() { mCurTime = std::chrono::steady_clock::now(); } typename TimeT::rep report() { auto duration = std::chrono::duration_cast<TimeT>( std::chrono::steady_clock::now() - mCurTime); auto res = duration.count(); mCurTime = std::chrono::steady_clock::now(); return res; } private: std::chrono::time_point<std::chrono::steady_clock> mCurTime; }; } // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/setup.py
Python
import os import platform import re import subprocess import sys from distutils.version import LooseVersion from pathlib import Path import torch from setuptools import Extension, find_packages, setup from setuptools.command.build_ext import build_ext # if 'LIBTORCH_ROOT' not in os.environ: # raise ValueError("You must set LIBTORCH_ROOT to your torch c++ library.") LIBTORCH_ROOT = str(Path(torch.__file__).parent) SPCONV_FORCE_BUILD_CUDA = os.getenv("SPCONV_FORCE_BUILD_CUDA") PYTHON_VERSION = "{}.{}".format(sys.version_info.major, sys.version_info.minor) remove_device = re.search(r"(\+|\.)(dev|cu|cpu)", torch.__version__) PYTORCH_VERSION = torch.__version__ if remove_device is not None: PYTORCH_VERSION = torch.__version__[:remove_device.start()] PYTORCH_VERSION = list(map(int, PYTORCH_VERSION.split("."))) PYTORCH_VERSION_NUMBER = PYTORCH_VERSION[0] * 10000 + PYTORCH_VERSION[1] * 100 + PYTORCH_VERSION[2] class CMakeExtension(Extension): def __init__(self, name, sourcedir='', library_dirs=[]): Extension.__init__(self, name, sources=[], library_dirs=library_dirs) self.sourcedir = os.path.abspath(sourcedir) class CMakeBuild(build_ext): def run(self): try: out = subprocess.check_output(['cmake', '--version']) except OSError: raise RuntimeError("CMake must be installed to build the following extensions: " + ", ".join(e.name for e in self.extensions)) if platform.system() == "Windows": cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1)) if cmake_version < '3.13.0': raise RuntimeError("CMake >= 3.13.0 is required on Windows") for ext in self.extensions: self.build_extension(ext) def build_extension(self, ext): extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) cmake_args = [# '-G "Visual Studio 15 2017 Win64"', '-DCMAKE_PREFIX_PATH={}'.format(LIBTORCH_ROOT), '-DPYBIND11_PYTHON_VERSION={}'.format(PYTHON_VERSION), '-DSPCONV_BuildTests=OFF', '-DPYTORCH_VERSION={}'.format(PYTORCH_VERSION_NUMBER) ] # -arch=sm_61 if not 
torch.cuda.is_available() and SPCONV_FORCE_BUILD_CUDA is None: cmake_args += ['-DSPCONV_BuildCUDA=OFF'] else: cuda_flags = ["\"--expt-relaxed-constexpr\""] # must add following flags to use at::Half # but will remove raw half operators. cuda_flags += ["-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__"] # cuda_flags += ["-D__CUDA_NO_HALF2_OPERATORS__"] cmake_args += ['-DCMAKE_CUDA_FLAGS=' + " ".join(cuda_flags)] cfg = 'Debug' if self.debug else 'Release' assert cfg == "Release", "pytorch ops don't support debug build." build_args = ['--config', cfg] print(cfg) if platform.system() == "Windows": cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), str(Path(extdir) / "spconv"))] # cmake_args += ['-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), str(Path(extdir) / "spconv"))] cmake_args += ['-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), str(Path(extdir) / "spconv"))] cmake_args += ["-DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=TRUE"] if sys.maxsize > 2**32: cmake_args += ['-A', 'x64'] build_args += ['--', '/m'] else: cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}'.format(str(Path(extdir) / "spconv"))] cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] build_args += ['--', '-j4'] env = os.environ.copy() env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version()) if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) print("|||||CMAKE ARGS|||||", cmake_args) subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env) subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp) packages = find_packages(exclude=('tools', 'tools.*')) setup( name='spconv', version='1.2.1', author='Yan Yan', author_email='scrin@foxmail.com', description='spatial sparse convolution for pytorch', long_description='', setup_requires = ['torch>=1.3.0'], packages=packages, package_dir = 
{'spconv': 'spconv'}, ext_modules=[CMakeExtension('spconv', library_dirs=[])], cmdclass=dict(build_ext=CMakeBuild), zip_safe=False, )
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/__init__.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import platform from pathlib import Path import numpy as np import torch from spconv import ops, utils from spconv.conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d, SparseConvTranspose3d, SparseInverseConv2d, SparseInverseConv3d, SubMConv2d, SubMConv3d) from spconv.identity import Identity from spconv.modules import SparseModule, SparseSequential from spconv.ops import ConvAlgo from spconv.pool import SparseMaxPool2d, SparseMaxPool3d from spconv.tables import AddTable, ConcatTable, JoinTable _LIB_FILE_NAME = "libspconv.so" if platform.system() == "Windows": _LIB_FILE_NAME = "spconv.dll" _LIB_PATH = str(Path(__file__).parent / _LIB_FILE_NAME) torch.ops.load_library(_LIB_PATH) def scatter_nd(indices, updates, shape): """pytorch edition of tensorflow scatter_nd. this function don't contain except handle code. so use this carefully when indice repeats, don't support repeat add which is supported in tensorflow. 
""" ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device) ndim = indices.shape[-1] output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:] flatted_indices = indices.view(-1, ndim) slices = [flatted_indices[:, i] for i in range(ndim)] slices += [Ellipsis] ret[slices] = updates.view(*output_shape) return ret class SparseConvTensor(object): def __init__(self, features, indices, spatial_shape, batch_size, grid=None): """ Args: features: [num_points, num_features] feature tensor indices: [num_points, ndim + 1] indice tensor. batch index saved in indices[:, 0] spatial_shape: spatial shape of your sparse data batch_size: batch size of your sparse data grid: pre-allocated grid tensor. should be used when the volume of spatial shape is very large. """ self.features = features self.indices = indices self.spatial_shape = spatial_shape self.batch_size = batch_size self.indice_dict = {} self.grid = grid @classmethod def from_dense(cls, x: torch.Tensor): """create sparse tensor fron channel last dense tensor by to_sparse x must be NHWC tensor, channel last """ x = x.to_sparse(x.ndim - 1) spatial_shape = x.shape[1:-1] batch_size = x.shape[0] indices_th = x.indices().permute(1, 0).contiguous().int() features_th = x.values() return cls(features_th, indices_th, spatial_shape, batch_size) @property def spatial_size(self): return np.prod(self.spatial_shape) def find_indice_pair(self, key): if key is None: return None if key in self.indice_dict: return self.indice_dict[key] return None def dense(self, channels_first=True): output_shape = [self.batch_size] + list( self.spatial_shape) + [self.features.shape[1]] res = scatter_nd( self.indices.to(self.features.device).long(), self.features, output_shape) if not channels_first: return res ndim = len(self.spatial_shape) trans_params = list(range(0, ndim + 1)) trans_params.insert(1, ndim + 1) return res.permute(*trans_params).contiguous() @property def sparity(self): return self.indices.shape[0] / np.prod( 
self.spatial_shape) / self.batch_size class ToDense(SparseModule): """convert SparseConvTensor to NCHW dense tensor. """ def forward(self, x: SparseConvTensor): return x.dense() class RemoveGrid(SparseModule): """remove pre-allocated grid buffer. """ def forward(self, x: SparseConvTensor): x.grid = None return x
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/conv.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import time import numpy as np import torch from torch import nn from torch.nn import init from torch.nn.parameter import Parameter import spconv import spconv.functional as Fsp from spconv import ops from spconv.modules import SparseModule def _calculate_fan_in_and_fan_out_hwio(tensor): dimensions = tensor.ndimension() if dimensions < 2: raise ValueError( "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions" ) if dimensions == 2: # Linear fan_in = tensor.size(-2) fan_out = tensor.size(-1) else: num_input_fmaps = tensor.size(-2) num_output_fmaps = tensor.size(-1) receptive_field_size = 1 if tensor.dim() > 2: receptive_field_size = tensor[..., 0, 0].numel() fan_in = num_input_fmaps * receptive_field_size fan_out = num_output_fmaps * receptive_field_size return fan_in, fan_out class SparseConvolution(SparseModule): __constants__ = [ 'stride', 'padding', 'dilation', 'groups', 'bias', 'subm', 'inverse', 'transposed', 'output_padding', 'fused_bn' ] def __init__(self, ndim, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, groups=1, bias=True, subm=False, output_padding=0, transposed=False, inverse=False, indice_key=None, fused_bn=False, use_hash=False, algo=ops.ConvAlgo.Native): super(SparseConvolution, self).__init__() assert groups == 1 if not isinstance(kernel_size, (list, tuple)): kernel_size = [kernel_size] * ndim if not isinstance(stride, (list, 
tuple)): stride = [stride] * ndim if not isinstance(padding, (list, tuple)): padding = [padding] * ndim if not isinstance(dilation, (list, tuple)): dilation = [dilation] * ndim if not isinstance(output_padding, (list, tuple)): output_padding = [output_padding] * ndim for d, s in zip(dilation, stride): assert any([s == 1, d == 1]), "don't support this." self.ndim = ndim self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.conv1x1 = np.prod(kernel_size) == 1 self.stride = stride self.padding = padding self.dilation = dilation self.transposed = transposed self.inverse = inverse self.output_padding = output_padding self.groups = groups self.subm = subm self.indice_key = indice_key self.fused_bn = fused_bn self.use_hash = use_hash self.algo = algo.value self.weight = Parameter( torch.Tensor(*kernel_size, in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = _calculate_fan_in_and_fan_out_hwio(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def forward(self, input): assert isinstance(input, spconv.SparseConvTensor) features = input.features device = features.device indices = input.indices spatial_shape = input.spatial_shape batch_size = input.batch_size if not self.subm: if self.transposed: out_spatial_shape = ops.get_deconv_output_size( spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding) else: out_spatial_shape = ops.get_conv_output_size( spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation) else: out_spatial_shape = spatial_shape # input.update_grid(out_spatial_shape) # t = time.time() if self.conv1x1: features = torch.mm( input.features, self.weight.view(self.in_channels, 
self.out_channels)) if self.bias is not None: features += self.bias out_tensor = spconv.SparseConvTensor(features, input.indices, input.spatial_shape, input.batch_size) out_tensor.indice_dict = input.indice_dict out_tensor.grid = input.grid return out_tensor datas = input.find_indice_pair(self.indice_key) if self.inverse: assert datas is not None and self.indice_key is not None _, outids, indice_pairs, indice_pair_num, out_spatial_shape = datas assert indice_pair_num.shape[0] == np.prod( self.kernel_size ), "inverse conv must have same kernel size as its couple conv" else: if self.indice_key is not None and datas is not None: outids, _, indice_pairs, indice_pair_num, _ = datas else: outids, indice_pairs, indice_pair_num = ops.get_indice_pairs( indices, batch_size, spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, self.output_padding, self.subm, self.transposed, grid=input.grid, use_hash=self.use_hash) input.indice_dict[self.indice_key] = (outids, indices, indice_pairs, indice_pair_num, spatial_shape) if self.fused_bn: assert self.bias is not None out_features = ops.fused_indice_conv(features, self.weight, self.bias, indice_pairs.to(device), indice_pair_num, outids.shape[0], self.inverse, self.subm) else: if self.subm: out_features = Fsp.indice_subm_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0], self.algo) else: if self.inverse: out_features = Fsp.indice_inverse_conv( features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0], self.algo) else: out_features = Fsp.indice_conv(features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0], self.algo) if self.bias is not None: out_features += self.bias out_tensor = spconv.SparseConvTensor(out_features, outids, out_spatial_shape, batch_size) out_tensor.indice_dict = input.indice_dict out_tensor.grid = input.grid return out_tensor class SparseConv2d(SparseConvolution): def __init__(self, in_channels, out_channels, 
kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SparseConv2d, self).__init__(2, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, indice_key=indice_key, use_hash=use_hash, algo=algo) class SparseConv3d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SparseConv3d, self).__init__(3, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, indice_key=indice_key, use_hash=use_hash, algo=algo) class SparseConv4d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SparseConv4d, self).__init__(4, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, indice_key=indice_key, use_hash=use_hash, algo=algo) class SparseConvTranspose2d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SparseConvTranspose2d, self).__init__(2, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, transposed=True, indice_key=indice_key, use_hash=use_hash, algo=algo) class SparseConvTranspose3d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SparseConvTranspose3d, self).__init__(3, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, transposed=True, indice_key=indice_key, use_hash=use_hash, algo=algo) class SparseInverseConv2d(SparseConvolution): def __init__(self, in_channels, out_channels, 
kernel_size, indice_key, bias=True, algo=ops.ConvAlgo.Native): super(SparseInverseConv2d, self).__init__(2, in_channels, out_channels, kernel_size, bias=bias, inverse=True, indice_key=indice_key, algo=algo) class SparseInverseConv3d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, indice_key, bias=True, algo=ops.ConvAlgo.Native): super(SparseInverseConv3d, self).__init__(3, in_channels, out_channels, kernel_size, bias=bias, inverse=True, indice_key=indice_key, algo=algo) class SubMConv2d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SubMConv2d, self).__init__(2, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, True, indice_key=indice_key, use_hash=use_hash, algo=algo) class SubMConv3d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SubMConv3d, self).__init__(3, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, True, indice_key=indice_key, use_hash=use_hash, algo=algo) class SubMConv4d(SparseConvolution): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None, use_hash=False, algo=ops.ConvAlgo.Native): super(SubMConv4d, self).__init__(4, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, True, indice_key=indice_key, use_hash=use_hash, algo=algo)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/functional.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from torch.autograd import Function import spconv.ops as ops class SparseConvFunction(Function): @staticmethod def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out, algo): ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters) ctx.algo = algo return ops.indice_conv(features, filters, indice_pairs, indice_pair_num, num_activate_out, False, algo=algo) @staticmethod def backward(ctx, grad_output): indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors input_bp, filters_bp = ops.indice_conv_backward(features, filters, grad_output, indice_pairs, indice_pair_num, False, algo=ctx.algo) return input_bp, filters_bp, None, None, None, None class SparseInverseConvFunction(Function): @staticmethod def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out, algo): ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters) ctx.algo = algo return ops.indice_conv(features, filters, indice_pairs, indice_pair_num, num_activate_out, True, False, algo=algo) @staticmethod def backward(ctx, grad_output): indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors input_bp, filters_bp = ops.indice_conv_backward(features, filters, grad_output, indice_pairs, indice_pair_num, True, False, algo=ctx.algo) return input_bp, filters_bp, None, None, None, None class SubMConvFunction(Function): 
@staticmethod def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out, algo): ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters) ctx.algo = algo return ops.indice_conv(features, filters, indice_pairs, indice_pair_num, num_activate_out, False, True, algo=algo) @staticmethod def backward(ctx, grad_output): indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors input_bp, filters_bp = ops.indice_conv_backward(features, filters, grad_output, indice_pairs, indice_pair_num, False, True, algo=ctx.algo) return input_bp, filters_bp, None, None, None, None class SparseMaxPoolFunction(Function): @staticmethod def forward(ctx, features, indice_pairs, indice_pair_num, num_activate_out): out = ops.indice_maxpool(features, indice_pairs, indice_pair_num, num_activate_out) ctx.save_for_backward(indice_pairs, indice_pair_num, features, out) return out @staticmethod def backward(ctx, grad_output): indice_pairs, indice_pair_num, features, out = ctx.saved_tensors input_bp = ops.indice_maxpool_backward(features, out, grad_output, indice_pairs, indice_pair_num) return input_bp, None, None, None indice_conv = SparseConvFunction.apply indice_inverse_conv = SparseInverseConvFunction.apply indice_subm_conv = SubMConvFunction.apply indice_maxpool = SparseMaxPoolFunction.apply
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/identity.py
Python
# Copyright 2016-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from torch.nn import Module class Identity(Module): def forward(self, input): return input def input_spatial_size(self, out_size): return out_size
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/modules.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import time from collections import OrderedDict import torch from torch import nn import spconv def is_spconv_module(module): spconv_modules = (SparseModule, ) return isinstance(module, spconv_modules) def is_sparse_conv(module): from spconv.conv import SparseConvolution return isinstance(module, SparseConvolution) def _mean_update(vals, m_vals, t): outputs = [] if not isinstance(vals, list): vals = [vals] if not isinstance(m_vals, list): m_vals = [m_vals] for val, m_val in zip(vals, m_vals): output = t / float(t + 1) * m_val + 1 / float(t + 1) * val outputs.append(output) if len(outputs) == 1: outputs = outputs[0] return outputs class SparseModule(nn.Module): """ place holder, all module subclass from this will take sptensor in SparseSequential. """ pass class SparseSequential(SparseModule): r"""A sequential container. Modules will be added to it in the order they are passed in the constructor. Alternatively, an ordered dict of modules can also be passed in. 
To make it easier to understand, given is a small example:: # Example of using Sequential model = SparseSequential( SparseConv2d(1,20,5), nn.ReLU(), SparseConv2d(20,64,5), nn.ReLU() ) # Example of using Sequential with OrderedDict model = SparseSequential(OrderedDict([ ('conv1', SparseConv2d(1,20,5)), ('relu1', nn.ReLU()), ('conv2', SparseConv2d(20,64,5)), ('relu2', nn.ReLU()) ])) # Example of using Sequential with kwargs(python 3.6+) model = SparseSequential( conv1=SparseConv2d(1,20,5), relu1=nn.ReLU(), conv2=SparseConv2d(20,64,5), relu2=nn.ReLU() ) """ def __init__(self, *args, **kwargs): super(SparseSequential, self).__init__() if len(args) == 1 and isinstance(args[0], OrderedDict): for key, module in args[0].items(): self.add_module(key, module) else: for idx, module in enumerate(args): self.add_module(str(idx), module) for name, module in kwargs.items(): if sys.version_info < (3, 6): raise ValueError("kwargs only supported in py36+") if name in self._modules: raise ValueError("name exists.") self.add_module(name, module) self._sparity_dict = {} def __getitem__(self, idx): if not (-len(self) <= idx < len(self)): raise IndexError('index {} is out of range'.format(idx)) if idx < 0: idx += len(self) it = iter(self._modules.values()) for i in range(idx): next(it) return next(it) def __len__(self): return len(self._modules) @property def sparity_dict(self): return self._sparity_dict def add(self, module, name=None): if name is None: name = str(len(self._modules)) if name in self._modules: raise KeyError("name exists") self.add_module(name, module) def forward(self, input): for k, module in self._modules.items(): if is_spconv_module(module): # use SpConvTensor as input if isinstance(input, list): input = module(input) else: assert isinstance(input, spconv.SparseConvTensor) self._sparity_dict[k] = input.sparity input = module(input) else: if isinstance(input, spconv.SparseConvTensor): if input.indices.shape[0] != 0: input.features = module(input.features) else: input 
= module(input) return input def fused(self): """don't use this. no effect. """ from spconv.conv import SparseConvolution mods = [v for k, v in self._modules.items()] fused_mods = [] idx = 0 while idx < len(mods): if is_sparse_conv(mods[idx]): if idx < len(mods) - 1 and isinstance(mods[idx + 1], nn.BatchNorm1d): new_module = SparseConvolution( ndim=mods[idx].ndim, in_channels=mods[idx].in_channels, out_channels=mods[idx].out_channels, kernel_size=mods[idx].kernel_size, stride=mods[idx].stride, padding=mods[idx].padding, dilation=mods[idx].dilation, groups=mods[idx].groups, bias=True, subm=mods[idx].subm, output_padding=mods[idx].output_padding, transposed=mods[idx].transposed, inverse=mods[idx].inverse, indice_key=mods[idx].indice_key, fused_bn=True, ) new_module.load_state_dict(mods[idx].state_dict(), False) new_module.to(mods[idx].weight.device) conv = new_module bn = mods[idx + 1] conv.bias.data.zero_() conv.weight.data[:] = conv.weight.data * bn.weight.data / ( torch.sqrt(bn.running_var) + bn.eps) conv.bias.data[:] = ( conv.bias.data - bn.running_mean) * bn.weight.data / ( torch.sqrt(bn.running_var) + bn.eps) + bn.bias.data fused_mods.append(conv) idx += 2 else: fused_mods.append(mods[idx]) idx += 1 else: fused_mods.append(mods[idx]) idx += 1 return SparseSequential(*fused_mods)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/ops.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum import torch import spconv class ConvAlgo(Enum): Native = 0 # small memory cost, faster when number of points is large. Batch = 1 # high memory cost, faster when number of points is small (< 50000) BatchGemmGather = 2 # high memory cost, faster when number of points medium def get_conv_output_size(input_size, kernel_size, stride, padding, dilation): ndim = len(input_size) output_size = [] for i in range(ndim): size = (input_size[i] + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1) // stride[i] + 1 if kernel_size[i] == -1: output_size.append(1) else: output_size.append(size) return output_size def get_deconv_output_size(input_size, kernel_size, stride, padding, dilation, output_padding): ndim = len(input_size) output_size = [] for i in range(ndim): if kernel_size[i] == -1: raise ValueError("deconv don't support kernel_size < 0") size = (input_size[i] - 1) * stride[i] - 2 * padding[i] + kernel_size[ i] + output_padding[i] output_size.append(size) return output_size def get_indice_pairs(indices, batch_size, spatial_shape, ksize=3, stride=1, padding=0, dilation=1, out_padding=0, subm=False, transpose=False, grid=None, use_hash=False): ndim = indices.shape[1] - 1 if not isinstance(ksize, (list, tuple)): ksize = [ksize] * ndim if not isinstance(stride, (list, tuple)): stride = [stride] * ndim if not isinstance(padding, (list, tuple)): padding = [padding] * ndim if not 
isinstance(dilation, (list, tuple)): dilation = [dilation] * ndim if not isinstance(out_padding, (list, tuple)): out_padding = [out_padding] * ndim for d, s in zip(dilation, stride): assert any([s == 1, d == 1]), "don't support this." if not subm: if transpose: out_shape = get_deconv_output_size(spatial_shape, ksize, stride, padding, dilation, out_padding) else: out_shape = get_conv_output_size(spatial_shape, ksize, stride, padding, dilation) else: out_shape = spatial_shape if grid is None: res = torch.ops.spconv.get_indice_pairs(indices, batch_size, out_shape, spatial_shape, ksize, stride, padding, dilation, out_padding, int(subm), int(transpose), int(use_hash)) return res else: if ndim == 2: get_indice_pairs_func = torch.ops.spconv.get_indice_pairs_grid_2d elif ndim == 3: get_indice_pairs_func = torch.ops.spconv.get_indice_pairs_grid_3d else: raise NotImplementedError return get_indice_pairs_func(indices, grid, batch_size, out_shape, spatial_shape, ksize, stride, padding, dilation, out_padding, int(subm), int(transpose), int(use_hash)) def indice_conv(features, filters, indice_pairs, indice_pair_num, num_activate_out, inverse=False, subm=False, algo=ConvAlgo.Native.value): return torch.ops.spconv.indice_conv(features, filters, indice_pairs, indice_pair_num, num_activate_out, int(inverse), int(subm), algo) def fused_indice_conv(features, filters, bias, indice_pairs, indice_pair_num, num_activate_out, inverse, subm): return torch.ops.spconv.fused_indice_conv_bn(features, filters, bias, indice_pairs, indice_pair_num, num_activate_out, int(inverse), int(subm)) def indice_conv_backward(features, filters, out_bp, indice_pairs, indice_pair_num, inverse=False, subm=False, algo=ConvAlgo.Native.value): return torch.ops.spconv.indice_conv_backward(features, filters, out_bp, indice_pairs, indice_pair_num, int(inverse), int(subm), algo) def indice_maxpool(features, indice_pairs, indice_pair_num, num_activate_out): return torch.ops.spconv.indice_maxpool(features, indice_pairs, 
indice_pair_num, num_activate_out) def indice_maxpool_backward(features, out_features, out_bp, indice_pairs, indice_pair_num): return torch.ops.spconv.indice_maxpool_backward(features, out_features, out_bp, indice_pairs, indice_pair_num) def nms(boxes, scores, pre_max_size, post_max_size, thresh, eps): res = torch.ops.spconv.nms(boxes, scores, pre_max_size, post_max_size, thresh, eps) return res def pillar_scatter(features, coors, shape): if features.dtype == torch.float32: return torch.ops.spconv.pillar_scatter_float(features, coors, shape) elif features.dtype == torch.half: return torch.ops.spconv.pillar_scatter_half(features, coors, shape) else: raise NotImplementedError
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/pool.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import time import numpy as np import torch from torch import nn from torch.nn import init from torch.nn.parameter import Parameter import spconv import spconv.functional as Fsp from spconv import ops from spconv.modules import SparseModule class SparseMaxPool(SparseModule): def __init__(self, ndim, kernel_size, stride=None, padding=0, dilation=1, subm=False): super(SparseMaxPool, self).__init__() if not isinstance(kernel_size, (list, tuple)): kernel_size = [kernel_size] * ndim if stride is None: stride = kernel_size.copy() if not isinstance(stride, (list, tuple)): stride = [stride] * ndim if not isinstance(padding, (list, tuple)): padding = [padding] * ndim if not isinstance(dilation, (list, tuple)): dilation = [dilation] * ndim self.ndim = ndim self.kernel_size = kernel_size self.stride = stride self.padding = padding self.subm = subm self.dilation = dilation def forward(self, input): assert isinstance(input, spconv.SparseConvTensor) features = input.features device = features.device indices = input.indices spatial_shape = input.spatial_shape batch_size = input.batch_size if not self.subm: out_spatial_shape = ops.get_conv_output_size( spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation) else: out_spatial_shape = spatial_shape outids, indice_pairs, indice_pairs_num = ops.get_indice_pairs( indices, batch_size, spatial_shape, self.kernel_size, self.stride, self.padding, 
self.dilation, 0, self.subm) out_features = Fsp.indice_maxpool(features, indice_pairs.to(device), indice_pairs_num.to(device), outids.shape[0]) out_tensor = spconv.SparseConvTensor(out_features, outids, out_spatial_shape, batch_size) out_tensor.indice_dict = input.indice_dict out_tensor.grid = input.grid return out_tensor class SparseMaxPool2d(SparseMaxPool): def __init__(self, kernel_size, stride=None, padding=0, dilation=1): super(SparseMaxPool2d, self).__init__(2, kernel_size, stride, padding, dilation) class SparseMaxPool3d(SparseMaxPool): def __init__(self, kernel_size, stride=None, padding=0, dilation=1): super(SparseMaxPool3d, self).__init__(3, kernel_size, stride, padding, dilation)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/tables.py
Python
import torch from torch.autograd import Function import spconv #from torch.nn import Module from spconv.modules import SparseModule class JoinTable(SparseModule): # Module): def forward(self, input): output = spconv.SparseConvTensor( torch.cat([i.features for i in input], 1), input[1].indices, input[1].spatial_shape, input[0].batch_size) output.indice_dict = input[1].indice_dict output.grid = input[1].grid return output def input_spatial_size(self, out_size): return out_size class AddTable(SparseModule): # Module): def forward(self, input): output = spconv.SparseConvTensor(sum([i.features for i in input]), input[1].indices, input[1].spatial_shape, input[1].batch_size) output.indice_dict = input[1].indice_dict output.grid = input[1].grid return output def input_spatial_size(self, out_size): return out_size class ConcatTable(SparseModule): # Module): def forward(self, input): return [module(input) for module in self._modules.values()] def add(self, module): self._modules[str(len(self._modules))] = module return self def input_spatial_size(self, out_size): return self._modules['0'].input_spatial_size(out_size)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/test_utils.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np class TestCase(unittest.TestCase): def _GetNdArray(self, a): if not isinstance(a, np.ndarray): a = np.array(a) return a def assertAllEqual(self, a, b): """Asserts that two numpy arrays have the same values. Args: a: the expected numpy ndarray or anything can be converted to one. b: the actual numpy ndarray or anything can be converted to one. """ a = self._GetNdArray(a) b = self._GetNdArray(b) self.assertEqual( a.shape, b.shape, "Shape mismatch: expected %s, got %s." % (a.shape, b.shape)) same = (a == b) if a.dtype == np.float32 or a.dtype == np.float64: same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b))) if not np.all(same): # Prints more details than np.testing.assert_array_equal. diff = np.logical_not(same) if a.ndim: x = a[np.where(diff)] y = b[np.where(diff)] print("not equal where = ", np.where(diff)) else: # np.where is broken for scalars x, y = a, b print("not equal lhs = ", x) print("not equal rhs = ", y) np.testing.assert_array_equal(a, b) def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6): """Asserts that two numpy arrays, or dicts of same, have near values. This does not support nested dicts. Args: a: The expected numpy ndarray (or anything can be converted to one), or dict of same. Must be a dict iff `b` is a dict. b: The actual numpy ndarray (or anything can be converted to one), or dict of same. Must be a dict iff `a` is a dict. 
rtol: relative tolerance. atol: absolute tolerance. Raises: ValueError: if only one of `a` and `b` is a dict. """ is_a_dict = isinstance(a, dict) if is_a_dict != isinstance(b, dict): raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a, b)) if is_a_dict: self.assertCountEqual(a.keys(), b.keys(), msg="mismatched keys, expected %s, got %s" % (a.keys(), b.keys())) for k in a: self._assertArrayLikeAllClose(a[k], b[k], rtol=rtol, atol=atol, msg="%s: expected %s, got %s." % (k, a, b)) else: self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol) def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None): a = self._GetNdArray(a) b = self._GetNdArray(b) self.assertEqual( a.shape, b.shape, "Shape mismatch: expected %s, got %s." % (a.shape, b.shape)) if not np.allclose(a, b, rtol=rtol, atol=atol): # Prints more details than np.testing.assert_allclose. # # NOTE: numpy.allclose (and numpy.testing.assert_allclose) # checks whether two arrays are element-wise equal within a # tolerance. The relative difference (rtol * abs(b)) and the # absolute difference atol are added together to compare against # the absolute difference between a and b. Here, we want to # print out which elements violate such conditions. 
cond = np.logical_or( np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b)) if a.ndim: x = a[np.where(cond)] y = b[np.where(cond)] print("not close where = ", np.where(cond)) else: # np.where is broken for scalars x, y = a, b print("not close lhs = ", x) print("not close rhs = ", y) print("not close dif = ", np.abs(x - y)) print("not close tol = ", atol + rtol * np.abs(y)) print("dtype = %s, shape = %s" % (a.dtype, a.shape)) np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg) def params_grid(*params): size = len(params) length = 1 for p in params: length *= len(p) sizes = [len(p) for p in params] counter = [0] * size total = [] for i in range(length): total.append([0] * size) for i in range(length): for j in range(size): total[i][j] = params[j][counter[j]] counter[size - 1] += 1 for c in range(size - 1, -1, -1): if (counter[c] == sizes[c] and c > 0): counter[c - 1] += 1 counter[c] = 0 return total def generate_sparse_data(shape, num_points, num_channels, integer=False, data_range=(-1, 1), with_dense=True, dtype=np.float32): dense_shape = shape ndim = len(dense_shape) # num_points = np.random.randint(10, 100, size=[batch_size, ndim]) num_points = np.array(num_points) # num_points = np.array([3, 2]) batch_size = len(num_points) batch_indices = [] coors_total = np.stack(np.meshgrid(*[np.arange(0, s) for s in shape]), axis=-1) coors_total = coors_total.reshape(-1, ndim) for i in range(batch_size): np.random.shuffle(coors_total) inds_total = coors_total[:num_points[i]] inds_total = np.pad(inds_total, ((0, 0), (0, 1)), mode="constant", constant_values=i) batch_indices.append(inds_total) if integer: sparse_data = np.random.randint(data_range[0], data_range[1], size=[num_points.sum(), num_channels]).astype(dtype) else: sparse_data = np.random.uniform(data_range[0], data_range[1], size=[num_points.sum(), num_channels]).astype(dtype) # sparse_data = np.arange(1, num_points.sum() + 1).astype(np.float32).reshape(5, 1) res = { "features": 
sparse_data.astype(dtype), } if with_dense: dense_data = np.zeros([batch_size, num_channels, *dense_shape], dtype=sparse_data.dtype) start = 0 for i, inds in enumerate(batch_indices): for j, ind in enumerate(inds): dense_slice = (i, slice(None), *ind[:-1]) dense_data[dense_slice] = sparse_data[start + j] start += len(inds) res["features_dense"] = dense_data.astype(dtype) batch_indices = np.concatenate(batch_indices, axis=0) res["indices"] = batch_indices.astype(np.int32) return res
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/spconv/utils/__init__.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from spconv import spconv_utils from spconv.spconv_utils import (non_max_suppression_cpu, points_to_voxel_3d_np, points_to_voxel_3d_np_mean, points_to_voxel_3d_with_filtering, rbbox_intersection, rbbox_iou, rotate_non_max_suppression_cpu) try: from spconv.spconv_utils import non_max_suppression except ImportError: pass def points_to_voxel(points, voxel_size, coors_range, coor_to_voxelidx, max_points=35, max_voxels=20000, full_mean=False, block_filtering=True, block_factor=1, block_size=8, height_threshold=0.2, height_high_threshold=3.0, pad_output=False): """convert 3d points(N, >=3) to voxels. This version calculate everything in one loop. now it takes only 0.8ms(~6k voxels) with c++ and 3.2ghz cpu. Args: points: [N, ndim] float tensor. points[:, :3] contain xyz points and points[:, 3:] contain other information such as reflectivity. voxel_size: [3] list/tuple or array, float. xyz, indicate voxel size coors_range: [6] list/tuple or array, float. indicate voxel range. format: xyzxyz, minmax coor_to_voxelidx: int array. used as a dense map. max_points: int. indicate maximum points contained in a voxel. max_voxels: int. indicate maximum voxels this function create. for voxelnet, 20000 is a good choice. you should shuffle points before call this function because max_voxels may drop some points. full_mean: bool. if true, all empty points in voxel will be filled with mean of exist points. 
block_filtering: filter voxels by height. used for lidar point cloud. use some visualization tool to see filtered result. Returns: voxels: [M, max_points, ndim] float tensor. only contain points. coordinates: [M, 3] int32 tensor. zyx format. num_points_per_voxel: [M] int32 tensor. """ if full_mean: assert block_filtering is False if not isinstance(voxel_size, np.ndarray): voxel_size = np.array(voxel_size, dtype=points.dtype) if not isinstance(coors_range, np.ndarray): coors_range = np.array(coors_range, dtype=points.dtype) voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist()) voxelmap_shape = voxelmap_shape[::-1] num_points_per_voxel = np.zeros(shape=(max_voxels, ), dtype=np.int32) voxels = np.zeros(shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype) voxel_point_mask = np.zeros(shape=(max_voxels, max_points), dtype=points.dtype) coors = np.zeros(shape=(max_voxels, 3), dtype=np.int32) res = { "voxels": voxels, "coordinates": coors, "num_points_per_voxel": num_points_per_voxel, "voxel_point_mask": voxel_point_mask, } if full_mean: means = np.zeros(shape=(max_voxels, points.shape[-1]), dtype=points.dtype) voxel_num = points_to_voxel_3d_np_mean(points, voxels, voxel_point_mask, means, coors, num_points_per_voxel, coor_to_voxelidx, voxel_size.tolist(), coors_range.tolist(), max_points, max_voxels) else: if block_filtering: block_shape = [*voxelmap_shape[1:]] block_shape = [b // block_factor for b in block_shape] mins = np.full(block_shape, 99999999, dtype=points.dtype) maxs = np.full(block_shape, -99999999, dtype=points.dtype) voxel_mask = np.zeros((max_voxels, ), dtype=np.int32) voxel_num = points_to_voxel_3d_with_filtering( points, voxels, voxel_point_mask, voxel_mask, mins, maxs, coors, num_points_per_voxel, coor_to_voxelidx, voxel_size.tolist(), coors_range.tolist(), max_points, max_voxels, block_factor, block_size, height_threshold, height_high_threshold) 
voxel_mask = voxel_mask.astype(np.bool_) coors_ = coors[voxel_mask] if pad_output: res["coordinates"][:voxel_num] = coors_ res["voxels"][:voxel_num] = voxels[voxel_mask] res["voxel_point_mask"][:voxel_num] = voxel_point_mask[ voxel_mask] res["num_points_per_voxel"][:voxel_num] = num_points_per_voxel[ voxel_mask] res["coordinates"][voxel_num:] = 0 res["voxels"][voxel_num:] = 0 res["num_points_per_voxel"][voxel_num:] = 0 res["voxel_point_mask"][voxel_num:] = 0 else: res["coordinates"] = coors_ res["voxels"] = voxels[voxel_mask] res["num_points_per_voxel"] = num_points_per_voxel[voxel_mask] res["voxel_point_mask"] = voxel_point_mask[voxel_mask] voxel_num = coors_.shape[0] else: voxel_num = points_to_voxel_3d_np(points, voxels, voxel_point_mask, coors, num_points_per_voxel, coor_to_voxelidx, voxel_size.tolist(), coors_range.tolist(), max_points, max_voxels) res["voxel_num"] = voxel_num res["voxel_point_mask"] = res["voxel_point_mask"].reshape( -1, max_points, 1) return res class VoxelGenerator: def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels=20000, full_mean=True): point_cloud_range = np.array(point_cloud_range, dtype=np.float32) # [0, -40, -3, 70.4, 40, 1] voxel_size = np.array(voxel_size, dtype=np.float32) grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size grid_size = np.round(grid_size).astype(np.int64) voxelmap_shape = tuple(np.round(grid_size).astype(np.int32).tolist()) voxelmap_shape = voxelmap_shape[::-1] self._coor_to_voxelidx = np.full(voxelmap_shape, -1, dtype=np.int32) self._voxel_size = voxel_size self._point_cloud_range = point_cloud_range self._max_num_points = max_num_points self._max_voxels = max_voxels self._grid_size = grid_size self._full_mean = full_mean def generate(self, points, max_voxels=None): res = points_to_voxel(points, self._voxel_size, self._point_cloud_range, self._coor_to_voxelidx, self._max_num_points, max_voxels or self._max_voxels, self._full_mean) voxels = res["voxels"] coors = 
res["coordinates"] num_points_per_voxel = res["num_points_per_voxel"] voxel_num = res["voxel_num"] coors = coors[:voxel_num] voxels = voxels[:voxel_num] num_points_per_voxel = num_points_per_voxel[:voxel_num] return (voxels, coors, num_points_per_voxel) def generate_multi_gpu(self, points, max_voxels=None): res = points_to_voxel(points, self._voxel_size, self._point_cloud_range, self._coor_to_voxelidx, self._max_num_points, max_voxels or self._max_voxels, self._full_mean) voxels = res["voxels"] coors = res["coordinates"] num_points_per_voxel = res["num_points_per_voxel"] voxel_num = res["voxel_num"] return (voxels, coors, num_points_per_voxel) @property def voxel_size(self): return self._voxel_size @property def max_num_points_per_voxel(self): return self._max_num_points @property def point_cloud_range(self): return self._point_cloud_range @property def grid_size(self): return self._grid_size class VoxelGeneratorV2: def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels=20000, full_mean=False, block_filtering=False, block_factor=8, block_size=3, height_threshold=0.1, height_high_threshold=2.0): assert full_mean is False, "don't use this." 
point_cloud_range = np.array(point_cloud_range, dtype=np.float32) # [0, -40, -3, 70.4, 40, 1] voxel_size = np.array(voxel_size, dtype=np.float32) grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size grid_size = np.round(grid_size).astype(np.int64) if block_filtering: assert block_size > 0 assert grid_size[0] % block_factor == 0 assert grid_size[1] % block_factor == 0 voxelmap_shape = tuple(np.round(grid_size).astype(np.int32).tolist()) voxelmap_shape = voxelmap_shape[::-1] self._coor_to_voxelidx = np.full(voxelmap_shape, -1, dtype=np.int32) self._voxel_size = voxel_size self._point_cloud_range = point_cloud_range self._max_num_points = max_num_points self._max_voxels = max_voxels self._grid_size = grid_size self._full_mean = full_mean self._block_filtering = block_filtering self._block_factor = block_factor self._height_threshold = height_threshold self._block_size = block_size self._height_high_threshold = height_high_threshold def generate(self, points, max_voxels=None): res = points_to_voxel(points, self._voxel_size, self._point_cloud_range, self._coor_to_voxelidx, self._max_num_points, max_voxels or self._max_voxels, self._full_mean, self._block_filtering, self._block_factor, self._block_size, self._height_threshold, self._height_high_threshold) for k, v in res.items(): if k != "voxel_num": res[k] = v[:res["voxel_num"]] return res def generate_multi_gpu(self, points, max_voxels=None): res = points_to_voxel(points, self._voxel_size, self._point_cloud_range, self._coor_to_voxelidx, self._max_num_points, max_voxels or self._max_voxels, self._full_mean, self._block_filtering, self._block_factor, self._block_size, self._height_threshold, self._height_high_threshold, pad_output=True) return res @property def voxel_size(self): return self._voxel_size @property def max_num_points_per_voxel(self): return self._max_num_points @property def point_cloud_range(self): return self._point_cloud_range @property def grid_size(self): return self._grid_size
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/cuhash/debugging.cpp
C++
// ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision:$ // $Date:$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * debugging.cpp * * @brief Debugging/statistics/performance utilities for hash tables. */ #include <cuhash/debugging.h> #include <cuhash/definitions.h> #include <algorithm> #include <cstring> #include <cuhash/cuda_util.h> namespace cuhash { void OutputRetrievalStatistics(const unsigned n_queries, const unsigned *d_retrieval_probes, const unsigned n_functions) { unsigned *retrieval_probes = new unsigned[n_queries]; CUDA_SAFE_CALL(cudaMemcpy(retrieval_probes, d_retrieval_probes, sizeof(unsigned) * n_queries, cudaMemcpyDeviceToHost)); // Create a histogram showing how many items needed how many probes to be // found. unsigned possible_probes = n_functions + 2; unsigned *histogram = new unsigned[possible_probes]; memset(histogram, 0, sizeof(unsigned) * (possible_probes)); for (unsigned i = 0; i < n_queries; ++i) { histogram[retrieval_probes[i]]++; } // Dump it. char buffer[10000]; sprintf(buffer, "Probes for retrieval: "); PrintMessage(buffer); for (unsigned i = 0; i < possible_probes; ++i) { sprintf(buffer, "\t(%u, %u)", i, histogram[i]); PrintMessage(buffer); } delete[] retrieval_probes; delete[] histogram; } void OutputBuildStatistics(const unsigned n, const unsigned *d_iterations_taken) { // Output how many iterations each thread took until it found an empty slot. 
unsigned *iterations_taken = new unsigned[n]; CUDA_SAFE_CALL(cudaMemcpy(iterations_taken, d_iterations_taken, sizeof(unsigned) * n, cudaMemcpyDeviceToHost)); std::sort(iterations_taken, iterations_taken + n); unsigned total_iterations = 0; unsigned max_iterations_taken = 0; for (unsigned i = 0; i < n; ++i) { total_iterations += iterations_taken[i]; max_iterations_taken = std::max(max_iterations_taken, iterations_taken[i]); } unsigned current_value = iterations_taken[0]; unsigned count = 1; char buffer[10000]; sprintf(buffer, "Iterations taken:\n"); for (unsigned i = 1; i < n; ++i) { if (iterations_taken[i] != current_value) { sprintf(buffer, "%s\t(%u, %u)\n", buffer, current_value, count); current_value = iterations_taken[i]; count = 1; } else { count++; } } sprintf(buffer, "%s\t(%u, %u)", buffer, current_value, count); PrintMessage(buffer); sprintf(buffer, "Total iterations: %u", total_iterations); PrintMessage(buffer); sprintf(buffer, "Avg/Med/Max iterations: (%f %u %u)", (float)total_iterations / n, iterations_taken[n / 2], iterations_taken[n - 1]); PrintMessage(buffer); delete[] iterations_taken; // Print the length of the longest eviction chain. sprintf(buffer, "Max iterations: %u", max_iterations_taken); PrintMessage(buffer); } }; // namespace cuhash // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/cuhash/debugging.cu
CUDA
// ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision:$ // $Date:$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * debugging.cu * * @brief Debugging/statistics/performance utilities for hash tables. */ #include <cuhash/debugging.h> #include <cuhash/definitions.h> #include <cuhash/hash_table.cuh> #include <algorithm> #include <cuhash/cuda_util.h> namespace cuhash { //! Debugging function: Takes statistics on the hash functions' distribution. /*! Determines: * - How many unique slots each key has. * - How many keys hash into each slot. * - Whether any keys failed to get a full set of slots. */ __global__ void take_hash_function_statistics_kernel( const unsigned *keys, const unsigned n_entries, const unsigned table_size, const uint2 *constants, const unsigned num_functions, unsigned *num_slots_available, unsigned *num_hashing_in, unsigned *failed) { unsigned thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= n_entries) return; unsigned key = keys[thread_index]; // Determine all of the locations the key hashes into. // Also count how many keys hash into each location. unsigned locations[kMaxHashFunctions]; for (unsigned i = 0; i < num_functions; ++i) { locations[i] = hash_function_inner(constants[i], key) % table_size; if (num_hashing_in != NULL) { atomicAdd(num_hashing_in + locations[i], 1); } } // Determine whether all of the locations were different. 
unsigned num_slots = 1; for (unsigned i = 1; i < num_functions; ++i) { bool matched = false; for (unsigned j = 0; j < i; ++j) { if (locations[i] == locations[j]) { matched = true; break; } } if (!matched) { num_slots++; } } if (num_slots_available != NULL) { num_slots_available[thread_index] = num_slots; } if (failed != NULL && num_slots != num_functions) { *failed = 1; } } void TakeHashFunctionStatistics(const unsigned num_keys, const unsigned *d_keys, const unsigned table_size, const uint2 *constants, const unsigned kNumHashFunctions) { char buffer[16000]; PrintMessage("Hash function constants: "); for (unsigned i = 0; i < kNumHashFunctions; ++i) { sprintf(buffer, "\t%10u, %10u", constants[i].x, constants[i].y); PrintMessage(buffer); } unsigned *d_num_hashing_in = NULL; #ifdef COUNT_HOW_MANY_HASH_INTO_EACH_SLOT CUDA_SAFE_CALL( cudaMalloc((void **)&d_num_hashing_in, sizeof(unsigned) * table_size)); CUDA_SAFE_CALL( cudaMemset(d_num_hashing_in, 0, sizeof(unsigned) * table_size)); #endif unsigned *d_num_slots_available = NULL; #ifdef COUNT_HOW_MANY_HAVE_CYCLES CUDA_SAFE_CALL( cudaMalloc((void **)&d_num_slots_available, sizeof(unsigned) * num_keys)); #endif uint2 *d_constants = NULL; CUDA_SAFE_CALL( cudaMalloc((void **)&d_constants, sizeof(uint2) * kNumHashFunctions)); CUDA_SAFE_CALL(cudaMemcpy(d_constants, constants, sizeof(uint2) * kNumHashFunctions, cudaMemcpyHostToDevice)); take_hash_function_statistics_kernel<<<ComputeGridDim(num_keys), kBlockSize>>>( d_keys, num_keys, table_size, d_constants, kNumHashFunctions, d_num_slots_available, d_num_hashing_in, NULL); CUDA_SAFE_CALL(cudaFree(d_constants)); #ifdef COUNT_HOW_MANY_HASH_INTO_EACH_SLOT unsigned *num_hashing_in = new unsigned[table_size]; CUDA_SAFE_CALL(cudaMemcpy(num_hashing_in, d_num_hashing_in, sizeof(unsigned) * table_size, cudaMemcpyDeviceToHost)); /* // Print how many items hash into each slot. // Used to make sure items are spread evenly throughout the table. 
buffer[0] = '\0'; PrintMessage("Num hashing into each: ", true); for (unsigned i = 0; i < table_size; ++i) { sprintf(buffer, "%s\t%2u", buffer, num_hashing_in[i]); if (i % 25 == 24) { PrintMessage(buffer, true); buffer[0] = '\0'; } } PrintMessage(buffer,true); */ // Print a histogram of how many items are hashed into each slot. Shows // if average number of items hashing into each slot is low. std::sort(num_hashing_in, num_hashing_in + table_size); int count = 1; unsigned previous = num_hashing_in[0]; sprintf(buffer, "Num items hashing into a slot:\t"); PrintMessage(buffer); for (unsigned i = 1; i < table_size; ++i) { if (num_hashing_in[i] != previous) { sprintf(buffer, "\t(%u, %u)", previous, count); PrintMessage(buffer); previous = num_hashing_in[i]; count = 1; } else { count++; } } sprintf(buffer, "\t(%u, %u)", previous, count); PrintMessage(buffer); delete[] num_hashing_in; CUDA_SAFE_CALL(cudaFree(d_num_hashing_in)); #endif #ifdef COUNT_HOW_MANY_HAVE_CYCLES unsigned *num_slots_available = new unsigned[num_keys]; CUDA_SAFE_CALL(cudaMemcpy(num_slots_available, d_num_slots_available, sizeof(unsigned) * num_keys, cudaMemcpyDeviceToHost)); static const unsigned kHistogramSize = kNumHashFunctions + 1; unsigned *histogram = new unsigned[kHistogramSize]; memset(histogram, 0, sizeof(unsigned) * kHistogramSize); for (unsigned i = 0; i < num_keys; ++i) { histogram[num_slots_available[i]]++; } sprintf(buffer, "Slots assigned to each key: "); for (unsigned i = 1; i < kHistogramSize; ++i) { sprintf(buffer, "%s(%u, %u) ", buffer, i, histogram[i]); } PrintMessage(buffer); delete[] histogram; delete[] num_slots_available; CUDA_SAFE_CALL(cudaFree(d_num_slots_available)); #endif } bool CheckAssignedSameSlot(const unsigned N, const unsigned num_keys, const unsigned *d_keys, const unsigned table_size, uint2 *constants) { unsigned *d_cycle_exists = NULL; uint2 *d_constants = NULL; CUDA_SAFE_CALL(cudaMalloc((void **)&d_cycle_exists, sizeof(unsigned))); CUDA_SAFE_CALL(cudaMalloc((void 
**)&d_constants, sizeof(uint2) * N)); CUDA_SAFE_CALL(cudaMemset(d_cycle_exists, 0, sizeof(unsigned))); CUDA_SAFE_CALL(cudaMemcpy(d_constants, constants, sizeof(uint2) * N, cudaMemcpyHostToDevice)); // Check if all keys were given a full set of N slots by the functions. take_hash_function_statistics_kernel<<<ComputeGridDim(num_keys), kBlockSize>>>( d_keys, num_keys, table_size, d_constants, N, NULL, NULL, d_cycle_exists); unsigned cycle_exists; CUDA_SAFE_CALL(cudaMemcpy(&cycle_exists, d_cycle_exists, sizeof(unsigned), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(d_cycle_exists)); CUDA_SAFE_CALL(cudaFree(d_constants)); return (cycle_exists != 0); } void PrintStashContents(const Entry *d_stash) { Entry *stash = new Entry[cuhash::kStashSize]; CUDA_SAFE_CALL(cudaMemcpy(stash, d_stash, sizeof(Entry) * cuhash::kStashSize, cudaMemcpyDeviceToHost)); for (unsigned i = 0; i < cuhash::kStashSize; ++i) { if (get_key(stash[i]) != kKeyEmpty) { char buffer[256]; sprintf(buffer, "Stash[%u]: %u = %u", i, get_key(stash[i]), get_value(stash[i])); PrintMessage(buffer, true); } } delete[] stash; } }; // namespace cuhash // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/cuhash/hash_functions.cpp
C++
// nvcc (cuda) 9.0 with gcc 5.5 don't support random, so compile it in host #include <random> namespace cuhash { std::random_device random_dev; std::mt19937 random_engine(random_dev()); std::uniform_int_distribution<unsigned> uint_distribution; unsigned generate_random_uint32() { return uint_distribution(random_engine); } } // namespace cuhash
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/cuhash/hash_functions.cu
CUDA
#include <cassert> #include <cuhash/debugging.h> #include <cuhash/hash_functions.h> #include <cuhash/hash_table.h> namespace cuhash { void GenerateFunctions(const unsigned N, const unsigned num_keys, const unsigned *d_keys, const unsigned table_size, uint2 *constants) { bool regenerate = true; while (regenerate) { regenerate = false; // Generate a set of hash function constants for this build attempt. for (unsigned i = 0; i < N; ++i) { // uint_distribution(random_engine) % kPrimeDivisor; // genrand_int32() % kPrimeDivisor; unsigned new_a = generate_random_uint32() % kPrimeDivisor; constants[i].x = (1 > new_a ? 1 : new_a); constants[i].y = generate_random_uint32() % kPrimeDivisor; } #ifdef FORCEFULLY_GENERATE_NO_CYCLES // Ensure that every key gets N different slots. regenerate = CheckAssignedSameSlot(N, num_keys, d_keys, table_size, constants); #endif } #ifdef TAKE_HASH_FUNCTION_STATISTICS // Examine how well distributed the items are. TakeHashFunctionStatistics(num_keys, d_keys, table_size, constants, N); #endif } }; // namespace cuhash
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/cuhash/hash_table.cpp
C++
// ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision:$ // $Date:$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file hash_table.cpp * * @brief Implements a basic hash table that stores one value per key. */ #include <cuhash/debugging.h> #include <cuhash/hash_table.h> #include <algorithm> #include <cmath> #include <cstdio> #include <cstring> #include <cuda_runtime_api.h> #include <cuhash/cuda_util.h> #include <limits> namespace cuhash { char buffer[256]; //! @name Internal /// @{ dim3 ComputeGridDim(unsigned n) { // Round up in order to make sure all items are hashed in. dim3 grid((n + kBlockSize - 1) / kBlockSize); if (grid.x > kGridSize) { grid.y = (grid.x + kGridSize - 1) / kGridSize; grid.x = kGridSize; } return grid; } unsigned ComputeMaxIterations(const unsigned n, const unsigned table_size, const unsigned num_functions) { float lg_input_size = (float)(log((double)n) / log(2.0)); // #define CONSTANT_ITERATIONS #ifdef CONSTANT_ITERATIONS // Set the maximum number of iterations to 7lg(N). const unsigned MAX_ITERATION_CONSTANT = 7; unsigned max_iterations = MAX_ITERATION_CONSTANT * lg_input_size; #else // Use an empirical formula for determining what the maximum number of // iterations should be. Works OK in most situations. 
float load_factor = float(n) / table_size; float ln_load_factor = (float)(log(load_factor) / log(2.71828183)); unsigned max_iterations = (unsigned)(4.0 * ceil(-1.0 / (0.028255 + 1.1594772 * ln_load_factor) * lg_input_size)); #endif return max_iterations; } /// @} HashTable::HashTable() : table_size_(0), d_contents_(NULL), stash_count_(0), d_failures_(NULL) { CUDA_CHECK_ERROR("Failed in constructor.\n"); } bool HashTable::Initialize(const unsigned max_table_entries, const float space_usage, const unsigned num_functions) { Release(); // Determine the minimum amount of slots the table requires, // and whether the space_usage is within range. float minimum_space_usage; if (num_functions < 2 || num_functions > 5) { char message[256] = "Number of hash functions must be from 2 to 5; " "others are unimplemented."; PrintMessage(message, true); return false; } else { minimum_space_usage = kMinimumSpaceUsages[num_functions]; } if (space_usage < minimum_space_usage) { sprintf(buffer, "Minimum possible space usage for %u functions is %f.", num_functions, minimum_space_usage); PrintMessage(buffer); return false; } num_hash_functions_ = num_functions; table_size_ = unsigned(ceil(max_table_entries * space_usage)); // Allocate memory. 
const unsigned slots_to_allocate = table_size_ + kStashSize; CUDA_SAFE_CALL( cudaMalloc((void **)&d_contents_, sizeof(Entry) * slots_to_allocate)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_failures_, sizeof(unsigned))); if (!d_contents_ || !d_failures_) { fprintf(stderr, "Failed to allocate %u slots.\n", slots_to_allocate); return false; } CUDA_CHECK_ERROR("Failed to initialize.\n"); return true; } void HashTable::Release() { table_size_ = 0; CUDA_SAFE_CALL(cudaFree(d_contents_)); CUDA_SAFE_CALL(cudaFree(d_failures_)); d_contents_ = NULL; d_failures_ = NULL; CUDA_CHECK_ERROR("Failed during release.\n"); } bool HashTable::Build(const unsigned n, const unsigned *d_keys, const unsigned *d_values) { unsigned max_iterations = ComputeMaxIterations(n, table_size_, num_hash_functions_); unsigned num_failures = 1; unsigned num_attempts = 0; // Storage for statistics collection. unsigned *d_iterations_taken = NULL; #ifdef TRACK_ITERATIONS CUDA_SAFE_CALL( cudaMalloc((void **)&d_iterations_taken, sizeof(unsigned) * n)); #endif // Track how many items ended up in the stash. unsigned *d_stash_count = NULL; CUDA_SAFE_CALL(cudaMalloc((void **)&d_stash_count, sizeof(unsigned))); CUDA_CHECK_ERROR("Failed before main build loop.\n"); // Main build loop. while (num_failures && ++num_attempts < kMaxRestartAttempts) { CUDA_SAFE_CALL(cudaMemset(d_stash_count, 0, sizeof(unsigned))); // Generate new hash functions. if (num_hash_functions_ == 2) constants_2_.Generate(n, d_keys, table_size_); else if (num_hash_functions_ == 3) constants_3_.Generate(n, d_keys, table_size_); else if (num_hash_functions_ == 4) constants_4_.Generate(n, d_keys, table_size_); else constants_5_.Generate(n, d_keys, table_size_); stash_constants_.x = std::max(1u, generate_random_uint32()) % kPrimeDivisor; stash_constants_.y = generate_random_uint32() % kPrimeDivisor; stash_count_ = 0; // Initialize memory. 
unsigned slots_in_table = table_size_ + kStashSize; CUDAWrapper::ClearTable(slots_in_table, kEntryEmpty, d_contents_); num_failures = 0; CUDAWrapper::CallCuckooHash( n, num_hash_functions_, d_keys, d_values, table_size_, constants_2_, constants_3_, constants_4_, constants_5_, max_iterations, d_contents_, stash_constants_, d_stash_count, d_failures_, d_iterations_taken); // Check if successful. CUDA_SAFE_CALL(cudaMemcpy(&num_failures, d_failures_, sizeof(unsigned), cudaMemcpyDeviceToHost)); #ifdef COUNT_UNINSERTED if (num_failures) { printf("Failed to insert %u items.\n", num_failures); } #endif } // Copy out the stash size. CUDA_SAFE_CALL(cudaMemcpy(&stash_count_, d_stash_count, sizeof(unsigned), cudaMemcpyDeviceToHost)); if (stash_count_ && num_failures == 0) { // sprintf(buffer, "Stash size: %u", stash_count_); // PrintMessage(buffer, true); #ifdef _DEBUG PrintStashContents(d_contents_ + table_size_); #endif } CUDA_SAFE_CALL(cudaFree(d_stash_count)); #ifdef TRACK_ITERATIONS if (num_failures == 0) { OutputBuildStatistics(n, d_iterations_taken); } CUDA_SAFE_CALL(cudaFree(d_iterations_taken)); #endif // Dump some info if a restart was required. if (num_attempts >= kMaxRestartAttempts) { sprintf(buffer, "Completely failed to build"); PrintMessage(buffer, true); } else if (num_attempts > 1) { sprintf(buffer, "Needed %u attempts to build, you can ignore this message.", num_attempts); PrintMessage(buffer, true); } CUDA_CHECK_ERROR("Error occurred during hash table build.\n"); return num_failures == 0; } void HashTable::Retrieve(const unsigned n_queries, const unsigned *d_keys, unsigned *d_values) { CUDAWrapper::CallHashRetrieve(n_queries, num_hash_functions_, d_keys, table_size_, d_contents_, constants_2_, constants_3_, constants_4_, constants_5_, stash_constants_, stash_count_, d_values); } }; // namespace cuhash // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/cuhash/hash_table.cu
CUDA
// ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision:$ // $Date:$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt in // the root directory of this source distribution. // ------------------------------------------------------------- /** * @file hash_table.cu * * @brief Hides all of the CUDA calls from the actual CPP file. */ #include <cuhash/cuda_util.h> #include <cuhash/debugging.h> #include <cuhash/definitions.h> #include <cuhash/hash_table.cuh> #include <cuda.h> namespace cuhash { namespace CUDAWrapper { void ClearTable(const unsigned slots_in_table, const Entry fill_value, Entry *d_contents) { clear_table<Entry><<<ComputeGridDim(slots_in_table), kBlockSize>>>( slots_in_table, fill_value, d_contents); TV_CHECK_CUDA_ERR_V2("Error occurred during hash table clear.\n"); } void CallCuckooHash(const unsigned n, const unsigned num_hash_functions, const unsigned *d_keys, const unsigned *d_values, const unsigned table_size, const Functions<2> constants_2, const Functions<3> constants_3, const Functions<4> constants_4, const Functions<5> constants_5, const unsigned max_iterations, Entry *d_contents, uint2 stash_constants, unsigned *d_stash_count, unsigned *d_failures, unsigned *d_iterations_taken) { // Build the table. 
cudaMemset(d_failures, 0, sizeof(unsigned)); if (num_hash_functions == 2) { CuckooHash<<<ComputeGridDim(n), kBlockSize>>>( n, d_keys, d_values, table_size, constants_2, max_iterations, d_contents, stash_constants, d_stash_count, d_failures, d_iterations_taken); } else if (num_hash_functions == 3) { CuckooHash<<<ComputeGridDim(n), kBlockSize>>>( n, d_keys, d_values, table_size, constants_3, max_iterations, d_contents, stash_constants, d_stash_count, d_failures, d_iterations_taken); } else if (num_hash_functions == 4) { CuckooHash<<<ComputeGridDim(n), kBlockSize>>>( n, d_keys, d_values, table_size, constants_4, max_iterations, d_contents, stash_constants, d_stash_count, d_failures, d_iterations_taken); } else { CuckooHash<<<ComputeGridDim(n), kBlockSize>>>( n, d_keys, d_values, table_size, constants_5, max_iterations, d_contents, stash_constants, d_stash_count, d_failures, d_iterations_taken); } CUDA_CHECK_ERROR("Error occurred during hash table build.\n"); } void CallHashRetrieve(const unsigned n_queries, const unsigned num_hash_functions, const unsigned *d_keys, const unsigned table_size, const Entry *d_contents, const Functions<2> constants_2, const Functions<3> constants_3, const Functions<4> constants_4, const Functions<5> constants_5, const uint2 stash_constants, const unsigned stash_count, unsigned *d_values) { unsigned *d_retrieval_probes = NULL; #ifdef TRACK_ITERATIONS CUDA_SAFE_CALL( cudaMalloc((void **)&d_retrieval_probes, sizeof(unsigned) * n_queries)); #endif if (num_hash_functions == 2) { hash_retrieve<<<ComputeGridDim(n_queries), kBlockSize>>>( n_queries, d_keys, table_size, d_contents, constants_2, stash_constants, stash_count, d_values, d_retrieval_probes); } else if (num_hash_functions == 3) { hash_retrieve<<<ComputeGridDim(n_queries), kBlockSize>>>( n_queries, d_keys, table_size, d_contents, constants_3, stash_constants, stash_count, d_values, d_retrieval_probes); } else if (num_hash_functions == 4) { hash_retrieve<<<ComputeGridDim(n_queries), 
kBlockSize>>>( n_queries, d_keys, table_size, d_contents, constants_4, stash_constants, stash_count, d_values, d_retrieval_probes); } else { hash_retrieve<<<ComputeGridDim(n_queries), kBlockSize>>>( n_queries, d_keys, table_size, d_contents, constants_5, stash_constants, stash_count, d_values, d_retrieval_probes); } CUDA_CHECK_ERROR("Retrieval failed.\n"); #ifdef TRACK_ITERATIONS OutputRetrievalStatistics(n_queries, d_retrieval_probes, num_hash_functions); CUDA_SAFE_CALL(cudaFree(d_retrieval_probes)); #endif } }; // namespace CUDAWrapper }; // namespace cuhash
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/cuhash/main.cc
C++
#include <cuda.h> #include <cuhash/hash_table.h> int main() { auto table = cuhash::HashTable(); table.Initialize(10, 2.0); const int N = 10; // ハッシュテーブルに格納するデータ int keys[N] = {1, 6, 4, 9, 0, 3, 7, 2, 5, 8}; int vals[N] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; // デバイスメモリにコピー int *d_keys, *d_vals; cudaMalloc((void **)&d_keys, sizeof(int) * N); cudaMemcpy(d_keys, keys, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_vals, sizeof(int) * N); cudaMemcpy(d_vals, vals, sizeof(int) * N, cudaMemcpyHostToDevice); // ハッシュテーブルにクエリするデータ int input[N] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; int output[N]; // デバイスメモリにコピー int *d_input, *d_output; cudaMalloc((void **)&d_input, sizeof(int) * N); cudaMemcpy(d_input, input, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_output, sizeof(int) * N); cudaMemset(d_output, 0, sizeof(int) * N); bool s = table.Build(N, (const unsigned int *)d_keys, (const unsigned int *)d_vals); std::cout << s << std::endl; table.Retrieve(N, (const unsigned int *)d_input, (unsigned int *)d_output); std::cout << s << std::endl; cudaMemcpy(output, d_output, sizeof(int) * N, cudaMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { printf("%d\n", output[i]); } return 0; }
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/all.cc
C++
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <spconv/fused_spconv_ops.h> #include <spconv/nms_ops.h> #include <spconv/pillar_scatter_ops.h> #include <spconv/pool_ops.h> #include <spconv/spconv_ops.h> #include <torch/script.h> static auto registry = torch::RegisterOperators() .op("spconv::get_indice_pairs", &spconv::getIndicePairs) .op("spconv::indice_conv", &spconv::indiceConv) .op("spconv::indice_conv_batch", &spconv::indiceConvBatch) .op("spconv::indice_conv_backward", &spconv::indiceConvBackward) .op("spconv::fused_indice_conv_bn", &spconv::fusedIndiceConvBatchNorm) .op("spconv::indice_maxpool", &spconv::indiceMaxPool) .op("spconv::indice_maxpool_backward", &spconv::indiceMaxPoolBackward) .op("spconv::nms", &spconv::nonMaxSuppression<float>) .op("spconv::pillar_scatter_float", &spconv::pointPillarScatter<float>) .op("spconv::pillar_scatter_half", &spconv::pointPillarScatter<at::Half>);
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/cublas_gemm.cc
C++
#include <ATen/ATen.h> #include <spconv/cublas_gemm.h> namespace spconv { template <> cublasStatus_t cublasTgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc) { return cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } template <> cublasStatus_t cublasTgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const __half *alpha, const __half *A, int lda, const __half *B, int ldb, const __half *beta, __half *C, int ldc) { return cublasHgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } template <> cublasStatus_t cublasTgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const at::Half *alpha, const at::Half *A, int lda, const at::Half *B, int ldb, const at::Half *beta, at::Half *C, int ldc) { return cublasHgemm(handle, transa, transb, m, n, k, reinterpret_cast<const __half *>(alpha), reinterpret_cast<const __half *>(A), lda, reinterpret_cast<const __half *>(B), ldb, reinterpret_cast<const __half *>(beta), reinterpret_cast<__half *>(C), ldc); } template <> cublasStatus_t cublasTgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) { return cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } } // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/indice.cc
C++
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/Parallel.h> #include <spconv/geometry.h> #include <spconv/indice.h> #include <spconv/spconv_ops.h> #include <tensorview/tensor.h> #include <torch/script.h> namespace spconv { template <typename Index, typename IndexGrid, unsigned NDim> Index getIndicePairsConv(tv::TensorView<const Index> indicesIn, tv::TensorView<Index> indicesOut, tv::TensorView<IndexGrid> gridsOut, tv::TensorView<Index> indicePairs, tv::TensorView<Index> indiceNum, const Index *kernelSize, const Index *stride, const Index *padding, const Index *dilation, const Index *outSpatialShape) { // indicesOut: num_active * kernelVolume * (NDim + 1) Index numAct = 0; auto numActIn = indicesIn.dim(0); Index batchIdx = 0; Index spatialVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { spatialVolume *= outSpatialShape[i]; } Index kernelVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { kernelVolume *= kernelSize[i]; } Index numValidPoints = 0; std::vector<Index> validPoints_(kernelVolume * (NDim + 1)); Index *validPoints = validPoints_.data(); Index *pointPtr = nullptr; Index hashval; tsl::robin_map<Index, Index> hash; for (int j = 0; j < numActIn; ++j) { batchIdx = indicesIn(j, 0); numValidPoints = getValidOutPos<Index, NDim>( indicesIn.data() + j * (NDim + 1) + 1, kernelSize, stride, padding, dilation, outSpatialShape, validPoints); for (Index i = 0; i < numValidPoints; ++i) { pointPtr = validPoints + i * 
(NDim + 1); auto offset = pointPtr[NDim]; auto index = tv::rowArrayIdx<Index, NDim>(pointPtr, outSpatialShape) + spatialVolume * batchIdx; auto iter = hash.find(index); if (iter == hash.end()) { for (unsigned k = 1; k < NDim + 1; ++k) { indicesOut(numAct, k) = pointPtr[k - 1]; } indicesOut(numAct, 0) = batchIdx; hashval = numAct++; hash[index] = hashval; } else { hashval = iter->second; } // indicePairs: [K, 2, L] indicePairs(0, offset, indiceNum[offset]) = j; indicePairs(1, offset, indiceNum[offset]++) = hashval; } } return numAct; } template <typename Index, typename IndexGrid, unsigned NDim> Index getIndicePairsDeConv(tv::TensorView<const Index> indicesIn, tv::TensorView<Index> indicesOut, tv::TensorView<IndexGrid> gridsOut, tv::TensorView<Index> indicePairs, tv::TensorView<Index> indiceNum, const Index *kernelSize, const Index *stride, const Index *padding, const Index *dilation, const Index *outSpatialShape) { Index numAct = 0; auto numActIn = indicesIn.dim(0); Index batchIdx = 0; Index spatialVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { spatialVolume *= outSpatialShape[i]; } Index kernelVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { kernelVolume *= kernelSize[i]; } Index numValidPoints = 0; std::vector<Index> validPoints_(kernelVolume * (NDim + 1)); Index *validPoints = validPoints_.data(); Index *pointPtr = nullptr; Index hashval; tsl::robin_map<Index, Index> hash; for (int j = 0; j < numActIn; ++j) { batchIdx = indicesIn(j, 0); numValidPoints = getValidOutPosTranspose<Index, NDim>( indicesIn.data() + j * (NDim + 1) + 1, kernelSize, stride, padding, dilation, outSpatialShape, validPoints); for (Index i = 0; i < numValidPoints; ++i) { pointPtr = validPoints + i * (NDim + 1); auto offset = pointPtr[NDim]; auto index = tv::rowArrayIdx<Index, NDim>(pointPtr, outSpatialShape) + spatialVolume * batchIdx; auto iter = hash.find(index); if (iter == hash.end()) { for (unsigned k = 1; k < NDim + 1; ++k) { indicesOut(numAct, k) = pointPtr[k - 
1]; } indicesOut(numAct, 0) = batchIdx; hashval = numAct++; hash[index] = hashval; } else { hashval = iter->second; } // indicePairs: [K, 2, L] indicePairs(0, offset, indiceNum[offset]) = j; indicePairs(1, offset, indiceNum[offset]++) = hashval; } } return numAct; } #ifndef TV_WINDOWS template <typename Index, typename IndexGrid, unsigned NDim> Index getIndicePairsSubM(tv::TensorView<const Index> indicesIn, tv::TensorView<IndexGrid> gridsOut, tv::TensorView<Index> indicePairs, tv::TensorView<Index> indiceNum, const Index *const kernelSize, const Index *const stride, const Index *const padding, const Index *dilation, const Index *const outSpatialShape) { Index numAct = 0; auto numActIn = indicesIn.dim(0); Index batchIdx = 0; Index spatialVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { spatialVolume *= outSpatialShape[i]; } Index kernelVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { kernelVolume *= kernelSize[i]; } tsl::robin_map<Index, Index> hash; for (int j = 0; j < numActIn; ++j) { Index index = 0; index = tv::rowArrayIdx<Index, NDim>(indicesIn.data() + j * (NDim + 1) + 1, outSpatialShape) + spatialVolume * indicesIn(j, 0); hash[index] = j; } at::parallel_for(0, numActIn, 0, [&](int64_t begin, int64_t end) { Index index = 0; Index numValidPoints = 0; std::vector<Index> validPoints_(kernelVolume * (NDim + 1)); Index *validPoints = validPoints_.data(); Index *pointPtr = nullptr; Index oldOffset = 0; for (int j = begin; j < end; ++j) { numValidPoints = getValidOutPos<Index, NDim>( indicesIn.data() + j * (NDim + 1) + 1, kernelSize, stride, padding, dilation, outSpatialShape, validPoints); for (Index i = 0; i < numValidPoints; ++i) { pointPtr = validPoints + i * (NDim + 1); auto offset = pointPtr[NDim]; index = tv::rowArrayIdx<Index, NDim>(pointPtr, outSpatialShape) + spatialVolume * indicesIn(j, 0); auto iter = hash.find(index); if (iter != hash.end()) { #pragma omp atomic capture oldOffset = indiceNum[offset]++; indicePairs(0, offset, 
oldOffset) = j; indicePairs(1, offset, oldOffset) = iter->second; } } } }); return numActIn; } #else template <typename Index, typename IndexGrid, unsigned NDim> Index getIndicePairsSubM(tv::TensorView<const Index> indicesIn, tv::TensorView<IndexGrid> gridsOut, tv::TensorView<Index> indicePairs, tv::TensorView<Index> indiceNum, const Index *const kernelSize, const Index *const stride, const Index *const padding, const Index *dilation, const Index *const outSpatialShape) { Index numAct = 0; auto numActIn = indicesIn.dim(0); Index batchIdx = 0; Index spatialVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { spatialVolume *= outSpatialShape[i]; } Index kernelVolume = 1; #pragma unroll for (int i = 0; i < NDim; ++i) { kernelVolume *= kernelSize[i]; } Index numValidPoints = 0; // Index validPoints[kernelVolume * (NDim + 1)]; std::vector<Index> validPoints_(kernelVolume * (NDim + 1)); Index *validPoints = validPoints_.data(); Index *pointPtr = nullptr; tsl::robin_map<Index, Index> hash; for (int j = 0; j < numActIn; ++j) { Index index = 0; index = tv::rowArrayIdx<Index, NDim>(indicesIn.data() + j * (NDim + 1) + 1, outSpatialShape) + spatialVolume * indicesIn(j, 0); hash[index] = j; } Index index = 0; for (int j = 0; j < numActIn; ++j) { numValidPoints = getValidOutPos<Index, NDim>( indicesIn.data() + j * (NDim + 1) + 1, kernelSize, stride, padding, dilation, outSpatialShape, validPoints); for (Index i = 0; i < numValidPoints; ++i) { pointPtr = validPoints + i * (NDim + 1); auto offset = pointPtr[NDim]; index = tv::rowArrayIdx<Index, NDim>(pointPtr, outSpatialShape) + spatialVolume * indicesIn(j, 0); auto iter = hash.find(index); if (iter != hash.end()) { indicePairs(0, offset, indiceNum[offset]) = j; indicePairs(1, offset, indiceNum[offset]++) = iter->second; } } } return numActIn; } #endif int create_conv_indice_pair_cpu( torch::Tensor indicesIn, torch::Tensor indicesOut, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, 
std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = gridsOut.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; tv::dispatch_torch<int32_t, int64_t>(indicesIn.scalar_type(), [&](auto V) { using Index = TV_DECLTYPE(V); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); if (transpose) numActIn = getIndicePairsDeConv<Index, IndexGrid, NDim>( tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicesOut), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks.data(), st.data(), pa.data(), di.data(), ou.data()); else numActIn = getIndicePairsConv<Index, IndexGrid, NDim>( tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicesOut), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks.data(), st.data(), pa.data(), di.data(), ou.data()); }); }); return numActIn; } int create_submconv_indice_pair_cpu( torch::Tensor indicesIn, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = 
gridsOut.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; tv::dispatch_torch<int32_t, int64_t>(indicesIn.scalar_type(), [&](auto V) { using Index = TV_DECLTYPE(V); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); numActIn = getIndicePairsSubM<Index, IndexGrid, NDim>( tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks.data(), st.data(), pa.data(), di.data(), ou.data()); }); }); return numActIn; } } // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/indice.cu
CUDA
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <chrono> #include <cuhash/hash_table.h> #include <limits> #include <spconv/indice.cu.h> #include <spconv/indice.h> #include <tensorview/cuda_utils.h> #include <tensorview/mp_helper.h> #include <tensorview/tensor.h> #include <tensorview/tensorview.h> #include <tensorview/torch_utils.h> #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <type_traits> #include <utility/timer.h> namespace spconv { using max_kernel_vol_t = tv::mp_list_c<int, 9, 16, 27, 32, 128, 256, 4096>; int create_conv_indice_pair_p1_cuda( torch::Tensor indicesIn, torch::Tensor indicePairs, torch::Tensor indiceNum, torch::Tensor indicePairUnique, std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose) { auto stream = at::cuda::getCurrentCUDAStream(); auto ndim = kernelSize.size(); auto numActIn = indicesIn.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); 
tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); tv::DispatchInt<max_kernel_vol_t>()( kernelVolume, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; if (transpose) { prepareDeConvIndicePairsKernel<Index, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), tv::torch2tv<Index>(indicePairUnique), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("prepareDeConvIndicePairsKernel failed"); } else { prepareIndicePairsKernel<Index, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), tv::torch2tv<Index>(indicePairUnique), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("prepareIndicePairsKernel failed"); } #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, prepareDeConvIndicePairsKernel<Index, NDim, MaxKernelVolume>)); tv::ssprint("prepareIndicePairsKernel<", tv::type_s<Index>, NDim, MaxKernelVolume, ">", attr.numRegs); #endif }); }); }); return 1; } int create_conv_indice_pair_p2_cuda( torch::Tensor indicesIn, torch::Tensor indicesOut, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, torch::Tensor indicePairUnique, std::vector<int64_t> outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto stream = at::cuda::getCurrentCUDAStream(); auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = gridsOut.size(0); int numAct = indicePairUnique.size(0) - 1; auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 0; bool failed = false; 
tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; using IndexGrid = int32_t; tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); if (useHash) { auto table = cuhash::HashTable(); // std::cout << "create " << numAct << " size table..." << std::endl; table.Initialize(numAct, 2.0, 4); unsigned *d_values = nullptr; cudaMalloc((void **)&d_values, sizeof(unsigned) * numAct); TV_CHECK_CUDA_ERR_V2("cudaMalloc failed"); arangeKernel<unsigned> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(d_values, numAct); TV_CHECK_CUDA_ERR_V2("arangeKernel failed"); bool res = table.Build( numAct, reinterpret_cast<unsigned *>(indicePairUnique.data_ptr<Index>()), d_values); cudaFree(d_values); TV_CHECK_CUDA_ERR_V2("cudaFree failed"); if (!res) { failed = true; return; } assignIndiceOutKernel<Index, NDim> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), numAct, tv::torch2tv<Index>(indicePairUnique), ou, batchSize); TV_CHECK_CUDA_ERR_V2("assignIndiceOutKernel failed"); auto tableSize = table.get_table_size(); auto tableData = table.data(); auto constants = table.get_constants_4(); auto stash_constants = table.get_stash_constants(); auto stash_count = table.get_stash_count(); assignIndicePairsHashKernel<Index, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), numActIn, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), tableSize, tableData, constants, stash_constants, stash_count); TV_CHECK_CUDA_ERR_V2("assignIndicePairsHashKernel failed"); } else { assignGridAndIndiceOutKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), 
tv::torch2tv<IndexGrid>(gridsOut), numAct, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), ou, batchSize); TV_CHECK_CUDA_ERR_V2("assignGridAndIndiceOutKernel failed"); assignIndicePairsKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesOut), tv::torch2tv<IndexGrid>(gridsOut), numActIn, tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indicePairUnique), ou); TV_CHECK_CUDA_ERR_V2("assignIndicePairsKernel failed"); #ifdef TV_LOG_KERNEL_INFO cudaFuncAttributes attr; checkCudaErrors(cudaFuncGetAttributes( &attr, assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>)); tv::ssprint("assignGridAndIndiceOutKernel<", tv::type_s<Index>, NDim, ">", attr.numRegs); cudaFuncAttributes attr2; checkCudaErrors(cudaFuncGetAttributes( &attr2, assignIndicePairsKernel<Index, IndexGrid, NDim>)); tv::ssprint("assignIndicePairsKernel<", tv::type_s<Index>, NDim, ">", attr2.numRegs); #endif } if (resetGrid && (!useHash)) { resetGridKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numAct), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(indicePairUnique.data_ptr<Index>(), tv::torch2tv<IndexGrid>(gridsOut), numAct); TV_CHECK_CUDA_ERR_V2("resetGridKernel failed"); } }); }); if (failed){ return -1; } return numAct; } template <typename T> struct is_valid { __device__ __forceinline__ bool operator()(const T x) { return x != -1; } }; int create_submconv_indice_pair_cuda( torch::Tensor indicesIn, torch::Tensor gridsOut, torch::Tensor indicePairs, torch::Tensor indiceNum, std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> outSpatialShape, bool transpose, bool resetGrid, bool useHash) { auto stream = at::cuda::getCurrentCUDAStream(); auto ndim = outSpatialShape.size(); auto numActIn = indicesIn.size(0); int batchSize = gridsOut.size(0); auto kernelVolume = indiceNum.size(0); if (numActIn == 0) return 
0; bool failed = false; tv::dispatch_torch<int32_t>(indicesIn.scalar_type(), [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); using IndexGrid = int32_t; tv::dispatch_int<2, 3, 4>(ndim, [&](auto I) { constexpr int NDim = TV_DECLTYPE(I)::value; tv::SimpleVector<Index, NDim> ks(kernelSize.begin(), kernelSize.end()); tv::SimpleVector<Index, NDim> st(stride.begin(), stride.end()); tv::SimpleVector<Index, NDim> pa(padding.begin(), padding.end()); tv::SimpleVector<Index, NDim> di(dilation.begin(), dilation.end()); tv::SimpleVector<Index, NDim> ou(outSpatialShape.begin(), outSpatialShape.end()); Index spatialVolume = 1; for (int i = 0; i < NDim; ++i) { spatialVolume *= outSpatialShape[i]; } if (useHash) { auto table = cuhash::HashTable(); // std::cout << "create " << numAct << " size table..." << std::endl; table.Initialize(numActIn, 2.0, 4); unsigned *d_keyvalues = nullptr; cudaMalloc((void **)&d_keyvalues, sizeof(unsigned) * numActIn * 2); unsigned *d_values = d_keyvalues + numActIn; TV_CHECK_CUDA_ERR_V2("cudaMalloc failed"); prepareSubMHashKernel<Index, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), d_keyvalues, d_values, ou); TV_CHECK_CUDA_ERR_V2("prepareSubMHashKernel failed"); bool res = table.Build(numActIn, reinterpret_cast<unsigned *>(d_keyvalues), reinterpret_cast<unsigned *>(d_values)); cudaFree(d_keyvalues); TV_CHECK_CUDA_ERR_V2("cudaFree failed"); if (!res) { failed = true; return; } auto tableSize = table.get_table_size(); auto tableData = table.data(); auto constants = table.get_constants_4(); auto stash_constants = table.get_stash_constants(); auto stash_count = table.get_stash_count(); tv::DispatchInt<max_kernel_vol_t>()( kernelVolume, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; getSubMIndicePairsHashKernel<Index, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, 
stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks, st, pa, di, ou, tableSize, tableData, constants, stash_constants, stash_count); TV_CHECK_CUDA_ERR_V2("getSubMIndicePairsHashKernel failed"); }); } else { // auto timer = spconv::CudaContextTimer<>(); prepareSubMGridKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), ou, spatialVolume); // tv::ssprint("prepareSubMGridKernel", timer.report() / 1000.0); TV_CHECK_CUDA_ERR_V2("prepareSubMGridKernel failed"); // when dilation all one, we use a simple kernel to calc result bool dilation_one = true; for (int i = 0; i < NDim; ++i) { dilation_one &= di[i] == 1; } auto found = false; if (dilation_one && (NDim == 2 || NDim == 3)) { auto indiceNumCpu = indiceNum.cpu(); if (NDim == 2) { tv::SimpleVector<Index, 2> ou_(outSpatialShape.begin(), outSpatialShape.end()); tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[0], [&](auto K0C) { tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[1], [&](auto K1C) { constexpr int K0 = TV_DECLTYPE(K0C)::value; constexpr int K1 = TV_DECLTYPE(K1C)::value; found = true; getSubMIndicePairsKernel2<Index, IndexGrid, K0, K1> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>( tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ou_, spatialVolume); }); }); } else if (NDim == 3) { tv::SimpleVector<Index, 3> ou_(outSpatialShape.begin(), outSpatialShape.end()); tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[0], [&](auto K0C) { tv::dispatch_int_noexcept<1, 3, 5>(kernelSize[1], [&](auto K1C) { tv::dispatch_int_noexcept<1, 3, 5>( kernelSize[2], [&](auto K2C) { constexpr int K0 = TV_DECLTYPE(K0C)::value; constexpr int K1 = TV_DECLTYPE(K1C)::value; constexpr int K2 = TV_DECLTYPE(K2C)::value; found = true; 
getSubMIndicePairsKernel3<Index, IndexGrid, K0, K1, K2> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>( tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ou_, spatialVolume); }); }); }); } } if (!found) { tv::DispatchInt< max_kernel_vol_t>()(ndim, std::less_equal<int>(), [&](auto I2) { constexpr int MaxKernelVolume = TV_DECLTYPE(I2)::value; getSubMIndicePairsKernel<Index, IndexGrid, NDim, MaxKernelVolume> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(tv::torch2tv<Index>(indicesIn), tv::torch2tv<IndexGrid>(gridsOut), tv::torch2tv<Index>(indicePairs), tv::torch2tv<Index>(indiceNum), ks, st, pa, di, ou); TV_CHECK_CUDA_ERR_V2("getSubMIndicePairsKernel failed"); }); } // tv::ssprint("getSubMIndicePairsKernel", timer.report() / 1000.0); } if (resetGrid && (!useHash)) { resetGridSubMKernel<Index, IndexGrid, NDim> <<<tv::cuda::getBlocks(numActIn), tv::cuda::CUDA_NUM_THREADS, 0, stream>>>(indicesIn.data_ptr<Index>(), tv::torch2tv<IndexGrid>(gridsOut), ou, numActIn); TV_CHECK_CUDA_ERR_V2("resetGridKernel failed"); } }); }); if (failed){ return -1; } return numActIn; } } // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/maxpool.cc
C++
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <spconv/maxpool.h> #include <torch/script.h> namespace spconv { using float_types_t = tv::mp_list<float, double, at::Half>; using int_types_t = tv::mp_list<int32_t, int64_t>; void maxpool_fwd_cpu(torch::Tensor outFeatures, torch::Tensor inFeatures, torch::Tensor indicesIn, torch::Tensor indicesOut, int size) { if (size <= 0) return; int stride = inFeatures.size(1); auto dtype = inFeatures.scalar_type(); auto int_dtype = indicesIn.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = TV_DECLTYPE(TValue); tv::DispatchTorch<int_types_t>()(int_dtype, [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); auto outFeaturesData = outFeatures.data_ptr<T>(); auto inFeaturesData = inFeatures.data_ptr<T>(); auto indicesInData = indicesIn.data_ptr<Index>(); auto indicesOutData = indicesOut.data_ptr<Index>(); Index idxi, idxo; for (int row = 0; row < size; row++) { idxi = indicesInData[row] * stride; idxo = indicesOutData[row] * stride; for (int plane = 0; plane < stride; ++plane) if (outFeaturesData[idxo + plane] < inFeaturesData[idxi + plane]) outFeaturesData[idxo + plane] = inFeaturesData[idxi + plane]; } }); }); } void maxpool_bwd_cpu(torch::Tensor outFeatures, torch::Tensor inFeatures, torch::Tensor dout, torch::Tensor din, torch::Tensor indicesIn, torch::Tensor indicesOut, int size) { if (size <= 0) return; int stride = inFeatures.size(1); auto dtype = 
inFeatures.scalar_type(); auto int_dtype = indicesIn.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = TV_DECLTYPE(TValue); tv::DispatchTorch<int_types_t>()(int_dtype, [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); auto outFeaturesData = outFeatures.data_ptr<T>(); auto inFeaturesData = inFeatures.data_ptr<T>(); auto doutData = dout.data_ptr<T>(); auto dinData = din.data_ptr<T>(); auto indicesInData = indicesIn.data_ptr<Index>(); auto indicesOutData = indicesOut.data_ptr<Index>(); Index idxi, idxo; for (int row = 0; row < size; row++) { idxi = indicesInData[row] * stride; idxo = indicesOutData[row] * stride; for (int plane = 0; plane < stride; ++plane) if (outFeaturesData[idxo + plane] == inFeaturesData[idxi + plane]) dinData[idxi + plane] += doutData[idxo + plane]; } }); }); } } // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/maxpool.cu
CUDA
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/ATen.h> #include <chrono> #include <limits> #include <spconv/maxpool.h> #include <tensorview/cuda_utils.h> #include <tensorview/kernel_utils.h> #include <tensorview/mp_helper.h> #include <tensorview/tensorview.h> #include <type_traits> namespace spconv { template <typename T, typename Index, int NumTLP, int NumILP> __global__ void maxPoolFwdBlockKernel(T *outFeatures, const T *inFeatures, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { T in, out; int ILPStrideY[NumILP]; Index idxo, idxi; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y; outFeatures += blockIdx.y * NumTLP; inFeatures += blockIdx.y * NumTLP; for (int ix = blockIdx.x * blockDim.x; ix < numHot; ix += blockDim.x * gridDim.x) { { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; in = inFeatures[idxi]; out = outFeatures[idxo]; if (in > out) { outFeatures[idxo] = in; } } } } } template <typename T, typename Index, int NumTLP, int NumILP> __global__ void maxPoolFwdGenericBlockKernel(T *outFeatures, const T *inFeatures, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf. 
int ILPStrideX[NumILP]; Index RI[NumILP]; Index RO[NumILP]; T in, out; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x; for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) { RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes; RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes; } for (int iy : tv::KernelLoopY<int>(numPlanes)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { in = inFeatures[RI[ilp] + iy]; out = outFeatures[RO[ilp] + iy]; if (in > out) { outFeatures[RO[ilp] + iy] = in; } } } } } template <typename T, typename Index, int NumTLP, int NumILP, typename VecType> __global__ void maxPoolFwdVecBlockKernel(T *outFeatures, const T *inFeatures, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf. int ILPStrideY[NumILP]; constexpr int vecloadFactor = sizeof(VecType) / sizeof(T); T bufi[vecloadFactor]; T bufo[vecloadFactor]; Index idxi, idxo; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y; outFeatures += blockIdx.y * NumTLP; inFeatures += blockIdx.y * NumTLP; for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot; ix += blockDim.x * gridDim.x * vecloadFactor) { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; reinterpret_cast<VecType *>(bufo)[0] = reinterpret_cast<VecType *>(outFeatures)[idxo]; reinterpret_cast<VecType *>(bufi)[0] = reinterpret_cast<const VecType *>(inFeatures)[idxi]; #pragma unroll for (int i = 0; i < vecloadFactor; i++) { if (bufi[i] > bufo[i]) { bufo[i] = bufi[i]; } } reinterpret_cast<VecType *>(outFeatures)[idxo] = reinterpret_cast<VecType *>(bufo)[0]; } } } template <typename T, typename Index, int NumTLP, int NumILP> 
__global__ void maxPoolFwdGenericKernel(T *outFeatures, const T *inFeatures, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf. int ILPStrideX[NumILP]; Index RI[NumILP]; Index RO[NumILP]; T in, out; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x; for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) { if (ix + ILPStrideX[ilp] < numHot) { RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes; RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes; } } for (int iy : tv::KernelLoopY<int>(numPlanes)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { if (ix + ILPStrideX[ilp] < numHot) { in = inFeatures[RI[ilp] + iy]; out = outFeatures[RO[ilp] + iy]; if (in > out) { outFeatures[RO[ilp] + iy] = in; } } } } } } template <typename T, typename Index, int NumTLP, int NumILP> __global__ void maxPoolBwdBlockKernel(const T *outFeatures, const T *inFeatures, const T *dout, T *din, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf. 
T in, out; Index idxo, idxi; int ILPStrideY[NumILP]; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y; outFeatures += blockIdx.y * NumTLP; inFeatures += blockIdx.y * NumTLP; dout += blockIdx.y * NumTLP; din += blockIdx.y * NumTLP; for (int ix = blockIdx.x * blockDim.x; ix < numHot; ix += blockDim.x * gridDim.x) { { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; in = inFeatures[idxi]; out = outFeatures[idxo]; if (in == out) { din[idxi] += dout[idxo]; } } } } } template <typename T, typename Index, int NumTLP, int NumILP> __global__ void maxPoolBwdGenericBlockKernel(const T *outFeatures, const T *inFeatures, const T *dout, T *din, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf. int ILPStrideX[NumILP]; Index RI[NumILP]; Index RO[NumILP]; T in, out; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x; for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) { RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes; RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes; } for (int iy : tv::KernelLoopY<int>(numPlanes)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { in = inFeatures[RI[ilp] + iy]; out = outFeatures[RO[ilp] + iy]; if (in == out) { din[RI[ilp] + iy] += dout[RO[ilp] + iy]; } } } } } template <typename T, typename Index, int NumTLP, int NumILP, typename VecType> __global__ void maxPoolBwdVecBlockKernel(const T *outFeatures, const T *inFeatures, const T *dout, T *din, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf. 
int ILPStrideY[NumILP]; constexpr int vecloadFactor = sizeof(VecType) / sizeof(T); T bufi[vecloadFactor]; T bufo[vecloadFactor]; T bufdi[vecloadFactor]; T bufdo[vecloadFactor]; Index idxi, idxo; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y; outFeatures += blockIdx.y * NumTLP; inFeatures += blockIdx.y * NumTLP; for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot; ix += blockDim.x * gridDim.x * vecloadFactor) { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x; reinterpret_cast<VecType *>(bufo)[0] = reinterpret_cast<const VecType *>(outFeatures)[idxo]; reinterpret_cast<VecType *>(bufi)[0] = reinterpret_cast<const VecType *>(inFeatures)[idxi]; reinterpret_cast<VecType *>(bufdo)[0] = reinterpret_cast<const VecType *>(dout)[idxo]; reinterpret_cast<VecType *>(bufdi)[0] = reinterpret_cast<VecType *>(din)[idxi]; #pragma unroll for (int i = 0; i < vecloadFactor; i++) { if (bufi[i] == bufo[i]) { bufdi[i] += bufdo[i]; } } reinterpret_cast<VecType *>(din)[idxi] = reinterpret_cast<VecType *>(bufdi)[0]; } } } template <typename T, typename Index, int NumTLP, int NumILP> __global__ void maxPoolBwdGenericKernel(const T *outFeatures, const T *inFeatures, const T *dout, T *din, const Index *indicesIn, const Index *indicesOut, int numHot, int numPlanes) { // see http://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf. 
int ILPStrideX[NumILP]; Index RI[NumILP]; Index RO[NumILP]; T in, out; #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x; for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ilp++) { if (ix + ILPStrideX[ilp] < numHot) { RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes; RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes; } } for (int iy : tv::KernelLoopY<int>(numPlanes)) { #pragma unroll for (int ilp = 0; ilp < NumILP; ++ilp) { if (ix + ILPStrideX[ilp] < numHot) { in = inFeatures[RI[ilp] + iy]; out = outFeatures[RO[ilp] + iy]; if (in == out) { din[RI[ilp] + iy] += dout[RO[ilp] + iy]; } } } } } } using float_types_t = tv::mp_list<float, double, at::Half>; using int_types_t = tv::mp_list<int32_t, int64_t>; void maxpool_fwd_cuda(torch::Tensor outFeatures, torch::Tensor inFeatures, torch::Tensor indicesIn, torch::Tensor indicesOut, int size) { if (size <= 0) return; int numPlanes = inFeatures.size(1); auto dtype = inFeatures.scalar_type(); auto int_dtype = indicesIn.scalar_type(); auto stream = at::cuda::getCurrentCUDAStream(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = TV_DECLTYPE(TValue); using vecload_type_t = std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>; using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>; tv::DispatchTorch<int_types_t>()(int_dtype, [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &indicesIn, &indicesOut, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; int numHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (numHotBlock >= NumTLP) { maxPoolFwdVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(std::min(size / NumTLP, 512), numPlanes / NumTLP), dim3(NumTLP / 
vecloadFactor, NumTLP / NumILP), 0, stream>>>( outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), indicesIn.data_ptr<Index>(), indicesOut.data_ptr<Index>(), numHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } if (size > numHotBlock) { maxPoolFwdGenericKernel<T, Index, int(NumTLP), NumILP> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), indicesIn.data_ptr<Index>() + numHotBlock, indicesOut.data_ptr<Index>() + numHotBlock, size - numHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; int numHotBlock = (size / NumTLP) * NumTLP; if (numHotBlock >= NumTLP) { maxPoolFwdGenericBlockKernel<T, Index, NumTLP, NumILP> <<<dim3(size / NumTLP, tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), indicesIn.data_ptr<Index>(), indicesOut.data_ptr<Index>(), numHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } if (size > numHotBlock) { maxPoolFwdGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(1, tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), indicesIn.data_ptr<Index>() + numHotBlock, indicesOut.data_ptr<Index>() + numHotBlock, size - numHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } } }); }); } void maxpool_bwd_cuda(torch::Tensor outFeatures, torch::Tensor inFeatures, torch::Tensor dout, torch::Tensor din, torch::Tensor indicesIn, torch::Tensor indicesOut, int size) { if (size <= 0) return; int numPlanes = inFeatures.size(1); auto dtype = inFeatures.scalar_type(); auto int_dtype = indicesIn.scalar_type(); auto stream = at::cuda::getCurrentCUDAStream(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = TV_DECLTYPE(TValue); using vecload_type_t = std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>; using 
kernel_block_t = tv::mp_list_c<int, 64, 32, 16>; tv::DispatchTorch<int_types_t>()(int_dtype, [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &dout, &din, &indicesIn, &indicesOut, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; int numHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (numHotBlock >= NumTLP) { maxPoolBwdVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(std::min(size / NumTLP, 512), numPlanes / NumTLP), dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0, stream>>>(outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), dout.data_ptr<T>(), din.data_ptr<T>(), indicesIn.data_ptr<Index>(), indicesOut.data_ptr<Index>(), numHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } if (size > numHotBlock) { maxPoolBwdGenericKernel<T, Index, int(NumTLP), NumILP> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), dout.data_ptr<T>(), din.data_ptr<T>(), indicesIn.data_ptr<Index>() + numHotBlock, indicesOut.data_ptr<Index>() + numHotBlock, size - numHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; int numHotBlock = (size / NumTLP) * NumTLP; if (numHotBlock >= NumTLP) { maxPoolBwdGenericBlockKernel<T, Index, NumTLP, NumILP> <<<dim3(size / NumTLP, tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), dout.data_ptr<T>(), din.data_ptr<T>(), indicesIn.data_ptr<Index>(), indicesOut.data_ptr<Index>(), numHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } if (size > numHotBlock) { maxPoolBwdGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(1, tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / 
NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), inFeatures.data_ptr<T>(), dout.data_ptr<T>(), din.data_ptr<T>(), indicesIn.data_ptr<Index>() + numHotBlock, indicesOut.data_ptr<Index>() + numHotBlock, size - numHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } } }); }); } } // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/nms.cc
C++
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <boost/geometry.hpp>
#include <spconv/nms_functor.h>
#include <torch/script.h>
#include <vector>

namespace spconv {
namespace functor {

// CPU non-maximum suppression over axis-aligned boxes.
// boxes is indexed as boxes(i, c) with c in {0:x1, 1:y1, 2:x2, 3:y2}
// (inferred from the min/max pairing below). Boxes are assumed to be
// pre-sorted by score by the caller -- TODO confirm; this functor keeps
// the first box of every overlapping group in iteration order.
// Writes kept indices into `keep` and returns how many were kept.
template <typename T, typename Index>
struct NonMaxSupressionFunctor<tv::CPU, T, Index> {
  Index operator()(const tv::CPU &d, tv::TensorView<Index> keep,
                   tv::TensorView<const T> boxes, T threshold, T eps) {
    auto ndets = boxes.dim(0);
    // suppressed[i] == 1 marks box i as removed by an earlier, kept box.
    auto suppressed = std::vector<Index>(ndets);
    auto area = std::vector<T>(ndets);
    // Precompute box areas; `eps` is added to width/height the same way it
    // is added to the intersection extents below, so areas and overlaps use
    // a consistent convention (cf. the classic "+1" pixel-area NMS).
    for (int i = 0; i < ndets; ++i) {
      area[i] = (boxes(i, 2) - boxes(i, 0) + eps) *
                (boxes(i, 3) - boxes(i, 1) + eps);
    }
    int i, j;
    T xx1, xx2, w, h, inter, ovr;
    int keepNum = 0;
    for (int _i = 0; _i < ndets; ++_i) {
      i = _i;
      if (suppressed[i] == 1)
        continue;
      // Box i survives; record it and suppress everything after it that
      // overlaps it by at least `threshold` IoU.
      keep[keepNum] = i;
      keepNum += 1;
      for (int _j = _i + 1; _j < ndets; ++_j) {
        j = _j;
        if (suppressed[j] == 1)
          continue;
        // Intersection extent in x; early-out if the boxes do not overlap.
        xx2 = std::min(boxes(i, 2), boxes(j, 2));
        xx1 = std::max(boxes(i, 0), boxes(j, 0));
        w = xx2 - xx1 + eps;
        if (w > 0) {
          // Intersection extent in y (xx1/xx2 are reused for y coords).
          xx2 = std::min(boxes(i, 3), boxes(j, 3));
          xx1 = std::max(boxes(i, 1), boxes(j, 1));
          h = xx2 - xx1 + eps;
          if (h > 0) {
            inter = w * h;
            // IoU = intersection / union.
            ovr = inter / (area[i] + area[j] - inter);
            if (ovr >= threshold)
              suppressed[j] = 1;
          }
        }
      }
    }
    return keepNum;
  }
};

// CPU rotated-box NMS: exact polygon IoU via Boost.Geometry.
// boxCorners is indexed as boxCorners(box, corner, xy) with 4 corners per
// box; standupIoU(i, j) is a precomputed IoU of the axis-aligned bounding
// boxes, used as a cheap prefilter before the expensive polygon clipping.
// NOTE(review): corner rings are appended in order 0..3 and closed with
// corner 0 but never passed through bg::correct -- presumably the caller
// guarantees a consistent winding order; confirm against the producer of
// boxCorners.
template <typename T, typename Index>
struct rotateNonMaxSupressionFunctor<tv::CPU, T, Index> {
  Index operator()(const tv::CPU &d, tv::TensorView<Index> keep,
                   tv::TensorView<const T> boxCorners,
                   tv::TensorView<const T> standupIoU, T threshold) {
    auto ndets = boxCorners.dim(0);
    auto suppressed = std::vector<Index>(ndets);
    int i, j;
    namespace bg = boost::geometry;
    typedef bg::model::point<T, 2, bg::cs::cartesian> point_t;
    typedef bg::model::polygon<point_t> polygon_t;
    polygon_t poly, qpoly;
    std::vector<polygon_t> poly_inter, poly_union;
    T inter_area, union_area, overlap;
    int keepNum = 0;
    for (int _i = 0; _i < ndets; ++_i) {
      i = _i;
      if (suppressed[i] == 1)
        continue;
      keep[keepNum] = i;
      keepNum += 1;
      for (int _j = _i + 1; _j < ndets; ++_j) {
        j = _j;
        if (suppressed[j] == 1)
          continue;
        // Cheap reject: if even the axis-aligned hulls do not overlap,
        // the rotated boxes cannot overlap either.
        if (standupIoU(i, j) <= 0.0)
          continue;
        // Build the two quadrilaterals as closed rings (first corner
        // repeated to close the polygon).
        bg::append(poly, point_t(boxCorners(i, 0, 0), boxCorners(i, 0, 1)));
        bg::append(poly, point_t(boxCorners(i, 1, 0), boxCorners(i, 1, 1)));
        bg::append(poly, point_t(boxCorners(i, 2, 0), boxCorners(i, 2, 1)));
        bg::append(poly, point_t(boxCorners(i, 3, 0), boxCorners(i, 3, 1)));
        bg::append(poly, point_t(boxCorners(i, 0, 0), boxCorners(i, 0, 1)));
        bg::append(qpoly, point_t(boxCorners(j, 0, 0), boxCorners(j, 0, 1)));
        bg::append(qpoly, point_t(boxCorners(j, 1, 0), boxCorners(j, 1, 1)));
        bg::append(qpoly, point_t(boxCorners(j, 2, 0), boxCorners(j, 2, 1)));
        bg::append(qpoly, point_t(boxCorners(j, 3, 0), boxCorners(j, 3, 1)));
        bg::append(qpoly, point_t(boxCorners(j, 0, 0), boxCorners(j, 0, 1)));
        // Exact intersection / union of the two convex quads.
        bg::intersection(poly, qpoly, poly_inter);
        if (!poly_inter.empty()) {
          inter_area = bg::area(poly_inter.front());
          bg::union_(poly, qpoly, poly_union);
          if (!poly_union.empty()) {
            // ignore invalid box
            union_area = bg::area(poly_union.front());
            overlap = inter_area / union_area;
            if (overlap >= threshold)
              suppressed[j] = 1;
            poly_union.clear();
          }
        }
        // Reset scratch geometry for the next pair.
        poly.clear();
        qpoly.clear();
        poly_inter.clear();
      }
    }
    return keepNum;
  }
};

} // namespace functor

// Explicit instantiations for the float/index combinations used by spconv.
#define DECLARE_CPU_T_INDEX(T, Index)                                          \
  template struct functor::NonMaxSupressionFunctor<tv::CPU, T, Index>;         \
  template struct functor::rotateNonMaxSupressionFunctor<tv::CPU, T, Index>;

#define DECLARE_CPU_INDEX(Index)                                               \
  DECLARE_CPU_T_INDEX(float, Index);                                           \
  DECLARE_CPU_T_INDEX(double, Index);

DECLARE_CPU_INDEX(int);
DECLARE_CPU_INDEX(long);

#undef DECLARE_CPU_INDEX
#undef DECLARE_CPU_T_INDEX

} // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/nms.cu
CUDA
// ------------------------------------------------------------------
// Deformable Convolutional Networks
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN
// (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cu.h>
#include <spconv/reordering.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>

// Ceiling division: number of blocks needed to cover m items n at a time.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// One suppression bit per thread is packed into an unsigned long long,
// so the block size equals the bit width of that type (64).
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// Intersection-over-union of two boxes stored as 4 consecutive values
// [x1, y1, x2, y2] (the 5th per-box value -- see the *5 strides below --
// is carried along but not read here; presumably a score, TODO confirm).
// The "+1" follows the classic integer pixel-area convention.
template <typename DType>
__device__ inline DType devIoU(DType const *const a, DType const *const b) {
  DType left = max(a[0], b[0]), right = min(a[2], b[2]);
  DType top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  DType width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  DType interS = width * height;
  DType Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  DType Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

// Pairwise-overlap NMS kernel. Each CUDA block compares one tile of up to
// BLOCK_THREADS "row" boxes against one tile of "column" boxes and writes,
// per row box, a BLOCK_THREADS-wide bitmask of column boxes it overlaps
// beyond nms_overlap_thresh. dev_mask has one word per (box, column-tile)
// pair; the host side reduces these masks into the final keep list.
// dev_boxes stores 5 values per box (only the first 4 are used by devIoU).
template <typename DType, int BLOCK_THREADS>
__global__ void nms_kernel(const int n_boxes, const DType nms_overlap_thresh,
                           const DType *dev_boxes,
                           unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  // Tile sizes are clamped for the ragged last tiles.
  const int row_size = min(n_boxes - row_start * BLOCK_THREADS, BLOCK_THREADS);
  const int col_size = min(n_boxes - col_start * BLOCK_THREADS, BLOCK_THREADS);
  // Stage the column tile in shared memory; every row thread reads all of it.
  __shared__ DType block_boxes[BLOCK_THREADS * 5];
  if (threadIdx.x < col_size) {
#pragma unroll
    for (int i = 0; i < 5; ++i) {
      block_boxes[threadIdx.x * 5 + i] =
          dev_boxes[(BLOCK_THREADS * col_start + threadIdx.x) * 5 + i];
    }
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = BLOCK_THREADS * row_start + threadIdx.x;
    const DType *cur_box = dev_boxes + cur_box_idx * 5;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      // Diagonal tile: only compare against later boxes to avoid marking
      // a box as overlapping itself or double-counting pairs.
      start = threadIdx.x + 1;
    }
    for (int i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, BLOCK_THREADS);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/pillar_scatter.cu
CUDA
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/pillar_scatter_functor.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensorview.h>
#include <type_traits>
#include <utility/timer.h>

namespace spconv {

// Scatters per-pillar feature columns onto a dense 4-D canvas.
// features is [numFeatures, numPoints] (dims 0/1 read below); coors holds
// per-point coordinates and is indexed as coors(c, i) with c in {0, 2, 3}
// -- row 1 is skipped, presumably a collapsed axis of a [b, z, y, x]
// layout; TODO confirm against the caller. Coordinates arrive as T and
// are truncated to int for indexing. X covers points, Y covers features
// (tv::KernelLoop* stride over the full range).
template <typename T, typename Index>
__global__ void pointPillarsScatterKernel(tv::TensorView<T> canvas,
                                          tv::TensorView<const T> features,
                                          tv::TensorView<const T> coors) {
  auto numFeatures = features.dim(0);
  auto numPoints = features.dim(1);
  for (int i : tv::KernelLoopX<int>(numPoints)) {
    for (int ifeature : tv::KernelLoopY<int>(numFeatures)) {
      canvas(int(coors(0, i)), ifeature, int(coors(2, i)), int(coors(3, i))) =
          features(ifeature, i);
    }
  }
}

namespace functor {

// GPU launcher: 32x32 thread blocks, grid sized to cover
// (numPoints, numFeatures) in x/y respectively.
template <typename T, typename Index>
struct PointPillarScatter<tv::GPU, T, Index> {
  void operator()(const tv::GPU &d, tv::TensorView<T> canvas,
                  tv::TensorView<const T> features,
                  tv::TensorView<const T> coors) {
    auto grid = dim3(tv::cuda::DivUp(features.dim(1), 32),
                     tv::cuda::DivUp(features.dim(0), 32));
    pointPillarsScatterKernel<T, Index>
        <<<grid, dim3(32, 32), 0, d.getStream()>>>(canvas, features, coors);
    TV_CHECK_CUDA_ERR();
  }
};

} // namespace functor

// Explicit instantiations (int indices only).
#define DECLARE_GPU_SPECS_T_INDEX(T, Index)                                    \
  template struct functor::PointPillarScatter<tv::GPU, T, Index>;
#define DECLARE_GPU_SPECS(T) DECLARE_GPU_SPECS_T_INDEX(T, int);

DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);

#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX

} // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/pool_ops.cc
C++
#include <spconv/pool_ops.h>

namespace spconv {

// Forward sparse max pooling.
//
// indicePairs is [2, kernelVolume, N]: for each kernel offset i,
// indicePairs[0][i] holds input indices and indicePairs[1][i] the matching
// output indices; indiceNum[i] says how many of those pairs are valid.
// Returns a zero-initialized [numAct, numInPlanes] tensor that accumulates
// the per-offset max over the gathered input rows.
//
// Cleanup vs. the original: dead timing scaffolding (`totalTime` and the
// commented-out CudaContextTimer / cout lines) removed; loop counter widened
// to int64_t to match indiceNum.size(0).
torch::Tensor indiceMaxPool(torch::Tensor features, torch::Tensor indicePairs,
                            torch::Tensor indiceNum, int64_t numAct) {
  auto device = features.device().type();
  auto kernelVolume = indiceNum.size(0);
  auto numInPlanes = features.size(1);
  // indiceNum is read element-by-element on the host, so it must live on CPU.
  auto indicePairNumCpu = indiceNum.to({torch::kCPU});
  auto options =
      torch::TensorOptions().dtype(features.dtype()).device(features.device());
  torch::Tensor output = torch::zeros({numAct, numInPlanes}, options);
  for (int64_t i = 0; i < kernelVolume; ++i) {
    auto nHot = indicePairNumCpu.data_ptr<int>()[i];
    if (nHot <= 0) {
      // No active pairs for this kernel offset.
      continue;
    }
    if (device == torch::kCPU) {
      maxpool_fwd_cpu(output, features, indicePairs[0][i], indicePairs[1][i],
                      nHot);
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      maxpool_fwd_cuda(output, features, indicePairs[0][i], indicePairs[1][i],
                       nHot);
    }
#endif
    else {
      TV_ASSERT_INVALID_ARG(false, "unknown device type");
    }
  }
  return output;
}

// Backward of indiceMaxPool: routes outGrad back to the input positions
// that produced the forward max (the per-pair argmax is recovered inside
// maxpool_bwd_* by comparing features against outFeatures).
// Returns a gradient tensor shaped like `features`.
//
// Cleanup vs. the original: the unused `numInPlanes` local was removed.
torch::Tensor indiceMaxPoolBackward(torch::Tensor features,
                                    torch::Tensor outFeatures,
                                    torch::Tensor outGrad,
                                    torch::Tensor indicePairs,
                                    torch::Tensor indiceNum) {
  auto device = features.device().type();
  auto indicePairNumCpu = indiceNum.to({torch::kCPU});
  auto options =
      torch::TensorOptions().dtype(features.dtype()).device(features.device());
  torch::Tensor inputGrad = torch::zeros(features.sizes(), options);
  auto kernelVolume = indiceNum.size(0);
  for (int64_t i = 0; i < kernelVolume; ++i) {
    auto nHot = indicePairNumCpu.data_ptr<int>()[i];
    if (nHot <= 0) {
      continue;
    }
    if (device == torch::kCPU) {
      maxpool_bwd_cpu(outFeatures, features, outGrad, inputGrad,
                      indicePairs[0][i], indicePairs[1][i], nHot);
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      maxpool_bwd_cuda(outFeatures, features, outGrad, inputGrad,
                       indicePairs[0][i], indicePairs[1][i], nHot);
    }
#endif
    else {
      TV_ASSERT_INVALID_ARG(false, "unknown device type");
    }
  }
  return inputGrad;
}

} // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/reordering.cc
C++
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <ATen/Parallel.h> #include <spconv/reordering.h> #include <tensorview/torch_utils.h> #include <torch/script.h> namespace spconv { using float_types_t = tv::mp_list<float, double, at::Half>; using int_types_t = tv::mp_list<int32_t, int64_t>; void sparse_gather_cpu(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { int numPlanes = features.size(1); auto dtype = features.scalar_type(); auto int_dtype = indices.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = TV_DECLTYPE(TValue); tv::DispatchTorch<int_types_t>()(int_dtype, [&](auto IndexValue) { using Index = TV_DECLTYPE(IndexValue); Index *indices_data = indices.data_ptr<Index>(); T *buffer_data = buffer.data_ptr<T>(); const T *features_data = features.data_ptr<T>(); at::parallel_for(0, size, 0, [&](int64_t begin, int64_t end) { for (int i = begin; i < end; ++i) { std::memcpy(buffer_data + i * numPlanes, features_data + indices_data[i] * numPlanes, sizeof(T) * numPlanes); } }); }); }); } void sparse_scatter_add_cpu(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { int numPlanes = outFeatures.size(1); auto dtype = outFeatures.scalar_type(); auto int_dtype = indices.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = TV_DECLTYPE(TValue); tv::DispatchTorch<int_types_t>()(int_dtype, [&](auto IndexValue) { using 
Index = TV_DECLTYPE(IndexValue); Index *indices_data = indices.data_ptr<Index>(); const T *buffer_data = buffer.data_ptr<T>(); T *features_data = outFeatures.data_ptr<T>(); at::parallel_for(0, size, 0, [&](int64_t begin, int64_t end) { const T *buf = buffer.data_ptr<T>(); T *out = outFeatures.data_ptr<T>(); for (int i = begin; i < end; ++i) { buf = buffer_data + i * numPlanes; out = features_data + indices_data[i] * numPlanes; for (int j = 0; j < numPlanes; ++j) { out[j] += buf[j]; } } }); }); }); } } // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/reordering.cu
CUDA
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cu.h>
#include <spconv/reordering.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensor.h>
#include <tensorview/tensorview.h>
#include <tensorview/torch_utils.h>
#include <type_traits>
#include <utility/timer.h>

namespace spconv {

// Runtime dtype dispatch lists: feature dtypes and index dtypes.
using float_types_t = tv::mp_list<float, double, at::Half>;
using int_types_t = tv::mp_list<int32_t, int64_t>;

// Vector-load element type used by the *Vec* kernels.
// NOTE(review): both branches of the conditional are int4, so the
// std::is_same<T, at::Half> test is a no-op in both traits -- presumably
// one branch was once a different width (e.g. int2) for half precision;
// confirm before relying on the distinction between half_vec and
// half_vec_sadd.
template <typename T> struct half_vec{
  using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>;
};
template <typename T> struct half_vec_sadd{
  using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>;
};

// Candidate tile sizes (threads-level-parallelism) tried largest-first.
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;

// GPU row gather: buffer[i] <- features[indices[i]] for i in [0, size).
// Tries each NumTLP tile size; the vectorized kernels are only usable when
// numPlanes divides evenly by the tile, otherwise a generic fallback kernel
// handles everything. The ragged tail (size % NumTLP rows) always goes
// through gatherVecKernel / the generic kernel.
void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
                        torch::Tensor indices, int size) {
  if (size <= 0)
    return;
  int numPlanes = features.size(1);
  auto stream = at::cuda::getCurrentCUDAStream();
  auto dtype = features.scalar_type();
  auto inds_dtype = indices.scalar_type();
  tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
    using T = TV_DECLTYPE(TValue);
    using vecload_type_t = typename half_vec_sadd<T>::type;
    tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
      using Index = TV_DECLTYPE(IndexValue);
      // Becomes false once some tile size has handled the launch.
      bool notFound = true;
      // Elements moved per vector load (e.g. int4 / float = 4).
      constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
      tv::mp_for_each<kernel_block_t>([=, &buffer, &features, &indices,
                                       &notFound](auto NumTLP) {
        constexpr int NumILP = NumTLP / 4;
        // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
        // Rows covered by whole NumTLP-sized tiles.
        int nHotBlock = (size / NumTLP) * NumTLP;
        if (notFound) {
          if (numPlanes % NumTLP == 0) {
            if (nHotBlock >= NumTLP) {
              // Vectorized blocked kernel for the full tiles.
              gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
                                   vecload_type_t>
                  <<<dim3(size / NumTLP, numPlanes / NumTLP),
                     dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
                     stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(),
                               indices.data_ptr<Index>(), nHotBlock,
                               numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
              cudaFuncAttributes attr;
              checkCudaErrors(cudaFuncGetAttributes(
                  &attr, gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
                                              vecload_type_t>));
              tv::ssprint("gatherVecBlockKernel<", tv::type_s<T>,
                          tv::type_s<Index>, int(NumTLP), NumILP, ">",
                          attr.numRegs);
#endif
              TV_CHECK_CUDA_ERR();
            }
            if (size - nHotBlock > 0) {
              // Ragged tail: remaining rows after the last full tile.
              gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>
                  <<<dim3(1, numPlanes / NumTLP),
                     dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
                     stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes,
                               features.data_ptr<T>(),
                               indices.data_ptr<Index>() + nHotBlock,
                               size - nHotBlock, numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
              cudaFuncAttributes attr;
              checkCudaErrors(cudaFuncGetAttributes(
                  &attr, gatherVecKernel<T, Index, int(NumTLP), NumILP,
                                         vecload_type_t>));
              tv::ssprint("gatherVecKernel<", tv::type_s<T>, tv::type_s<Index>,
                          int(NumTLP), NumILP, ">", attr.numRegs);
#endif
              TV_CHECK_CUDA_ERR();
            }
            notFound = false;
          }
        }
      });
      if (notFound) {
        // No tile size divided numPlanes: scalar generic fallback.
        constexpr int NumTLP = 64;
        constexpr int NumILP = NumTLP / 4;
        gatherGenericKernel<T, Index, NumTLP, NumILP>
            <<<dim3(tv::cuda::DivUp(size, NumTLP),
                    tv::cuda::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
                buffer.data_ptr<T>(), features.data_ptr<T>(),
                indices.data_ptr<Index>(), size, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
        cudaFuncAttributes attr;
        checkCudaErrors(cudaFuncGetAttributes(
            &attr, gatherGenericKernel<T, Index, NumTLP, NumILP>));
        tv::ssprint("gatherGenericKernel<", tv::type_s<T>, tv::type_s<Index>,
                    int(NumTLP), NumILP, ">", attr.numRegs);
#endif
        TV_CHECK_CUDA_ERR();
      }
    });
  });
}

// GPU scatter-add: outFeatures[indices[i]] += buffer[i]. Same tile-size
// selection strategy as sparse_gather_cuda; the tail and fallback use the
// non-vectorized scatterAddGenericKernel (scatter-add cannot vector-store
// safely into arbitrary rows).
void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures,
                             torch::Tensor indices, int size) {
  if (size <= 0)
    return;
  int numPlanes = outFeatures.size(1);
  auto stream = at::cuda::getCurrentCUDAStream();
  auto dtype = outFeatures.scalar_type();
  auto inds_dtype = indices.scalar_type();
  tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
    using T = TV_DECLTYPE(TValue);
    using vecload_type_t = typename half_vec_sadd<T>::type;
    tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
      using Index = TV_DECLTYPE(IndexValue);
      bool notFound = true;
      constexpr int vecloadFactor =
          sizeof(vecload_type_t) / sizeof(T); // important for half.
      tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
                                       &notFound](auto NumTLP) {
        // constexpr int NumILP = NumTLP / (64 / (NumTLP /
        // vecloadFactor));
        constexpr int NumILP = NumTLP / 4;
        int nHotBlock = (size / NumTLP) * NumTLP;
        if (notFound) {
          if (numPlanes % NumTLP == 0) {
            if (nHotBlock >= NumTLP) {
              scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
                                       vecload_type_t>
                  <<<dim3(size / NumTLP, numPlanes / NumTLP),
                     dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
                     stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
                               indices.data_ptr<Index>(), nHotBlock,
                               numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
              cudaFuncAttributes attr;
              checkCudaErrors(cudaFuncGetAttributes(
                  &attr, scatterAddVecBlockKernel<T, Index, int(NumTLP),
                                                  NumILP, vecload_type_t>));
              tv::ssprint("scatterAddVecBlockKernel<", tv::type_s<T>,
                          tv::type_s<Index>, int(NumTLP), NumILP, ">",
                          attr.numRegs);
#endif
              TV_CHECK_CUDA_ERR();
            }
            if (size - nHotBlock > 0) {
              // Ragged tail handled by the generic (scalar) kernel.
              scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>
                  <<<dim3(1, numPlanes / NumTLP),
                     dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
                      outFeatures.data_ptr<T>(),
                      buffer.data_ptr<T>() + nHotBlock * numPlanes,
                      indices.data_ptr<Index>() + nHotBlock, size - nHotBlock,
                      numPlanes);
#ifdef TV_LOG_KERNEL_INFO
              cudaFuncAttributes attr;
              checkCudaErrors(cudaFuncGetAttributes(
                  &attr,
                  scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>));
              tv::ssprint("scatterAddGenericKernel<", tv::type_s<T>,
                          tv::type_s<Index>, int(NumTLP), NumILP, ">",
                          attr.numRegs);
#endif
              TV_CHECK_CUDA_ERR();
            }
            notFound = false;
          }
        }
      });
      if (notFound) {
        constexpr int NumTLP = 64;
        constexpr int NumILP = NumTLP / 4;
        scatterAddGenericKernel<T, Index, NumTLP, NumILP>
            <<<dim3(tv::cuda::DivUp(size, NumTLP),
                    tv::cuda::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
                outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
                indices.data_ptr<Index>(), size, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
        cudaFuncAttributes attr;
        checkCudaErrors(cudaFuncGetAttributes(
            &attr, scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>));
        tv::ssprint("notfound scatterAddGenericKernel<", tv::type_s<T>,
                    tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs);
#endif
        TV_CHECK_CUDA_ERR();
      }
    });
  });
}

// Batched gather variant: indices carry an extra leading stride
// (inds_stride per volume) and buffer rows are grouped by feature_stride.
// The batch* kernels receive both strides plus (for the tail kernel) the
// nHotBlock offset explicitly instead of pre-offset pointers.
void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
                              torch::Tensor indices, int size) {
  // indices: [volume, inds_stride]
  // buffer: [volume, num_points, num_features]
  // size == volume * num_points
  if (size <= 0)
    return;
  int numPlanes = features.size(1);
  auto stream = at::cuda::getCurrentCUDAStream();
  auto dtype = features.scalar_type();
  auto inds_dtype = indices.scalar_type();
  int inds_stride = indices.size(1);
  int feature_stride = buffer.size(1);
  tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
    using T = TV_DECLTYPE(TValue);
    using vecload_type_t = typename half_vec<T>::type;
    tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
      using Index = TV_DECLTYPE(IndexValue);
      bool notFound = true;
      constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
      tv::mp_for_each<kernel_block_t>(
          [=, &buffer, &features, &indices, &notFound](auto NumTLP) {
            constexpr int NumILP = NumTLP / 4;
            // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
            int nHotBlock = (size / NumTLP) * NumTLP;
            if (notFound) {
              if (numPlanes % NumTLP == 0) {
                if (nHotBlock >= NumTLP) {
                  batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
                                            vecload_type_t>
                      <<<dim3(size / NumTLP, numPlanes / NumTLP),
                         dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
                         stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(),
                                   indices.data_ptr<Index>(), nHotBlock,
                                   numPlanes / vecloadFactor, inds_stride,
                                   feature_stride);
                  TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel");
                }
                if (size - nHotBlock > 0) {
                  // Tail kernel takes the un-offset index pointer plus
                  // nHotBlock, unlike the non-batched variant.
                  batchGatherVecKernel<T, Index, int(NumTLP), NumILP,
                                       vecload_type_t>
                      <<<dim3(1, numPlanes / NumTLP),
                         dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
                         stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes,
                                   features.data_ptr<T>(),
                                   indices.data_ptr<Index>(), size - nHotBlock,
                                   nHotBlock, numPlanes / vecloadFactor,
                                   inds_stride, feature_stride);
                  TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel");
                }
                notFound = false;
              }
            }
          });
      if (notFound) {
        constexpr int NumTLP = 64;
        constexpr int NumILP = NumTLP / 4;
        batchGatherGenericKernel<T, Index, NumTLP, NumILP>
            <<<dim3(tv::cuda::DivUp(size, NumTLP),
                    tv::cuda::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
                buffer.data_ptr<T>(), features.data_ptr<T>(),
                indices.data_ptr<Index>(), size, numPlanes, inds_stride,
                feature_stride);
        TV_CHECK_CUDA_ERR();
      }
    });
  });
}

// Batched scatter-add variant. vecloadFactor is pinned to 1 here (see the
// "important for half" note) so the thread-block geometry stays scalar even
// though the dispatch structure mirrors the gather path.
void batch_sparse_scatter_add_cuda(torch::Tensor buffer,
                                   torch::Tensor outFeatures,
                                   torch::Tensor indices, int size) {
  // indices: [volume, inds_stride]
  // buffer: [volume, num_points, num_features]
  // size == volume * num_points
  if (size <= 0)
    return;
  int numPlanes = outFeatures.size(1);
  auto stream = at::cuda::getCurrentCUDAStream();
  auto dtype = outFeatures.scalar_type();
  auto inds_dtype = indices.scalar_type();
  int inds_stride = indices.size(1);
  int feature_stride = buffer.size(1);
  tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
    using T = TV_DECLTYPE(TValue);
    using vecload_type_t = typename half_vec_sadd<T>::type;
    tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
      using Index = TV_DECLTYPE(IndexValue);
      bool notFound = true;
      constexpr int vecloadFactor = 1; // important for half.
      tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
                                       &notFound](auto NumTLP) {
        // constexpr int NumILP = NumTLP / (64 / (NumTLP /
        // vecloadFactor));
        constexpr int NumILP = NumTLP / 4;
        int nHotBlock = (size / NumTLP) * NumTLP;
        if (notFound) {
          if (numPlanes % NumTLP == 0) {
            if (nHotBlock >= NumTLP) {
              batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP>
                  <<<dim3(size / NumTLP, numPlanes / NumTLP),
                     dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
                     stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
                               indices.data_ptr<Index>(), nHotBlock,
                               numPlanes / vecloadFactor, inds_stride,
                               feature_stride);
              TV_CHECK_CUDA_ERR();
            }
            if (size - nHotBlock > 0) {
              batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP>
                  <<<dim3(1, numPlanes / NumTLP),
                     dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
                      outFeatures.data_ptr<T>(),
                      buffer.data_ptr<T>() + nHotBlock * numPlanes,
                      indices.data_ptr<Index>(), size - nHotBlock, nHotBlock,
                      numPlanes, inds_stride, feature_stride);
              TV_CHECK_CUDA_ERR();
            }
            notFound = false;
          }
        }
      });
      if (notFound) {
        constexpr int NumTLP = 64;
        constexpr int NumILP = NumTLP / 4;
        // Fallback passes an explicit 0 hot-block offset.
        batchScatterAddGenericKernel<T, Index, NumTLP, NumILP>
            <<<dim3(tv::cuda::DivUp(size, NumTLP),
                    tv::cuda::DivUp(numPlanes, NumTLP)),
               dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
                outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
                indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride,
                feature_stride);
        TV_CHECK_CUDA_ERR();
      }
    });
  });
}

} // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/spconv/spconv_ops.cc
C++
#include <spconv/spconv_ops.h>
namespace spconv {

// Builds the (input, output) index-pair tables used by all sparse convolution
// kernels.
//
// indices: [numAct, 1 + NDim] active-site coordinates (batch index + NDim
//          spatial coordinates, per the `coorDim` computation below).
// Returns {outIndices, indicePairs, indiceNum}:
//   indicePairs: [2, kernelVolume, numAct], -1 for unused slots.
//   indiceNum:   [kernelVolume] count of valid pairs per kernel offset.
// On CUDA, a failure (return of -1 from the CUDA builders) falls back to the
// CPU implementation and copies the results back to the original device.
// NOTE(review): `spatialShape` and `outPadding` are validated but otherwise
// unused in this function — presumably kept for interface compatibility.
std::vector<torch::Tensor>
getIndicePairs(torch::Tensor indices, int64_t batchSize,
               std::vector<int64_t> outSpatialShape,
               std::vector<int64_t> spatialShape,
               std::vector<int64_t> kernelSize, std::vector<int64_t> stride,
               std::vector<int64_t> padding, std::vector<int64_t> dilation,
               std::vector<int64_t> outPadding, int64_t _subM,
               int64_t _transpose, int64_t _useHash) {
  // auto timer = spconv::CudaContextTimer<>();
  bool subM = _subM != 0;
  bool transpose = _transpose != 0;
  auto NDim = kernelSize.size();
  // CPU always use hash (tsl::robin_map).
  bool useHash = _useHash != 0 || indices.device().type() == torch::kCPU;
  auto numAct = indices.size(0);
  auto coorDim = indices.size(1) - 1; // batchIdx + xyz
  // All per-dimension parameter vectors must agree with the coordinate rank.
  TV_ASSERT_RT_ERR(NDim == coorDim, "error");
  TV_ASSERT_RT_ERR(kernelSize.size() == coorDim, "error");
  TV_ASSERT_RT_ERR(outSpatialShape.size() == coorDim, "error");
  TV_ASSERT_RT_ERR(stride.size() == coorDim, "error");
  TV_ASSERT_RT_ERR(padding.size() == coorDim, "error");
  TV_ASSERT_RT_ERR(outPadding.size() == coorDim, "error");
  TV_ASSERT_RT_ERR(dilation.size() == coorDim, "error");
  auto kernelVolume = kernelSize[0];
  for (int i = 1; i < kernelSize.size(); ++i) {
    kernelVolume *= kernelSize[i];
  }
  TV_ASSERT_RT_ERR(kernelVolume <= 4096, "error");
  auto outputVolume = outSpatialShape[0];
  for (int i = 1; i < outSpatialShape.size(); ++i) {
    outputVolume *= outSpatialShape[i];
  }
  std::string msg = "due to limits of cuda hash, the volume of dense space "
                    "include batch size ";
  msg += "must less than std::numeric_limits<int>::max() = 2e9";
  TV_ASSERT_RT_ERR(batchSize * outputVolume < std::numeric_limits<int>::max(),
                   msg);
  torch::Tensor indicePairs = torch::full(
      {2, kernelVolume, numAct}, -1,
      torch::dtype(torch::kInt32).device(indices.device()));
  torch::Tensor indiceNum = torch::zeros(
      {kernelVolume}, torch::dtype(torch::kInt32).device(indices.device()));
  // With a hash table only one row per batch is needed; otherwise the grid is
  // a dense batchSize x outputVolume lookup.
  auto gridSize = batchSize * outputVolume;
  if (useHash) {
    gridSize = batchSize;
  }
  torch::Tensor gridOut = torch::full(
      {gridSize}, -1, torch::dtype(torch::kInt32).device(indices.device()));
  gridOut = gridOut.view({batchSize, -1});
  int64_t numActOut = -1;
  // Submanifold convolution preserves the input sites: force stride 1 and
  // "same" padding in every dimension.
  for (int i = 0; i < NDim; ++i) {
    if (subM) {
      padding[i] = kernelSize[i] / 2;
      stride[i] = 1;
    }
  }
  // tv::ssprint("prepare", timer.report() / 1000.0);
  if (subM) {
    if (indices.device().type() == torch::kCPU) {
      numActOut = create_submconv_indice_pair_cpu(
          indices, gridOut, indicePairs, indiceNum, kernelSize, stride, padding,
          dilation, outSpatialShape, transpose, false, useHash);
    }
#ifdef TV_CUDA
    else if (indices.device().type() == torch::kCUDA) {
      numActOut = create_submconv_indice_pair_cuda(
          indices, gridOut, indicePairs, indiceNum, kernelSize, stride, padding,
          dilation, outSpatialShape, transpose, false, useHash);
      // -1 signals CUDA-side failure: redo the work on CPU, then move the
      // results back to the original CUDA device.
      if (numActOut == -1) {
        auto device = indices.device();
        indicePairs = indicePairs.to({torch::kCPU});
        indiceNum = indiceNum.to({torch::kCPU});
        indices = indices.to({torch::kCPU});
        numActOut = create_submconv_indice_pair_cpu(
            indices, gridOut, indicePairs, indiceNum, kernelSize, stride,
            padding, dilation, outSpatialShape, transpose, false, useHash);
        return {indices.to(device), indicePairs.to(device),
                indiceNum.to(device)};
      }
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
    // tv::ssprint("subm", timer.report() / 1000.0);
    // subM output sites are the input sites, so `indices` is returned as-is.
    return {indices, indicePairs, indiceNum};
  } else {
    auto indicePairUnique = torch::full(
        {indicePairs.numel() / 2 + 1}, std::numeric_limits<int>::max(),
        torch::dtype(torch::kInt32).device(indices.device()));
    // Worst case: every active input site produces one output per kernel
    // offset; the result is truncated by the final slice().
    torch::Tensor outInds =
        torch::zeros({numAct * kernelVolume, coorDim + 1},
                     torch::dtype(torch::kInt32).device(indices.device()));
    if (indices.device().type() == torch::kCPU) {
      numActOut = create_conv_indice_pair_cpu(
          indices, outInds, gridOut, indicePairs, indiceNum, kernelSize, stride,
          padding, dilation, outSpatialShape, transpose, false, useHash);
    }
#ifdef TV_CUDA
    else if (indices.device().type() == torch::kCUDA) {
      // Two-phase CUDA build: phase 1 generates candidate output indices,
      // phase 2 deduplicates them (via torch::_unique) and fills the pairs.
      numActOut = create_conv_indice_pair_p1_cuda(
          indices, indicePairs, indiceNum, indicePairUnique, kernelSize, stride,
          padding, dilation, outSpatialShape, transpose);
      if (numActOut > 0) {
        auto res = torch::_unique(indicePairUnique);
        indicePairUnique = std::get<0>(res);
        numActOut = create_conv_indice_pair_p2_cuda(
            indices, outInds, gridOut, indicePairs, indiceNum, indicePairUnique,
            outSpatialShape, transpose, false, useHash);
        // CUDA phase-2 failure: same CPU fallback as the subM branch.
        if (numActOut == -1) {
          auto device = indices.device();
          outInds = outInds.to({torch::kCPU});
          indicePairs = indicePairs.to({torch::kCPU});
          indiceNum = indiceNum.to({torch::kCPU});
          indices = indices.to({torch::kCPU});
          numActOut = create_conv_indice_pair_cpu(
              indices, outInds, gridOut, indicePairs, indiceNum, kernelSize,
              stride, padding, dilation, outSpatialShape, transpose, false,
              useHash);
          return {outInds.to(device).slice(0, 0, numActOut),
                  indicePairs.to(device), indiceNum.to(device)};
        }
      }
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
    return {outInds.slice(0, 0, numActOut), indicePairs, indiceNum};
  }
}

// Sparse convolution forward: per kernel offset, gather the active input rows,
// run a dense GEMM against that offset's filter, and scatter-add into the
// output. `algo` selects the batched (bmm) path via indiceConvBatch when the
// kernel has more than one offset.
torch::Tensor indiceConv(torch::Tensor features, torch::Tensor filters,
                         torch::Tensor indicePairs, torch::Tensor indiceNum,
                         int64_t numActOut, int64_t _inverse, int64_t _subM,
                         int64_t algo) {
  auto kernelVolume = indiceNum.size(0);
  switch (algo) {
  case kBatchGemmGather:
  case kBatch: {
    // Batched path is only meaningful for multi-offset kernels; a 1x1 kernel
    // deliberately falls through to the native path below.
    if (kernelVolume != 1) {
      return indiceConvBatch(features, filters, indicePairs, indiceNum,
                             numActOut, _inverse, _subM,
                             algo != kBatchGemmGather);
    } else {
      break;
    }
  }
  case kNative:
    break;
  default:
    TV_THROW_RT_ERR("unknown algo");
  }
  // auto timer = spconv::CudaContextTimer<>();
  bool subM = _subM != 0;
  bool inverse = _inverse != 0;
  auto device = features.device().type();
  auto ndim = filters.dim() - 2;
  auto numInPlanes = features.size(1);
  auto numOutPlanes = filters.size(ndim + 1);
  // Pair counts are read on the host, so copy the small indiceNum tensor once.
  auto indicePairNumCpu = indiceNum.to({torch::kCPU});
  auto options =
      torch::TensorOptions().dtype(features.dtype()).device(features.device());
  torch::Tensor output = torch::zeros({numActOut, numOutPlanes}, options);
  filters = filters.view({-1, numInPlanes, numOutPlanes});
  // init for subM
  int indicePairMaxOffset = kernelVolume / 2;
  int indicePairMaxSize = numActOut;
  if (subM) {
    // the center index of subm conv don't need gather and scatter
    // add.
    torch::mm_out(output, features, filters[indicePairMaxOffset]);
    // get indice pair second max size based on subM symmetric property
    indicePairMaxSize =
        *std::max_element(indicePairNumCpu.data_ptr<int>(),
                          indicePairNumCpu.data_ptr<int>() +
                              indicePairMaxOffset);
    if (indicePairMaxSize == 0) {
      return output;
    }
  } else {
    indicePairMaxSize =
        *std::max_element(indicePairNumCpu.data_ptr<int>(),
                          indicePairNumCpu.data_ptr<int>() + kernelVolume);
  }
  // Scratch buffers sized for the largest offset; reused for every offset.
  torch::Tensor inputBuffer =
      torch::empty({indicePairMaxSize, numInPlanes}, options);
  torch::Tensor outputBuffer =
      torch::empty({indicePairMaxSize, numOutPlanes}, options);
  double totalGatherTime = 0;
  double totalGEMMTime = 0;
  double totalSAddTime = 0;
  // tv::ssprint("first subm gemm time", timer.report() / 1000.0);
  for (int i = 0; i < kernelVolume; ++i) {
    auto nHot = indicePairNumCpu.data_ptr<int>()[i];
    // Skip empty offsets and, for subM, the center offset handled above.
    if (nHot <= 0 || (subM && i == indicePairMaxOffset)) {
      continue;
    }
    // TODO torch::from_blob is a little slow
    // Views of the scratch buffers trimmed to the nHot rows actually used.
    auto outputBufferBlob =
        torch::from_blob(outputBuffer.data_ptr(), {nHot, numOutPlanes},
                         options);
    auto inputBufferBlob =
        torch::from_blob(inputBuffer.data_ptr(), {nHot, numInPlanes}, options);
    if (device == torch::kCPU) {
      sparse_gather_cpu(inputBuffer, features, indicePairs[inverse][i], nHot);
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      sparse_gather_cuda(inputBuffer, features, indicePairs[inverse][i], nHot);
      /* slower than SparseGatherFunctor, may due to int->long conversion
      auto indicePairLong = indicePairs[i][inverse].to(torch::kInt64);
      auto indicePairBlob = torch::from_blob(indicePairLong.data<long>(),
      {nHot}, indicePairOptions);
      torch::index_select_out(inputBufferBlob, features, 0,
      indicePairBlob);*/
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
    // totalGatherTime += timer.report() / 1000.0;
    torch::mm_out(outputBufferBlob, inputBufferBlob, filters[i]);
    // totalGEMMTime += timer.report() / 1000.0;
    if (device == torch::kCPU) {
      sparse_scatter_add_cpu(outputBuffer, output, indicePairs[!inverse][i],
                             nHot);
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      sparse_scatter_add_cuda(outputBuffer, output, indicePairs[!inverse][i],
                              nHot);
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
    // totalSAddTime += timer.report() / 1000.0;
  }
  // tv::ssprint(totalGatherTime, totalGEMMTime, totalSAddTime);
  return output;
}

// Batched forward: gathers all kernel offsets into one [KV, buffer, C] tensor
// and uses a single bmm per partition instead of KV separate GEMMs.
// `batchScatter` selects batched vs per-offset scatter-add. CUDA only.
torch::Tensor indiceConvBatch(torch::Tensor features, torch::Tensor filters,
                              torch::Tensor indicePairs,
                              torch::Tensor indiceNum, int64_t numActOut,
                              int64_t _inverse, int64_t _subM,
                              bool batchScatter) {
  bool subM = _subM != 0;
  bool inverse = _inverse != 0;
  auto device = features.device().type();
  auto ndim = filters.dim() - 2;
  auto kernelVolume = indiceNum.size(0);
  TV_ASSERT_INVALID_ARG(kernelVolume > 1, "error");
  auto numInPlanes = features.size(1);
  auto numOutPlanes = filters.size(ndim + 1);
  // auto timer = spconv::CudaContextTimer<>();
  auto indicePairNumCpu = indiceNum.to({torch::kCPU});
  auto indicePairNumVec =
      std::vector<int>(indicePairNumCpu.data_ptr<int>(),
                       indicePairNumCpu.data_ptr<int>() + kernelVolume);
  auto indicePairMaxSizeIter =
      std::max_element(indicePairNumVec.begin(), indicePairNumVec.end());
  int indicePairMaxOffset = indicePairMaxSizeIter - indicePairNumVec.begin();
  int indicePairMaxSize = *indicePairMaxSizeIter;
  // Second-largest pair count (partial sort places it at index 1).
  std::nth_element(indicePairNumVec.begin(), indicePairNumVec.begin() + 1,
                   indicePairNumVec.end(), std::greater<int>());
  int indicePairTop2Size = indicePairNumVec[1];
  auto options =
      torch::TensorOptions().dtype(features.dtype()).device(features.device());
  auto indice_dtype = indicePairs.scalar_type();
  torch::Tensor output = torch::zeros({numActOut, numOutPlanes}, options);
  // we cant use batch conv in subm directly because
  // number of indice in the center of filter is much more than other
  // filter location.
  // so we first use top2 indice num to do batch conv, then
  // do native conv (gemm) in center.
  int bufferSize = subM ? indicePairTop2Size : indicePairMaxSize;
  int maxKernelVolumePart = kernelVolume;
  std::vector<std::pair<int, int>> part_ranges = {{0, kernelVolume}};
  filters = filters.view({kernelVolume, numInPlanes, numOutPlanes});
  if (subM) {
    // Split the offsets into the ranges before and after the (dense) center.
    maxKernelVolumePart = std::max(
        indicePairMaxOffset, int(kernelVolume - indicePairMaxOffset - 1));
    part_ranges = {{0, indicePairMaxOffset},
                   {indicePairMaxOffset + 1, kernelVolume}};
    // Center offset of subM is an identity mapping: plain dense GEMM.
    torch::mm_out(output, features, filters[indicePairMaxOffset]);
    if (indicePairTop2Size == 0) {
      return output;
    }
  }
  // tv::ssprint("first subm gemm time", timer.report() / 1000.0);
  double totalGatherTime = 0;
  double totalGEMMTime = 0;
  double totalSAddTime = 0;
  torch::Tensor inputBuffer =
      torch::empty({maxKernelVolumePart, bufferSize, numInPlanes}, options);
  torch::Tensor outputBuffer =
      torch::empty({maxKernelVolumePart, bufferSize, numOutPlanes}, options);
  for (auto &range : part_ranges) {
    int start = range.first;
    int end = range.second;
    int length = end - start;
    int64_t size = length * bufferSize;
    auto inputBufferPart = tv::torch_slice_first_axis(inputBuffer, 0, length);
    auto outputBufferPart = tv::torch_slice_first_axis(outputBuffer, 0, length);
    auto indicePairs1Part =
        tv::torch_slice_first_axis(indicePairs[inverse], start, end);
    auto indicePairs2Part =
        tv::torch_slice_first_axis(indicePairs[!inverse], start, end);
    auto filtersPart = tv::torch_slice_first_axis(filters, start, end);
    // Batched gather/scatter only exist for CUDA; CPU is rejected here.
    if (device == torch::kCPU) {
      TV_THROW_INVALID_ARG("unknown device type");
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      batch_sparse_gather_cuda(inputBufferPart, features, indicePairs1Part,
                               size);
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
    // totalGatherTime += timer.report() / 1000.0;
    torch::bmm_out(outputBufferPart, inputBufferPart, filtersPart);
    // totalGEMMTime += timer.report() / 1000.0;
    if (batchScatter) {
      if (device == torch::kCPU) {
        TV_THROW_INVALID_ARG("unknown device type");
      }
#ifdef TV_CUDA
      else if (device == torch::kCUDA) {
        batch_sparse_scatter_add_cuda(outputBufferPart, output,
                                      indicePairs2Part, size);
      }
#endif
      else {
        TV_THROW_INVALID_ARG("unknown device type");
      }
    } else {
      // Per-offset scatter-add using the exact nHot count for each offset.
      for (int i = 0; i < length; ++i) {
        auto nHot = indicePairNumCpu.data_ptr<int>()[i + start];
        if (nHot <= 0) {
          continue;
        }
        if (device == torch::kCPU) {
          sparse_scatter_add_cpu(outputBufferPart[i], output,
                                 indicePairs2Part[i], nHot);
        }
#ifdef TV_CUDA
        else if (device == torch::kCUDA) {
          sparse_scatter_add_cuda(outputBufferPart[i], output,
                                  indicePairs2Part[i], nHot);
        }
#endif
        else {
          TV_THROW_INVALID_ARG("unknown device type");
        }
      }
    }
    // totalSAddTime += timer.report() / 1000.0;
  }
  // tv::ssprint(totalGatherTime, totalGEMMTime, totalSAddTime);
  return output;
}

// Backward pass mirroring indiceConv: per offset, gather input rows and
// output-grad rows, compute the filter gradient (in^T @ outGrad) and the input
// gradient (outGrad @ filter^T), then scatter-add the input gradient.
std::vector<torch::Tensor>
indiceConvBackward(torch::Tensor features, torch::Tensor filters,
                   torch::Tensor outGrad, torch::Tensor indicePairs,
                   torch::Tensor indiceNum, int64_t _inverse, int64_t _subM,
                   int64_t algo) {
  auto kernelVolume = indiceNum.size(0);
  switch (algo) {
  case kBatchGemmGather:
  case kBatch: {
    if (kernelVolume != 1) {
      return indiceConvBackwardBatch(features, filters, outGrad, indicePairs,
                                     indiceNum, _inverse, _subM,
                                     algo != kBatchGemmGather);
    } else {
      break;
    }
  }
  case kNative:
    break;
  default:
    TV_THROW_RT_ERR("unknown algo");
  }
  bool subM = _subM != 0;
  bool inverse = _inverse != 0;
  auto device = features.device().type();
  auto ndim = filters.dim() - 2;
  auto numInPlanes = features.size(1);
  auto numOutPlanes = filters.size(ndim + 1);
  auto indicePairNumCpu = indiceNum.to({torch::kCPU});
  auto options =
      torch::TensorOptions().dtype(features.dtype()).device(features.device());
  auto filterShape = filters.sizes();
  torch::Tensor inputGrad = torch::zeros(features.sizes(), options);
  // NOTE(review): filtersGrad is torch::empty here but every kernel-volume
  // slice is overwritten by mm_out below (or skipped offsets stay
  // uninitialized) — presumably skipped offsets never contribute; confirm.
  torch::Tensor filtersGrad = torch::empty(filterShape, options);
  filters = filters.view({-1, numInPlanes, numOutPlanes});
  filtersGrad = filtersGrad.view({-1, numInPlanes, numOutPlanes});
  // init for subM
  int indicePairMaxOffset = kernelVolume / 2;
  int indicePairMaxSize =
      indicePairNumCpu.data_ptr<int>()[indicePairMaxOffset];
  if (subM) {
    // Center offset of subM: dense GEMMs, no gather/scatter needed.
    auto filterGradSub = filtersGrad[indicePairMaxOffset];
    torch::mm_out(filterGradSub, features.t(), outGrad);
    torch::mm_out(inputGrad, outGrad, filters[indicePairMaxOffset].t());
    // get indice pair second max size based on subM symmetric property
    indicePairMaxSize =
        *std::max_element(indicePairNumCpu.data_ptr<int>(),
                          indicePairNumCpu.data_ptr<int>() +
                              indicePairMaxOffset);
    if (indicePairMaxSize == 0) {
      return {inputGrad, filtersGrad.view(filterShape)};
    }
  } else {
    indicePairMaxSize =
        *std::max_element(indicePairNumCpu.data_ptr<int>(),
                          indicePairNumCpu.data_ptr<int>() + kernelVolume);
  }
  torch::Tensor inputBuffer =
      torch::empty({indicePairMaxSize, numInPlanes}, options);
  torch::Tensor outputBuffer =
      torch::empty({indicePairMaxSize, numOutPlanes}, options);
  for (int i = 0; i < kernelVolume; ++i) {
    auto nHot = indicePairNumCpu.data_ptr<int>()[i];
    if (nHot <= 0 || (subM && i == indicePairMaxOffset)) {
      continue;
    }
    if (device == torch::kCPU) {
      sparse_gather_cpu(inputBuffer, features, indicePairs[inverse][i], nHot);
      sparse_gather_cpu(outputBuffer, outGrad, indicePairs[!inverse][i], nHot);
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      sparse_gather_cuda(inputBuffer, features, indicePairs[inverse][i], nHot);
      sparse_gather_cuda(outputBuffer, outGrad, indicePairs[!inverse][i],
                         nHot);
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
    auto filterGradSub = filtersGrad[i];
    auto outputBufferBlob =
        torch::from_blob(outputBuffer.data_ptr(), {nHot, numOutPlanes},
                         options);
    auto inputBufferBlob =
        torch::from_blob(inputBuffer.data_ptr(), {nHot, numInPlanes}, options);
    torch::mm_out(filterGradSub, inputBufferBlob.t(), outputBufferBlob);
    // inputBuffer is reused in place to hold the gathered input gradient.
    torch::mm_out(inputBufferBlob, outputBufferBlob, filters[i].t());
    if (device == torch::kCPU) {
      sparse_scatter_add_cpu(inputBuffer, inputGrad, indicePairs[inverse][i],
                             nHot);
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      sparse_scatter_add_cuda(inputBuffer, inputGrad, indicePairs[inverse][i],
                              nHot);
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
  }
  return {inputGrad, filtersGrad.view(filterShape)};
}

// Batched backward, mirroring indiceConvBatch: one bmm per partition for both
// the filter gradient and the input gradient. CUDA only.
std::vector<torch::Tensor>
indiceConvBackwardBatch(torch::Tensor features, torch::Tensor filters,
                        torch::Tensor outGrad, torch::Tensor indicePairs,
                        torch::Tensor indiceNum, int64_t _inverse,
                        int64_t _subM, bool batchScatter) {
  bool subM = _subM != 0;
  bool inverse = _inverse != 0;
  auto device = features.device().type();
  auto ndim = filters.dim() - 2;
  auto kernelVolume = indiceNum.size(0);
  TV_ASSERT_INVALID_ARG(kernelVolume > 1, "error");
  auto numInPlanes = features.size(1);
  auto numOutPlanes = filters.size(ndim + 1);
  auto indicePairNumCpu = indiceNum.to({torch::kCPU});
  auto indicePairNumVec =
      std::vector<int>(indicePairNumCpu.data_ptr<int>(),
                       indicePairNumCpu.data_ptr<int>() + kernelVolume);
  auto indicePairMaxSizeIter =
      std::max_element(indicePairNumVec.begin(), indicePairNumVec.end());
  int indicePairMaxOffset = indicePairMaxSizeIter - indicePairNumVec.begin();
  int indicePairMaxSize = *indicePairMaxSizeIter;
  std::nth_element(indicePairNumVec.begin(), indicePairNumVec.begin() + 1,
                   indicePairNumVec.end(), std::greater<int>());
  int indicePairTop2Size = indicePairNumVec[1];
  auto options =
      torch::TensorOptions().dtype(features.dtype()).device(features.device());
  auto indice_dtype = indicePairs.scalar_type();
  auto filterShape = filters.sizes();
  torch::Tensor inputGrad = torch::zeros(features.sizes(), options);
  torch::Tensor filtersGrad = torch::zeros(filterShape, options);
  int bufferSize = subM ? indicePairTop2Size : indicePairMaxSize;
  filters = filters.view({-1, numInPlanes, numOutPlanes});
  filtersGrad = filtersGrad.view({-1, numInPlanes, numOutPlanes});
  std::vector<std::pair<int, int>> part_ranges = {{0, kernelVolume}};
  int maxKernelVolumePart = kernelVolume;
  if (subM) {
    maxKernelVolumePart = std::max(
        indicePairMaxOffset, int(kernelVolume - indicePairMaxOffset - 1));
    part_ranges = {{0, indicePairMaxOffset},
                   {indicePairMaxOffset + 1, kernelVolume}};
    auto filtersGradSub = filtersGrad[indicePairMaxOffset];
    auto filtersSub = filters[indicePairMaxOffset];
    torch::mm_out(filtersGradSub, features.t(), outGrad);
    torch::mm_out(inputGrad, outGrad, filtersSub.t());
    if (indicePairTop2Size == 0) {
      return {inputGrad, filtersGrad.view(filterShape)};
    }
  }
  // zeros (not empty) is required: rows past each offset's nHot stay zero so
  // they contribute nothing to the bmm-based gradients.
  torch::Tensor inputBuffer =
      torch::zeros({maxKernelVolumePart, bufferSize, numInPlanes}, options);
  torch::Tensor outputBuffer =
      torch::zeros({maxKernelVolumePart, bufferSize, numOutPlanes}, options);
  for (auto &range : part_ranges) {
    int start = range.first;
    int end = range.second;
    int length = end - start;
    int64_t size = length * bufferSize;
    auto inputBufferPart = tv::torch_slice_first_axis(inputBuffer, 0, length);
    auto outputBufferPart = tv::torch_slice_first_axis(outputBuffer, 0, length);
    auto indicePairs1Part =
        tv::torch_slice_first_axis(indicePairs[inverse], start, end);
    auto indicePairs2Part =
        tv::torch_slice_first_axis(indicePairs[!inverse], start, end);
    auto filtersPart = tv::torch_slice_first_axis(filters, start, end);
    auto filtersGradPart = tv::torch_slice_first_axis(filtersGrad, start, end);
    if (device == torch::kCPU) {
      TV_THROW_INVALID_ARG("unknown device type");
    }
#ifdef TV_CUDA
    else if (device == torch::kCUDA) {
      batch_sparse_gather_cuda(inputBufferPart, features, indicePairs1Part,
                               size);
      batch_sparse_gather_cuda(outputBufferPart, outGrad, indicePairs2Part,
                               size);
    }
#endif
    else {
      TV_THROW_INVALID_ARG("unknown device type");
    }
    // filters: KV, I, O, inputBuffer: [KV, buffer, I]
    // outputBuffer: [KV, buffer, O]
    torch::bmm_out(filtersGradPart, inputBufferPart.permute({0, 2, 1}),
                   outputBufferPart);
    // NOTE(review): this writes into the full `inputBuffer`, while every other
    // partitioned op here uses the `...Part` slice — verify this is intended
    // (works only because inputBufferPart views the same storage).
    torch::bmm_out(inputBuffer, outputBufferPart,
                   filtersPart.permute({0, 2, 1}));
    if (batchScatter) {
      if (device == torch::kCPU) {
        TV_THROW_INVALID_ARG("unknown device type");
      }
#ifdef TV_CUDA
      else if (device == torch::kCUDA) {
        batch_sparse_scatter_add_cuda(inputBufferPart, inputGrad,
                                      indicePairs1Part, size);
      }
#endif
      else {
        TV_THROW_INVALID_ARG("unknown device type");
      }
    } else {
      for (int i = 0; i < length; ++i) {
        auto nHot = indicePairNumCpu.data_ptr<int>()[i + start];
        if (nHot <= 0) {
          continue;
        }
        if (device == torch::kCPU) {
          sparse_scatter_add_cpu(inputBufferPart[i], inputGrad,
                                 indicePairs1Part[i], nHot);
        }
#ifdef TV_CUDA
        else if (device == torch::kCUDA) {
          sparse_scatter_add_cuda(inputBufferPart[i], inputGrad,
                                  indicePairs1Part[i], nHot);
        }
#endif
        else {
          TV_THROW_INVALID_ARG("unknown device type");
        }
      }
    }
  }
  return {inputGrad, filtersGrad.view(filterShape)};
}
} // namespace spconv
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/utils/all.cc
C++
// Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <spconv/box_iou.h> #include <spconv/nms.h> #include <spconv/point2voxel.h> namespace py = pybind11; using namespace pybind11::literals; PYBIND11_MODULE(spconv_utils, m) { m.doc() = "util pybind11 functions for spconv"; #ifdef TV_CUDA m.def("non_max_suppression", &spconv::non_max_suppression<double>, py::return_value_policy::reference_internal, "bbox iou", "boxes"_a = 1, "keep_out"_a = 2, "nms_overlap_thresh"_a = 3, "device_id"_a = 4); m.def("non_max_suppression", &spconv::non_max_suppression<float>, py::return_value_policy::reference_internal, "bbox iou", "boxes"_a = 1, "keep_out"_a = 2, "nms_overlap_thresh"_a = 3, "device_id"_a = 4); #endif m.def("non_max_suppression_cpu", &spconv::non_max_suppression_cpu<double>, py::return_value_policy::reference_internal, "bbox iou", "boxes"_a = 1, "order"_a = 2, "nms_overlap_thresh"_a = 3, "eps"_a = 4); m.def("non_max_suppression_cpu", &spconv::non_max_suppression_cpu<float>, py::return_value_policy::reference_internal, "bbox iou", "boxes"_a = 1, "order"_a = 2, "nms_overlap_thresh"_a = 3, "eps"_a = 4); m.def("rotate_non_max_suppression_cpu", &spconv::rotate_non_max_suppression_cpu<float>, py::return_value_policy::reference_internal, "bbox iou", "box_corners"_a = 1, "order"_a = 2, "standup_iou"_a = 3, "thresh"_a = 4); m.def("rotate_non_max_suppression_cpu", &spconv::rotate_non_max_suppression_cpu<double>, py::return_value_policy::reference_internal, 
"bbox iou", "box_corners"_a = 1, "order"_a = 2, "standup_iou"_a = 3, "thresh"_a = 4); m.def("rbbox_iou", &spconv::rbbox_iou<double>, py::return_value_policy::reference_internal, "rbbox iou", "box_corners"_a = 1, "qbox_corners"_a = 2, "standup_iou"_a = 3, "standup_thresh"_a = 4); m.def("rbbox_iou", &spconv::rbbox_iou<float>, py::return_value_policy::reference_internal, "rbbox iou", "box_corners"_a = 1, "qbox_corners"_a = 2, "standup_iou"_a = 3, "standup_thresh"_a = 4); m.def("rbbox_intersection", &spconv::rbbox_intersection<double>, py::return_value_policy::reference_internal, "rbbox iou", "box_corners"_a = 1, "qbox_corners"_a = 2, "standup_iou"_a = 3, "standup_thresh"_a = 4); m.def("rbbox_intersection", &spconv::rbbox_intersection<float>, py::return_value_policy::reference_internal, "rbbox iou", "box_corners"_a = 1, "qbox_corners"_a = 2, "standup_iou"_a = 3, "standup_thresh"_a = 4); m.def("points_to_voxel_3d_np", &spconv::points_to_voxel_3d_np<float, 3>, "matrix tensor_square", "points"_a = 1, "voxels"_a = 2, "voxel_point_mask"_a = 3, "coors"_a = 4, "num_points_per_voxel"_a = 5, "coor_to_voxelidx"_a = 6, "voxel_size"_a = 7, "coors_range"_a = 8, "max_points"_a = 9, "max_voxels"_a = 10); m.def("points_to_voxel_3d_np", &spconv::points_to_voxel_3d_np<double, 3>, "matrix tensor_square", "points"_a = 1, "voxels"_a = 2, "voxel_point_mask"_a = 3, "coors"_a = 4, "num_points_per_voxel"_a = 5, "coor_to_voxelidx"_a = 6, "voxel_size"_a = 7, "coors_range"_a = 8, "max_points"_a = 9, "max_voxels"_a = 10); m.def("points_to_voxel_3d_np_mean", &spconv::points_to_voxel_3d_np_mean<float, 3>, "matrix tensor_square", "points"_a = 1, "voxels"_a = 2, "voxel_point_mask"_a = 3, "means"_a = 4, "coors"_a = 5, "num_points_per_voxel"_a = 6, "coor_to_voxelidx"_a = 7, "voxel_size"_a = 8, "coors_range"_a = 9, "max_points"_a = 10, "max_voxels"_a = 11); m.def("points_to_voxel_3d_np_mean", &spconv::points_to_voxel_3d_np_mean<double, 3>, "matrix tensor_square", "points"_a = 1, "voxels"_a = 2, 
"voxel_point_mask"_a = 3, "means"_a = 4, "coors"_a = 5, "num_points_per_voxel"_a = 6, "coor_to_voxelidx"_a = 7, "voxel_size"_a = 8, "coors_range"_a = 9, "max_points"_a = 10, "max_voxels"_a = 11); m.def("points_to_voxel_3d_with_filtering", &spconv::points_to_voxel_3d_with_filtering<float, 3>, "matrix tensor_square", "points"_a = 1, "voxels"_a = 2, "voxel_point_mask"_a = 3, "voxel_mask"_a = 4, "mins"_a = 5, "maxs"_a = 6, "coors"_a = 7, "num_points_per_voxel"_a = 8, "coor_to_voxelidx"_a = 9, "voxel_size"_a = 10, "coors_range"_a = 11, "max_points"_a = 12, "max_voxels"_a = 13, "block_factor"_a = 14, "block_size"_a = 15, "height_threshold"_a = 16, "height_high_threshold"_a = 17); m.def("points_to_voxel_3d_with_filtering", &spconv::points_to_voxel_3d_with_filtering<float, 3>, "matrix tensor_square", "points"_a = 1, "voxels"_a = 2, "voxel_point_mask"_a = 3, "voxel_mask"_a = 4, "mins"_a = 5, "maxs"_a = 6, "coors"_a = 7, "num_points_per_voxel"_a = 8, "coor_to_voxelidx"_a = 9, "voxel_size"_a = 10, "coors_range"_a = 11, "max_points"_a = 12, "max_voxels"_a = 13, "block_factor"_a = 14, "block_size"_a = 15, "height_threshold"_a = 16, "height_high_threshold"_a = 17); }
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/src/utils/nms.cu
CUDA
// ------------------------------------------------------------------
// Deformable Convolutional Networks
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN
// (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include <cuda_runtime.h>
#include <iostream>
#include <spconv/nms_gpu.h>
#include <vector>

// Prints (but does not abort on) any CUDA runtime error.
#define CUDA_CHECK(condition)                                                  \
  /* Code block avoids redefinition of cudaError_t error */                    \
  do {                                                                         \
    cudaError_t error = condition;                                             \
    if (error != cudaSuccess) {                                                \
      std::cout << cudaGetErrorString(error) << std::endl;                     \
    }                                                                          \
  } while (0)

#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// One suppression bit per thread in an unsigned long long mask word.
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// IoU of two axis-aligned boxes given as [x1, y1, x2, y2, ...]; the +1 terms
// follow the inclusive pixel-coordinate convention of the original Faster
// R-CNN code.
template <typename DType>
__device__ inline DType devIoU(DType const *const a, DType const *const b) {
  DType left = max(a[0], b[0]), right = min(a[2], b[2]);
  DType top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  DType width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  DType interS = width * height;
  DType Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  DType Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

// Computes, for every (row-tile, col-tile) pair of boxes, a bitmask of which
// boxes in the column tile overlap each box in the row tile above the
// threshold. Boxes are stored as 5 consecutive DType values each.
template <typename DType, int BLOCK_THREADS>
__global__ void nms_kernel(const int n_boxes, const DType nms_overlap_thresh,
                           const DType *dev_boxes,
                           unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  const int row_size = min(n_boxes - row_start * BLOCK_THREADS, BLOCK_THREADS);
  const int col_size = min(n_boxes - col_start * BLOCK_THREADS, BLOCK_THREADS);
  // Stage the column tile's boxes in shared memory for reuse by every row
  // thread of this block.
  __shared__ DType block_boxes[BLOCK_THREADS * 5];
  if (threadIdx.x < col_size) {
#pragma unroll
    for (int i = 0; i < 5; ++i) {
      block_boxes[threadIdx.x * 5 + i] =
          dev_boxes[(BLOCK_THREADS * col_start + threadIdx.x) * 5 + i];
    }
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = BLOCK_THREADS * row_start + threadIdx.x;
    const DType *cur_box = dev_boxes + cur_box_idx * 5;
    unsigned long long t = 0;
    int start = 0;
    // On the diagonal tile only compare against later boxes (boxes are
    // expected pre-sorted by score, per the original Faster R-CNN scheme).
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (int i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, BLOCK_THREADS);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

// Switches the current CUDA device if it differs from device_id.
void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to cudaSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(cudaSetDevice(device_id));
}

// Host driver: uploads host boxes, runs nms_kernel, then greedily walks the
// overlap bitmasks on the host to select kept boxes.
//
// keep_out: receives the kept box indices; returns the number kept.
// boxes_host: [boxes_num, boxes_dim] row-major, already sorted by score.
template <typename DType, int BLOCK_THREADS>
int _nms_gpu(int *keep_out, const DType *boxes_host, int boxes_num,
             int boxes_dim, DType nms_overlap_thresh, int device_id) {
  _set_device(device_id);
  DType *boxes_dev = NULL;
  unsigned long long *mask_dev = NULL;
  const int col_blocks = DIVUP(boxes_num, BLOCK_THREADS);
  CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(DType)));
  CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host,
                        boxes_num * boxes_dim * sizeof(DType),
                        cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMalloc(&mask_dev,
                        boxes_num * col_blocks * sizeof(unsigned long long)));
  dim3 blocks(DIVUP(boxes_num, BLOCK_THREADS),
              DIVUP(boxes_num, BLOCK_THREADS));
  dim3 threads(BLOCK_THREADS);
  nms_kernel<DType, BLOCK_THREADS>
      <<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);
  // Surface kernel launch failures; every other CUDA call here is checked.
  CUDA_CHECK(cudaGetLastError());
  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev,
                        sizeof(unsigned long long) * boxes_num * col_blocks,
                        cudaMemcpyDeviceToHost));
  // remv accumulates the suppression bits of all boxes kept so far.
  // std::vector value-initializes its elements, so the previous
  // memset(&remv[0], 0, ...) was redundant (and relied on <cstring> that was
  // never included).
  std::vector<unsigned long long> remv(col_blocks, 0ULL);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / BLOCK_THREADS;
    int inblock = i % BLOCK_THREADS;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  CUDA_CHECK(cudaFree(boxes_dev));
  CUDA_CHECK(cudaFree(mask_dev));
  return num_to_keep;
}

// template<>
template int _nms_gpu<float, threadsPerBlock>(int *keep_out,
                                              const float *boxes_host,
                                              int boxes_num, int boxes_dim,
                                              float nms_overlap_thresh,
                                              int device_id);
// template<>
template int _nms_gpu<double, threadsPerBlock>(int *keep_out,
                                               const double *boxes_host,
                                               int boxes_num, int boxes_dim,
                                               double nms_overlap_thresh,
                                               int device_id);
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/test/fake_dist_train.py
Python
import time from pathlib import Path import fire import numpy as np import torch import torch.nn.functional as F import tqdm from torch import distributed, nn from torch.utils import data from torch.utils.data import DataLoader, Dataset from torchvision import datasets, transforms import horovod.torch as hvd import spconv from spconv.test_utils import generate_sparse_data class FakeSparseDataset(Dataset): def __len__(self): return 500 def __getitem__(self, idx): data_ranges = { 0: [-1, 1], 1: [0, 2], 2: [-2, 0], 3: [-2, -2], } l = np.random.randint(0, 4, size=[2]) data = generate_sparse_data([16, 64, 64], [16 * 64 * 64 // 2], 3, data_range=data_ranges[l[0]], with_dense=False) data2 = generate_sparse_data([16, 64, 64], [16 * 64 * 64 // 2], 3, data_range=data_ranges[l[1]], with_dense=False) features = np.ascontiguousarray(data["features"]).astype(np.float32) indices = np.ascontiguousarray( data["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features2 = np.ascontiguousarray(data2["features"]).astype(np.float32) indices2 = np.ascontiguousarray( data2["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features = np.ascontiguousarray(np.concatenate([features, features2])) indices = np.ascontiguousarray(np.concatenate([indices, indices2])) return features, indices, l class FakeClassifier(nn.Module): def __init__(self): super().__init__() self.net = spconv.SparseSequential( spconv.SubMConv3d(3, 8, 3, indice_key="subm1", padding=1, use_hash=False), nn.BatchNorm1d(8), nn.ReLU(), spconv.SparseConv3d(8, 16, 3, stride=2, padding=1, use_hash=False), nn.BatchNorm1d(16), nn.ReLU(), spconv.SubMConv3d(16, 16, 3, indice_key="subm2", padding=1, use_hash=False), nn.BatchNorm1d(16), nn.ReLU(), spconv.SparseConv3d(16, 32, 3, stride=2, padding=1, use_hash=False), nn.BatchNorm1d(32), nn.ReLU(), spconv.SubMConv3d(32, 32, 3, indice_key="subm3", padding=1, use_hash=False), nn.BatchNorm1d(32), nn.ReLU(), spconv.SparseConv3d(32, 64, 3, stride=2, padding=1, use_hash=False), nn.BatchNorm1d(64), 
nn.ReLU(), spconv.SubMConv3d(64, 64, 3, indice_key="subm4", padding=1, use_hash=False), nn.BatchNorm1d(64), nn.ReLU(), spconv.ToDense() # [64, 2, 8, 8] ) self.linear = nn.Linear(64 * 2 * 8 * 8, 4) def forward(self, features, indices): indices = indices.int() x = spconv.SparseConvTensor(features, indices, [16, 64, 64], 2) x = self.net(x) x = x.view(2, -1) x = self.linear(x) return x def run(): hvd.init() torch.cuda.set_device(hvd.local_rank()) np.random.seed(50051 + hvd.local_rank()) ds = FakeSparseDataset() device = torch.device('cuda') model = FakeClassifier() model.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) compression = hvd.Compression.none optimizer = hvd.DistributedOptimizer( optimizer, named_parameters=model.named_parameters(), compression=compression, op=hvd.Average) for i in tqdm.tqdm(list(range(100))): # for j in range(4): # features, indices, label = ds[(i * 4 + j) % len(ds)] features, indices, label = ds[i % len(ds)] features_t = torch.from_numpy(features) indices_t = torch.from_numpy(indices) features_t = features_t.to(device) indices_t = indices_t.to(device) target = torch.from_numpy(label).to(device) output = model(features_t, indices_t) # print(output.shape) loss = F.cross_entropy(output, target) optimizer.zero_grad() loss.backward() optimizer.step() def dev(): ds = FakeSparseDataset() for i in range(10): features, indices, label = ds[i] print(indices[:10]) features_t = torch.from_numpy(features.astype(np.float32)).cuda() indices_t = torch.from_numpy(indices.astype(np.int32)).cuda() net = FakeClassifier().cuda() net(features_t, indices_t) def main(): run() if __name__ == "__main__": fire.Fire(main)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/test/fake_train.py
Python
import time from pathlib import Path import fire import numpy as np import torch import torch.nn.functional as F import tqdm from torch import distributed, nn from torch.utils import data from torch.utils.data import DataLoader, Dataset from torchvision import datasets, transforms import spconv from spconv.test_utils import generate_sparse_data class FakeSparseDataset(Dataset): def __len__(self): return 500 def __getitem__(self, idx): data_ranges = { 0: [-1, 1], 1: [0, 2], 2: [-2, 0], 3: [-2, -2], } l = np.random.randint(0, 4, size=[2]) data = generate_sparse_data([16, 64, 64], [16 * 64 * 64 // 2], 3, data_range=data_ranges[l[0]], with_dense=False) data2 = generate_sparse_data([16, 64, 64], [16 * 64 * 64 // 2], 3, data_range=data_ranges[l[1]], with_dense=False) features = np.ascontiguousarray(data["features"]).astype(np.float32) indices = np.ascontiguousarray( data["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features2 = np.ascontiguousarray(data2["features"]).astype(np.float32) indices2 = np.ascontiguousarray( data2["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features = np.ascontiguousarray(np.concatenate([features, features2])) indices = np.ascontiguousarray(np.concatenate([indices, indices2])) return features, indices, l class FakeClassifier(nn.Module): def __init__(self): super().__init__() self.net = spconv.SparseSequential( spconv.SubMConv3d(3, 8, 3, indice_key="subm1", padding=1, use_hash=False), nn.BatchNorm1d(8), nn.ReLU(), spconv.SparseConv3d(8, 16, 3, stride=2, padding=1, use_hash=False), nn.BatchNorm1d(16), nn.ReLU(), spconv.SubMConv3d(16, 16, 3, indice_key="subm2", padding=1, use_hash=False), nn.BatchNorm1d(16), nn.ReLU(), spconv.SparseConv3d(16, 32, 3, stride=2, padding=1, use_hash=False), nn.BatchNorm1d(32), nn.ReLU(), spconv.SubMConv3d(32, 32, 3, indice_key="subm3", padding=1, use_hash=False), nn.BatchNorm1d(32), nn.ReLU(), spconv.SparseConv3d(32, 64, 3, stride=2, padding=1, use_hash=False), nn.BatchNorm1d(64), nn.ReLU(), spconv.SubMConv3d(64, 
64, 3, indice_key="subm4", padding=1, use_hash=False), nn.BatchNorm1d(64), nn.ReLU(), spconv.ToDense() # [64, 2, 8, 8] ) self.linear = nn.Linear(64 * 2 * 8 * 8, 4) def forward(self, features, indices): indices = indices.int() x = spconv.SparseConvTensor(features, indices, [16, 64, 64], 2) x = self.net(x) x = x.view(2, -1) x = self.linear(x) return x def run(): np.random.seed(50051) ds = FakeSparseDataset() device = torch.device('cuda') model = FakeClassifier() model.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) for i in tqdm.tqdm(list(range(100))): # for j in range(4): # features, indices, label = ds[(i * 4 + j) % len(ds)] features, indices, label = ds[i % len(ds)] features_t = torch.from_numpy(features) indices_t = torch.from_numpy(indices) features_t = features_t.to(device) indices_t = indices_t.to(device) target = torch.from_numpy(label).to(device) output = model(features_t, indices_t) # print(output.shape) loss = F.cross_entropy(output, target) optimizer.zero_grad() loss.backward() optimizer.step() def dev(): ds = FakeSparseDataset() for i in range(10): features, indices, label = ds[i] print(indices[:10]) features_t = torch.from_numpy(features.astype(np.float32)).cuda() indices_t = torch.from_numpy(indices.astype(np.int32)).cuda() net = FakeClassifier().cuda() net(features_t, indices_t) def main(): run() if __name__ == "__main__": fire.Fire(main)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/test/src/catch_main.cpp
C++
// 000-CatchMain.cpp // In a Catch project with multiple files, dedicate one file to compile the // source code of Catch itself and reuse the resulting object file for linking. // Let Catch provide main(): #define CATCH_CONFIG_MAIN #include "catch.hpp" // That's it // Compile implementation of Catch for use with files that do contain tests: // - g++ -std=c++11 -Wall -I$(CATCH_SINGLE_INCLUDE) -c 000-CatchMain.cpp // - cl -EHsc -I%CATCH_SINGLE_INCLUDE% -c 000-CatchMain.cpp
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/test/src/test_conv_rule.cpp
C++
#include <algorithm> #include <iostream> #include <map> #include "catch.hpp" #include <prettyprint.h> #include <string> #include <vector> #include <exception> #include <numeric> #include <pybind11/embed.h> // everything needed for embedding #include <pybind11/functional.h> #include <pybind11/numpy.h> #include <pybind11/pybind11.h> #include <pybind11/stl.h> #include <tuple> #include <pybind11_utils.h> #include <spconv/spconv_ops.h> namespace py = pybind11; TEST_CASE("GetConvIndPair", "[SpConvNet]") { using namespace py::literals; py::scoped_interpreter guard{}; // start the interpreter and keep it alive py::exec(R"( from __future__ import print_function import numpy as np import math # import spconv # import torch def get_convolution_output_size(input_size, kernel_size, stride, padding=None, rate=None): ndim = len(input_size) if padding is None: padding = [0] * ndim output_size = [] for i in range(ndim): output_size.append((input_size[i] + 2 * padding[i] - ( (kernel_size[i] - 1) + 1)) // stride[i] + 1) return output_size def get_test_sparse_data(shape, num_points, num_channels, integer=False, dtype=np.float32): dense_shape = shape ndim = len(dense_shape) # num_points = np.random.randint(10, 100, size=[batch_size, ndim]) num_points = np.array(num_points) # num_points = np.array([3, 2]) batch_size = len(num_points) batch_indices = [] coors_total = np.stack( np.meshgrid(*[np.arange(0, s) for s in shape]), axis=-1) coors_total = coors_total.reshape(-1, ndim) for i in range(batch_size): np.random.shuffle(coors_total) inds_total = coors_total[:num_points[i]] inds_total = np.pad( inds_total, ((0, 0), (0, 1)), mode="constant", constant_values=i) batch_indices.append(inds_total) if integer: sparse_data = np.random.randint( 20, 100, size=[num_points.sum(), num_channels]).astype(dtype) else: sparse_data = np.random.uniform( -1, 1, size=[num_points.sum(), num_channels]).astype(dtype) # sparse_data = np.arange(1, num_points.sum() + 1).astype(np.float32).reshape(5, 1) dense_data 
= np.zeros( [batch_size, num_channels, *dense_shape], dtype=sparse_data.dtype) start = 0 for i, inds in enumerate(batch_indices): for j, ind in enumerate(inds): dense_slice = (i, slice(None), *ind[:-1]) dense_data[dense_slice] = sparse_data[start + j] start += len(inds) batch_indices = np.concatenate(batch_indices, axis=0) return { "features": sparse_data.astype(dtype), "indices": batch_indices.astype(np.int32), "features_dense": dense_data.astype(dtype), } shape = [50, 30, 30] num_points = [5000] * 1 # np.random.seed(np.random.randint(1, 100000)) in_channels = 64 sparse_dict = get_test_sparse_data(shape, num_points, in_channels) features = np.ascontiguousarray(sparse_dict["features"]).astype(np.float32) indices = np.ascontiguousarray(sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features_dense = sparse_dict["features_dense"] # indices_t = torch.from_numpy(indices) filters = np.random.uniform(0, 1, size=[3, 3, 3, 64, 64]).astype(np.float32) # print(outids.shape) )"); SECTION("DebugTest"){ auto inds = array2TensorView<int>(py::array(py::globals()["indices"])); auto inds_tensor = torch::from_blob(inds.data(), {inds.dim(0), inds.dim(1)}, torch::dtype(torch::kInt32)); auto inds_gpu = inds_tensor.to(torch::Device(torch::kCPU)); auto features = array2TensorView<float>(py::array(py::globals()["features"])); auto features_tensor = torch::from_blob(features.data(), {features.dim(0), features.dim(1)}, torch::dtype(torch::kFloat)); auto features_gpu = features_tensor.to(torch::Device(torch::kCUDA, 0)); auto filters = array2TensorView<float>(py::array(py::globals()["filters"])); auto filters_tensor = torch::from_blob(filters.data(), {filters.dim(0), filters.dim(1), filters.dim(2), filters.dim(3), filters.dim(4)}, torch::dtype(torch::kFloat)); auto filters_gpu = filters_tensor.to(torch::Device(torch::kCUDA, 0)); auto outputs = spconv::getIndicePair<3>(inds_gpu, 1, {46, 26, 26}, {50, 30, 30}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {2, 2, 2}, {0, 0, 0}, 0, 0, 0); // 
std::cout << outputs[2] << std::endl; /* auto output = spconv::indiceConv<float>(features_gpu, filters_gpu, outputs[1], outputs[2], outputs[0].size(0), false); std::cout << output << std::endl;*/ } }
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/extern/spconv/test/test_conv.py
Python
# Copyright 2019 Yan Yan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest from pathlib import Path import numpy as np import torch from torch import nn import spconv from spconv.test_utils import TestCase, generate_sparse_data, params_grid # import sparseconvnet as scn class SparseConv3dTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, out_channels, kernel_size, stride, padding, dilation, algo=spconv.ConvAlgo.Native): super().__init__() layers = [ spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False, use_hash=False, algo=algo) ] for i in range(1, num_layers): layers.append( spconv.SparseConv3d(out_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False, use_hash=False, algo=algo)) self.net = spconv.SparseSequential(*layers, ) # self.grid = torch.full([3, *shape], -1, dtype=torch.int32).cuda() self.grid = None self.shape = shape def forward(self, features, coors, batch_size): coors = coors.int() x = spconv.SparseConvTensor(features, coors, self.shape, batch_size, self.grid) return self.net(x) # .dense() class SubMConv3dTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, out_channels, kernel_size, stride, padding, dilation, algo=spconv.ConvAlgo.Native): super().__init__() layers = [ spconv.SubMConv3d(in_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False, algo=algo) 
] for i in range(1, num_layers): layers.append( spconv.SubMConv3d(out_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False, algo=algo)) self.net = spconv.SparseSequential(*layers, ) # self.grid = torch.full([3, *shape], -1, dtype=torch.int32).cuda() self.grid = None self.shape = shape def forward(self, features, coors, batch_size): coors = coors.int() # .cpu() x = spconv.SparseConvTensor(features, coors, self.shape, batch_size, self.grid) return self.net(x) # .dense() class Conv3dTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, out_channels, kernel_size, stride, padding, dilation): super().__init__() layers = [ nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False) ] for i in range(1, num_layers): layers.append( nn.Conv3d(out_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False)) self.net = nn.Sequential(*layers, ) self.shape = shape def forward(self, x): return self.net(x) # .dense() class SparseDeConv3dTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, out_channels, kernel_size, stride, padding, dilation): super().__init__() layers = [ spconv.SparseConvTranspose3d(in_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False) ] for i in range(1, num_layers): layers.append( spconv.SparseConvTranspose3d(out_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False)) self.net = spconv.SparseSequential(*layers, ) self.shape = shape def forward(self, features, coors, batch_size): coors = coors.int() x = spconv.SparseConvTensor(features, coors, self.shape, batch_size) return self.net(x) # .dense() class DeConv3dTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, out_channels, kernel_size, stride, padding, dilation): super().__init__() layers = [ nn.ConvTranspose3d(in_channels, out_channels, 
kernel_size, stride, padding=padding, dilation=dilation, bias=False) ] for i in range(1, num_layers): layers.append( nn.ConvTranspose3d(out_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False)) self.net = nn.Sequential(*layers, ) self.shape = shape def forward(self, x): return self.net(x) # .dense() class SparseMaxPoolTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, kernel_size, stride, padding, dilation): super().__init__() layers = [ spconv.SparseMaxPool3d(kernel_size, stride, padding, dilation) ] for i in range(1, num_layers): layers.append( spconv.SparseMaxPool3d(kernel_size, stride, padding, dilation)) self.net = spconv.SparseSequential(*layers, ) self.shape = shape def forward(self, features, coors, batch_size): coors = coors.int() x = spconv.SparseConvTensor(features, coors, self.shape, batch_size) return self.net(x) # .dense() class MaxPool3dTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, kernel_size, stride, padding, dilation): super().__init__() layers = [nn.MaxPool3d(kernel_size, stride, padding, dilation)] for i in range(1, num_layers): layers.append(nn.MaxPool3d(kernel_size, stride, padding, dilation)) self.net = nn.Sequential(*layers, ) self.shape = shape def forward(self, x): return self.net(x) # .dense() class SubmanifoldConvTestTorch(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, out_channels, kernel_size, stride): super().__init__() layers = [ spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key="subm0") ] for i in range(1, num_layers): layers.append( spconv.SubMConv3d(out_channels, out_channels, kernel_size, bias=False)) self.net = nn.Sequential(*layers, ) self.shape = shape def forward(self, features, coors, batch_size): coors = coors.int() x = spconv.SparseConvTensor(features, coors, self.shape, batch_size) return self.net(x) class SCNCoupleDeConvTest(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, 
out_channels, kernel_size, stride): super().__init__() self.scn_input = scn.InputLayer(ndim, shape, mode=0) self.net = nn.Sequential( scn.Convolution(ndim, in_channels, out_channels, kernel_size, stride, bias=False), scn.Deconvolution(ndim, out_channels, in_channels, kernel_size, stride, bias=False), scn.SparseToDense(ndim, in_channels), ) def forward(self, features, coors, batch_size): coors = coors.long().cpu() x = self.scn_input((coors, features)) return self.net(x) class SparseCoupleDeConvTest(nn.Module): def __init__(self, num_layers, ndim, shape, in_channels, out_channels, kernel_size, stride): super().__init__() self.net = spconv.SparseSequential( spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride, indice_key="cp0", bias=False), spconv.SparseInverseConv3d(out_channels, in_channels, kernel_size, indice_key="cp0", bias=False), ) self.todense = spconv.ToDense() self.shape = shape def forward(self, features, coors, batch_size): coors = coors.int() x = spconv.SparseConvTensor(features, coors, self.shape, batch_size) return self.todense(self.net(x)) # .dense() def gather_nd(params, indices): # this function has a limit that MAX_ADVINDEX_CALC_DIMS=5 ndim = indices.shape[-1] output_shape = list(indices.shape[:-1]) + list( params.shape[indices.shape[-1]:]) flatted_indices = indices.view(-1, ndim) slices = [flatted_indices[:, i] for i in range(ndim)] slices += [Ellipsis] return params[slices].view(*output_shape) def scatter_nd(indices, updates, shape): """pytorch edition of tensorflow scatter_nd. this function don't contain except handle code. so use this carefully when indice repeats, don't support repeat add which is supported in tensorflow. 
""" ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device) ndim = indices.shape[-1] output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:] flatted_indices = indices.view(-1, ndim) slices = [flatted_indices[:, i] for i in range(ndim)] slices += [Ellipsis] ret[slices] = updates.view(*output_shape) return ret class TestSpConv(TestCase): def testSpConv3d(self): np.random.seed(484) devices = ["cpu:0"] shapes = [[19, 18, 17]] batchsizes = [1, 2] in_channels = [64] out_channels = [32, 48, 64] ksizes = [2, 3] strides = [1, 2, 3] paddings = [0, 1, 2] dilations = [1, 2, 3] for dev, shape, bs, IC, OC, k, s, p, d in params_grid( devices, shapes, batchsizes, in_channels, out_channels, ksizes, strides, paddings, dilations): if all([s > 1, d > 1]): continue # don't support this. device = torch.device(dev) num_points = [1000] * bs sparse_dict = generate_sparse_data(shape, num_points, IC) features = np.ascontiguousarray(sparse_dict["features"]).astype( np.float32) indices = np.ascontiguousarray( sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features_dense = sparse_dict["features_dense"].astype(np.float32) filters = np.random.uniform(0, 1, size=[k, k, k, IC, OC]).astype(np.float32) indices_t = torch.from_numpy(indices).int().to(device) features_t = torch.from_numpy(features).to(device) features_t.requires_grad = True features_dense_t = torch.from_numpy(features_dense).to(device) features_dense_t.requires_grad = True net = SparseConv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d).to(device) net_ref = Conv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d).to(device) filters_t = torch.from_numpy(filters).to(device) net_ref.net[0].weight.data[:] = filters_t.permute(4, 3, 0, 1, 2).contiguous() net.net[0].weight.data[:] = filters_t out_ref = net_ref(features_dense_t) out = net(features_t, indices_t, bs).dense() dout = np.random.uniform(-0.2, 0.2, out_ref.shape).astype(features.dtype) dout_t = torch.from_numpy(dout).to(device) out.backward(dout_t) 
out_ref.backward(dout_t) din_dense = features_dense_t.grad.detach().permute(0, 2, 3, 4, 1).contiguous() din_sparse = gather_nd(din_dense, indices_t.long()) din = features_t.grad.detach() din_np = din.cpu().numpy() din_sparse_np = din_sparse.cpu().numpy() self.assertAllClose(din_np, din_sparse_np, atol=1e-4) for layer, layer_ref in zip(net.net, net_ref.net): dw = layer.weight.grad.detach().cpu().numpy() dw_ref = layer_ref.weight.grad.detach().cpu().numpy() dw = dw.transpose(4, 3, 0, 1, 2) self.assertAllClose(dw, dw_ref, atol=1e-4) out_np = out.detach().cpu().numpy() out_ref_np = out_ref.detach().cpu().numpy() self.assertAllClose(out_np, out_ref_np, atol=1e-4) def testSpDeConv3d(self): np.random.seed(484) devices = ["cuda:0"] shapes = [[19, 18, 17]] batchsizes = [1, 2] in_channels = [64] out_channels = [32, 48, 64] ksizes = [2, 3] strides = [2, 3] paddings = [0, 1, 2] dilations = [1, 2, 3] for dev, shape, bs, IC, OC, k, s, p, d in params_grid( devices, shapes, batchsizes, in_channels, out_channels, ksizes, strides, paddings, dilations): if all([s > 1, d > 1]): continue # don't support this. 
device = torch.device(dev) num_points = [1000] * bs sparse_dict = generate_sparse_data(shape, num_points, IC) features = np.ascontiguousarray(sparse_dict["features"]).astype( np.float32) indices = np.ascontiguousarray( sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features_dense = sparse_dict["features_dense"].astype(np.float32) filters = np.random.uniform(0, 1, size=[k, k, k, IC, OC]).astype(np.float32) indices_t = torch.from_numpy(indices).int().to(device) features_t = torch.from_numpy(features).to(device) features_t.requires_grad = True features_dense_t = torch.from_numpy(features_dense).to(device) features_dense_t.requires_grad = True net = SparseDeConv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d).to(device) net_ref = DeConv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d).to(device) filters_t = torch.from_numpy(filters).to(device) net_ref.net[0].weight.data[:] = filters_t.permute(3, 4, 0, 1, 2).contiguous() net.net[0].weight.data[:] = filters_t out_ref = net_ref(features_dense_t) out = net(features_t, indices_t, bs).dense() dout = np.random.uniform(-0.2, 0.2, out_ref.shape).astype(features.dtype) dout_t = torch.from_numpy(dout).to(device) out.backward(dout_t) out_ref.backward(dout_t) din_dense = features_dense_t.grad.detach().permute(0, 2, 3, 4, 1).contiguous() din_sparse = gather_nd(din_dense, indices_t.long()) din = features_t.grad.detach() din_np = din.cpu().numpy() din_sparse_np = din_sparse.cpu().numpy() self.assertAllClose(din_np, din_sparse_np, atol=1e-4) for layer, layer_ref in zip(net.net, net_ref.net): dw = layer.weight.grad.detach().cpu().numpy() dw_ref = layer_ref.weight.grad.detach().cpu().numpy() dw = dw.transpose(3, 4, 0, 1, 2) self.assertAllClose(dw, dw_ref, atol=1e-4) out_np = out.detach().cpu().numpy() out_ref_np = out_ref.detach().cpu().numpy() self.assertAllClose(out_np, out_ref_np, atol=1e-4) def testSpCpConv3d(self): np.random.seed(484) devices = ["cuda:0", "cpu:0"] shapes = [[20, 20, 20]] batchsizes = [1, 2] in_channels = [64] 
out_channels = [32, 48, 64] ksizes = [2] strides = [2] paddings = [0, 1, 2] dilations = [1, 2, 3] for dev, shape, bs, IC, OC, k, s in params_grid( devices, shapes, batchsizes, in_channels, out_channels, ksizes, strides): device = torch.device(dev) num_points = [1000] * bs sparse_dict = generate_sparse_data(shape, num_points, IC) features = np.ascontiguousarray(sparse_dict["features"]).astype( np.float32) indices = np.ascontiguousarray( sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features_dense = sparse_dict["features_dense"].astype(np.float32) filters = np.random.uniform(0, 1, size=[k, k, k, IC, OC]).astype(np.float32) indices_t = torch.from_numpy(indices).int().to(device) indices_scn_t = torch.from_numpy( indices[:, [1, 2, 3, 0]]).int().to(device) features_t = torch.from_numpy(features).to(device) features_t.requires_grad = True features_ref_t = torch.from_numpy(features).to(device) features_ref_t.requires_grad = True net_ref = SCNCoupleDeConvTest(1, 3, shape, IC, OC, k, s).to(device) net = SparseCoupleDeConvTest(1, 3, shape, IC, OC, k, s).to(device) net_ref.net[0].weight.data[:] = net.net[0].weight.data[:].view( *net_ref.net[0].weight.shape) net_ref.net[1].weight.data[:] = net.net[1].weight.data[:].view( *net_ref.net[1].weight.shape) out_ref = net_ref(features_ref_t, indices_scn_t, bs) out = net(features_t, indices_t, bs) dout = np.random.uniform(-0.2, 0.2, out_ref.shape).astype(features.dtype) dout_t = torch.from_numpy(dout).to(device) out.backward(dout_t) out_ref.backward(dout_t) din = features_t.grad.detach() din_ref = features_ref_t.grad.detach() din_np = din.cpu().numpy() din_ref_np = din_ref.cpu().numpy() self.assertAllClose(din_ref_np, din_np, atol=1e-4) for layer, layer_ref in zip(net.net, net_ref.net): dw = layer.weight.grad.detach().cpu().numpy() dw_ref = layer_ref.weight.grad.detach().cpu().view( *dw.shape).numpy() self.assertAllClose(dw, dw_ref, atol=1e-4) out_np = out.detach().cpu().numpy() out_ref_np = out_ref.detach().cpu().numpy() 
self.assertAllClose(out_np, out_ref_np, atol=1e-4) def testSpMaxPool3d(self): np.random.seed(485) devices = ["cuda:0"] shapes = [[19, 18, 17]] batchsizes = [1, 2] in_channels = [62] out_channels = [62] ksizes = [2, 3] strides = [1, 2, 3] paddings = [0, 1] dilations = [1, 2, 3] for dev, shape, bs, IC, OC, k, s, p, d in params_grid( devices, shapes, batchsizes, in_channels, out_channels, ksizes, strides, paddings, dilations): if all([s > 1, d > 1]): continue # don't support this. device = torch.device(dev) num_points = [1000] * bs # when data contains negative, sparse maxpool is not equal to dense maxpool. sparse_dict = generate_sparse_data(shape, num_points, IC, data_range=[0.1, 1]) features = np.ascontiguousarray(sparse_dict["features"]).astype( np.float32) indices = np.ascontiguousarray( sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features_dense = sparse_dict["features_dense"].astype(np.float32) filters = np.random.uniform(0, 1, size=[k, k, k, IC, OC]).astype(np.float32) indices_t = torch.from_numpy(indices).int().to(device) features_t = torch.from_numpy(features).to(device) features_t.requires_grad = True features_dense_t = torch.from_numpy(features_dense).to(device) features_dense_t.requires_grad = True net = SparseMaxPoolTestTorch(1, 3, shape, k, s, p, d).to(device) net_ref = MaxPool3dTestTorch(1, 3, shape, k, s, p, d).to(device) out_ref = net_ref(features_dense_t) out = net(features_t, indices_t, bs) outids = out.indices outfeatures = out.features outids_dev = outids.float() out_dense = out.dense(channels_first=False) out = out_dense.permute(0, 4, 1, 2, 3).contiguous() dout_sparse = np.random.uniform( -0.2, 0.2, outfeatures.shape).astype(features.dtype) dout_sparse_t = torch.from_numpy(dout_sparse).to(device) dout_t = scatter_nd(outids.long(), dout_sparse_t, list(out_dense.shape)) dout_t = dout_t.permute(0, 4, 1, 2, 3).contiguous() out.backward(dout_t) out_ref.backward(dout_t) din_dense = features_dense_t.grad.detach().permute(0, 2, 3, 4, 
1).contiguous() din_sparse = gather_nd(din_dense, indices_t.long()) din = features_t.grad.detach() out_np = out.detach().cpu().numpy() out_ref_np = out_ref.detach().cpu().numpy() self.assertAllClose(out_np, out_ref_np, atol=1e-4) din_np = din.cpu().numpy() din_sparse_np = din_sparse.cpu().numpy() self.assertAllClose(din_np, din_sparse_np, atol=1e-4) def main(algo=spconv.ConvAlgo.Native, dtype=torch.float32): # function for develop. np.random.seed(484) # devices = ["cuda:0"] devices = ["cuda:0"] shapes = [[400, 400, 15]] batchsizes = [2] in_channels = [32] out_channels = [64] ksizes = [(3, 3, 3)] strides = [1] paddings = [0] dilations = [1] for dev, shape, bs, IC, OC, k, s, p, d in params_grid( devices, shapes, batchsizes, in_channels, out_channels, ksizes, strides, paddings, dilations): if all([s > 1, d > 1]): continue device = torch.device(dev) num_points = [30000] * bs sparse_dict = generate_sparse_data(shape, num_points, IC) features = np.ascontiguousarray(sparse_dict["features"]).astype( np.float32) indices = np.ascontiguousarray( sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features_dense = sparse_dict["features_dense"].astype(np.float32) indices_t = torch.from_numpy(indices) filters = np.random.uniform(0, 1, size=[k[0], 1, 1, IC, OC]).astype(np.float32) indices_t = torch.from_numpy(indices).int().to(device).to(dtype) features_t = torch.from_numpy(features).to(device).to(dtype) features_dense_t = torch.from_numpy(features_dense).to(device).to( dtype) net = SparseConv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d, algo=algo).to(device).to(dtype) net_ref = Conv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d).to(device).to(dtype) filters_t = torch.from_numpy(filters).to(device).to(dtype) net_ref.net[0].weight[:] = filters_t.permute(4, 3, 0, 1, 2).contiguous() net.net[0].weight[:] = filters_t out_ref = net_ref(features_dense_t) times = [] for i in range(10): t = time.time() out = net(features_t, indices_t, bs) torch.cuda.synchronize() 
times.append(time.time() - t) # print((net.grid == -1).float().sum(), net.grid.numel()) # print("spconv time", time.time() - t) print("spconv time", np.mean(times[2:])) out = net(features_t, indices_t, bs) # print(out.indices) out = out.dense() out_numpy = out.detach().cpu().numpy() print( np.linalg.norm(out.detach().cpu().numpy() - out_ref.detach().cpu().numpy())) print(out_numpy.min(), out_numpy.max(), out_numpy.mean(), out_numpy.sum()) def main_subm(algo, dtype=torch.float32): # function for develop. np.random.seed(484) torch.manual_seed(50051) # devices = ["cuda:0"] devices = ["cuda:0"] shapes = [[400, 400, 15]] batchsizes = [2] in_channels = [32] out_channels = [64] ksizes = [(3, 3, 3)] strides = [1] paddings = [1] dilations = [1] for dev, shape, bs, IC, OC, k, s, p, d in params_grid( devices, shapes, batchsizes, in_channels, out_channels, ksizes, strides, paddings, dilations): if all([s > 1, d > 1]): continue device = torch.device(dev) num_points = [120000] * bs sparse_dict = generate_sparse_data(shape, num_points, IC) features = np.ascontiguousarray(sparse_dict["features"]).astype( np.float32) indices = np.ascontiguousarray( sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32) features_dense = sparse_dict["features_dense"].astype(np.float32) indices_t = torch.from_numpy(indices) filters = np.random.uniform(0, 1, size=[k[0], 1, 1, IC, OC]).astype(np.float32) indices_t = torch.from_numpy(indices).int().to(device).to(dtype) features_t = torch.from_numpy(features).to(device).to(dtype) features_dense_t = torch.from_numpy(features_dense).to(device).to( dtype) net = SubMConv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d, algo=algo).to(device).to(dtype) net_ref = Conv3dTestTorch(1, 3, shape, IC, OC, k, s, p, d).to(device).to(dtype) filters_t = torch.from_numpy(filters).to(device).to(dtype) net_ref.net[0].weight[:] = filters_t.permute(4, 3, 0, 1, 2).contiguous() net.net[0].weight[:] = filters_t out_ref = net_ref(features_dense_t) times = [] for i in range(20): t = 
time.time() out = net(features_t, indices_t, bs) torch.cuda.synchronize() times.append(time.time() - t) # print((net.grid == -1).float().sum(), net.grid.numel()) # print("spconv time", time.time() - t) print("spconv time", np.mean(times[10:])) out = net(features_t, indices_t, bs) # print(out.indices) out = out.dense() out_numpy = out.detach().cpu().numpy() # print( # np.linalg.norm(out.detach().cpu().numpy() - # out_ref.detach().cpu().numpy())) print(out_numpy.min(), out_numpy.max(), out_numpy.mean(), out_numpy.sum()) return out_numpy if __name__ == '__main__': main_subm(algo=spconv.ConvAlgo.Native, dtype=torch.float32) main_subm(algo=spconv.ConvAlgo.Native, dtype=torch.half) # TestCase().assertAllClose(out_my, out_ref) # unittest.main() # TestSpConv().testSpConv3d()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/fg_reconstructor.py
Python
import copy from pathlib import Path from types import SimpleNamespace from typing import List import camtools as ct import nksr import numpy as np import open3d as o3d import torch from pycg import vis from lit.containers.fg_object import FGObject from lit.extern.deepsdf.complete import DeepSDFEngine from lit.mvfg_utils import MVDeepSDFModel, fps_sampling from lit.recon_utils import bbox_to_corners, bbox_to_lineset, largest_cluster_mesh def _get_deepsdf_root(): import lit.extern.deepsdf if lit.extern.deepsdf.__path__: deepsdf_root = Path(lit.extern.deepsdf.__path__[0]) return deepsdf_root else: raise RuntimeError("No directory path found for the lit.extern.deepsdf package") class NKSRReconstructor: """ Reconstruct mesh given point cloud or point cloud with normals. """ def __init__(self): self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.nksr_reconstructor = nksr.Reconstructor(self.device) def reconstruct(self, points, normals=None): """ Reconstruct mesh given point cloud or point cloud with normals. Args: points: (N, 3), point cloud. normals: (N, 3), normals of the point cloud. Returns: mesh: open3d.geometry.TriangleMesh, reconstructed mesh. """ # Make a copy. points = np.copy(points) # Normalize points to unit cube. centroid = np.mean(points, axis=0) points -= centroid scale = np.max(np.linalg.norm(points, axis=1)) points /= scale if normals is None: # Estimate normals with Open3D. pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(points) pcd.estimate_normals() normals = np.asarray(pcd.normals) # Convert to torch. points = torch.from_numpy(points).float().to(self.device) normals = torch.from_numpy(normals).float().to(self.device) # Reconstruct. field = self.nksr_reconstructor.reconstruct( xyz=points, normal=normals, voxel_size=0.05, ) nskr_mesh = field.extract_dual_mesh(mise_iter=2) vertices = nskr_mesh.v.cpu().numpy() triangles = nskr_mesh.f.cpu().numpy() # Denormalize. 
vertices *= scale vertices += centroid # Convert to open3d. mesh = o3d.geometry.TriangleMesh() mesh.vertices = o3d.utility.Vector3dVector(vertices) mesh.triangles = o3d.utility.Vector3iVector(triangles) mesh.compute_vertex_normals() return mesh class BallPivotingReconstructor: """ Reconstruct mesh given point cloud or point cloud with ball pivoting. """ def __init__(self, radii=[0.05, 0.1, 0.2]): self.radii = radii def reconstruct(self, points): # Make a copy. points = np.copy(points) # Normalize points to unit cube. centroid = np.mean(points, axis=0) points -= centroid scale = np.max(np.linalg.norm(points, axis=1)) points /= scale # Convert to open3d. pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(points) pcd.estimate_normals() # Reconstruct. mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting( pcd, o3d.utility.DoubleVector(self.radii), ) # Denormalize. vertices = np.asarray(mesh.vertices) vertices *= scale vertices += centroid mesh.vertices = o3d.utility.Vector3dVector(vertices) mesh.compute_vertex_normals() return mesh class AlphaShapeReconstructor: """ Reconstruct mesh given point cloud or point cloud with alpha shape. """ def __init__(self, alpha=0.18): self.alpha = alpha def reconstruct(self, points): # Make a copy. points = np.copy(points) # Normalize points to unit cube. centroid = np.mean(points, axis=0) points -= centroid scale = np.max(np.linalg.norm(points, axis=1)) points /= scale # Convert to open3d. pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(points) pcd.estimate_normals() # Reconstruct. mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape( pcd, self.alpha, ) # Denormalize. 
vertices = np.asarray(mesh.vertices) vertices *= scale vertices += centroid mesh.vertices = o3d.utility.Vector3dVector(vertices) mesh.compute_vertex_normals() return mesh class PointCloudCompleter: """ Point Cloud Completer class with hard-coded config and model loading for KITTI point cloud completion. - config: cfgs/PCN_models/PoinTr.yaml - model: ckpts/KITTI.pth """ def __init__(self) -> None: # Hard-coded config and model paths. self.config_path = _pointr_root / "cfgs/PCN_models/PoinTr.yaml" self.ckpt_path = _pointr_root / "ckpts/KITTI.pth" if not self.config_path.is_file(): raise FileNotFoundError(f"Config file not found at {self.config_path}") if not self.ckpt_path.is_file(): raise FileNotFoundError(f"Model file not found at {self.ckpt_path}") # Load config and model. self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.config = cfg_from_yaml_file(self.config_path) self.model = builder.model_builder(self.config.model) builder.load_model(self.model, self.ckpt_path) self.model.to(self.device) self.model.eval() # Prepare transformations. self.transforms = Compose( [ { "callback": "NormalizeObjectPose", "parameters": { "input_keys": { "ptcloud": "points", "bbox": "bbox_corners", } }, "objects": [ "points", "bbox_corners", ], }, { "callback": "RandomSamplePoints", "parameters": {"n_points": 2048}, "objects": ["points"], }, { "callback": "ToTensor", "objects": [ "points", "bbox_corners", ], }, ] ) self.inverse_transforms = Compose( [ { "callback": "InverseNormalizeObjectPose", "parameters": { "input_keys": { "ptcloud": "points", "bbox": "bbox_corners", } }, "objects": [ "points", "bbox_corners", ], }, { "callback": "ToTensor", "objects": [ "points", "bbox_corners", ], }, ] ) def complete(self, src_points, src_bbox_corners): """ Complete a point cloud. Normalization is critical. See: - datasets/KITTIDataset.py - datasets/data_transforms.py Args: src_points: (N, 3), point cloud to complete. 
bbox_corners: (8, 3), corners of a bounding box defined in PCN format. Returns: dst_points: (N, 3), completed point cloud. """ # Normalize. src_data = { "points": src_points.copy(), "bbox_corners": src_bbox_corners.copy(), } src_data = self.transforms(src_data) src_points = src_data["points"] # Inference ret = self.model(src_points.unsqueeze(0).to(self.device)) dst_points = ret[-1].squeeze(0).detach().cpu().numpy() # Inverse normalize. dst_data = { "points": dst_points.copy(), "bbox_corners": src_bbox_corners.copy(), } dst_data = self.inverse_transforms(dst_data) dst_points = dst_data["points"] # Remove statistical outliers. dst_pcd = o3d.geometry.PointCloud() dst_pcd.points = o3d.utility.Vector3dVector(dst_points) dst_pcd, _ = dst_pcd.remove_statistical_outlier( nb_neighbors=20, std_ratio=2.0, ) dst_points = np.asarray(dst_pcd.points) return dst_points class FGReconstructor: """ Foreground reconstructors. """ def __init__( self, use_deepsdf: bool = True, use_mvdeepsdf: bool = False, deepsdf_params: dict = None, ) -> None: self.device = torch.device("cuda:0") # self.completer = PointCloudCompleter() self.nksr_reconstructor = NKSRReconstructor() self.ball_reconstructor = BallPivotingReconstructor() self.alph_reconstructor = AlphaShapeReconstructor(alpha=0.20) # self.sap_reconstructor = SAPReconstructor(max_bbox_extend=0.95) # DeepSDF engine. 
dp = SimpleNamespace() dp.iters = 200 dp.lr = 1e-4 dp.num_samples = 8000 dp.voxel_resolution = 128 dp.max_batch = 32**3 dp.l2reg = True dp.clamp_dist = 0.1 if deepsdf_params is not None: for k, v in deepsdf_params.items(): setattr(dp, k, v) print(f"FGReconstructor: deepsdf_params={dp}") deepsdf_root = _get_deepsdf_root() self.deepsdf_engine = DeepSDFEngine( specs_path=deepsdf_root / "packaged" / "specs.json", ckpt_path=deepsdf_root / "packaged" / "ckpt.pth", mean_latent_path=deepsdf_root / "packaged" / "mean_latent.pth", iters=dp.iters, lr=dp.lr, num_samples=dp.num_samples, voxel_resolution=dp.voxel_resolution, max_batch=dp.max_batch, l2reg=dp.l2reg, clamp_dist=dp.clamp_dist, ) # v1: use_deepsdf = False. # v2: use_deepsdf = True. # v3: use_deepsdf = True. self.use_deepsdf = use_deepsdf print(f"FGReconstructor: use_deepsdf={self.use_deepsdf}") if use_mvdeepsdf and not use_deepsdf: raise ValueError("use_mvdeepsdf requires use_deepsdf to be True.") self.use_mvdeepsdf = use_mvdeepsdf if self.use_mvdeepsdf: self.mvdeepsdf_model = MVDeepSDFModel( ckpt_path=_lit_root / "tools/mvdeepsdf_log/default/ckpts/0050.pth" ) self.mvdeepsdf_model.eval() self.mvdeepsdf_model.to(self.device) @staticmethod def resize_mesh_to_fit_bbox(mesh, axis_aligned_centered_corners): """ Resize mesh to fit tightly within bbox_corners. Args: mesh: open3d.geometry.TriangleMesh axis_aligned_centered_corners: (8, 3), corners of bounding boxes. This is used to normalize the points for point cloud completion. The corners are assumed to be: - axis-aligned - centered around the origin Returns: open3d.geometry.TriangleMesh """ vertices = np.asarray(mesh.vertices) # Calculate the size of the bounding box and the mesh. 
bbox_min_bound = axis_aligned_centered_corners.min(axis=0) bbox_max_bound = axis_aligned_centered_corners.max(axis=0) bbox_extents = bbox_max_bound - bbox_min_bound mesh_min_bound = vertices.min(axis=0) mesh_max_bound = vertices.max(axis=0) mesh_extents = mesh_max_bound - mesh_min_bound # Move the mesh to the origin, scale it, and move it to the correct position. scale_factors = bbox_extents / mesh_extents vertices = (vertices - mesh_min_bound) * scale_factors + bbox_min_bound np.testing.assert_allclose(vertices.min(axis=0), bbox_min_bound) np.testing.assert_allclose(vertices.max(axis=0), bbox_max_bound) # Create a new mesh. scaled_mesh = o3d.geometry.TriangleMesh() scaled_mesh.vertices = o3d.utility.Vector3dVector(vertices) scaled_mesh.triangles = mesh.triangles return scaled_mesh @staticmethod def _rotate_canonical_to_shapenet(points: np.ndarray): """ Rotate points from our (Waymo) convention to ShapeNet convention. """ if points.ndim != 2 or points.shape[1] != 3: raise ValueError("Input points array must have shape (N, 3)") R = np.array( [ [0, -1, 0], [0, 0, 1], [-1, 0, 0], ] ) transform = np.eye(4) transform[:3, :3] = R return ct.transform.transform_points(points, transform) @staticmethod def _rotate_shapenet_to_canonical(points: np.ndarray): """ Rotate points from ShapeNet convention back to our (Waymo) convention. """ if points.ndim != 2 or points.shape[1] != 3: raise ValueError("Input points array must have shape (N, 3)") R = np.array( [ [0, 0, -1], [-1, 0, 0], [0, 1, 0], ] ) transform = np.eye(4) transform[:3, :3] = R return ct.transform.transform_points(points, transform) def recon_deepsdf( self, mv_canonical_points: List[np.ndarray], fused_canonical_points: np.ndarray, axis_aligned_centered_corners: np.ndarray, ): """ Reconstruct fused foreground points, with DeepSDF-based method. Visualize with an additional heading line. Args: points: (N, 3), foreground points. axis_aligned_centered_corners: (8, 3), corners of bounding boxes. 
This is used to normalize the points for point cloud completion. The corners are assumed to be: - axis-aligned - centered around the origin """ if fused_canonical_points.ndim != 2 or fused_canonical_points.shape[1] != 3: raise ValueError("Input points array must have shape (N, 3)") if axis_aligned_centered_corners.shape != (8, 3): raise ValueError("Input corners array must have shape (8, 3)") # Remove statistical outliers. fused_canonical_pcd = o3d.geometry.PointCloud() fused_canonical_pcd.points = o3d.utility.Vector3dVector(fused_canonical_points) fused_canonical_pcd, _ = fused_canonical_pcd.remove_statistical_outlier( nb_neighbors=20, std_ratio=1.0, ) if len(fused_canonical_pcd.points) > 0: fused_canonical_points = np.asarray(fused_canonical_pcd.points) # Rotate waymo->shapenet. mesh = self.deepsdf_engine.canonical_points_to_mesh( fused_canonical_points, do_normalize=True ) mesh.compute_vertex_normals() if self.use_mvdeepsdf: # Prepare inputs for self.mvdeepsdf_model. fgmv = ForegroundMultiView( mv_canonical_points=mv_canonical_points, fused_canonical_points=fused_canonical_points, ) mv_deepsdf_points = [] mv_enforced_fps_deepsdf_points = [] for canonical_points in fgmv.mv_canonical_points: deepsdf_points = fgmv.one_step_canonical_to_shapenet(canonical_points) mv_deepsdf_points.append(deepsdf_points) enforced_fps_deepsdf_points = fps_sampling( points=deepsdf_points, num_fps_samples=256, allow_fewer_points=False, ) mv_enforced_fps_deepsdf_points.append(enforced_fps_deepsdf_points) mv_enforced_fps_deepsdf_points = np.array( mv_enforced_fps_deepsdf_points, dtype=np.float32 ) # (B, 256, 3) -> (B, 3, 256) mv_points = mv_enforced_fps_deepsdf_points.transpose(0, 2, 1) fused_deepsdf_points = np.concatenate(mv_deepsdf_points, axis=0) fused_deepsdf_latent = self.deepsdf_engine._optimize_latent( fused_deepsdf_points, verbose=False, ) fused_deepsdf_latent = fused_deepsdf_latent.detach().cpu().numpy().flatten() mv_points = torch.from_numpy(mv_points).float().to(self.device) 
fused_deepsdf_latent = ( torch.from_numpy(fused_deepsdf_latent).float().to(self.device) ) # Run MV-DeepSDF inference. pd_latent = self.mvdeepsdf_model( fused_deepsdf_latent=fused_deepsdf_latent, mv_points=mv_points, ) pd_latent = pd_latent.detach().cpu().numpy().flatten() # Recon mesh with pd_latent mesh_mv = self.deepsdf_engine.np_latent_to_mesh(pd_latent) mesh_mv = fgmv.one_step_shapenet_to_canonical(mesh_mv) # Visualize visualize_mv_recon = True if visualize_mv_recon: mesh.compute_vertex_normals() mesh_mv.compute_vertex_normals() axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0) _ = vis.show_3d( [mesh, axes], [mesh_mv, axes], ) # Replace mesh with mv mesh. mesh = mesh_mv # Rescale to fit bbox. mesh = FGReconstructor.resize_mesh_to_fit_bbox( mesh, axis_aligned_centered_corners=axis_aligned_centered_corners, ) visualize_deepsdf = False if visualize_deepsdf: # Centered pointcloud. pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(fused_canonical_points) # Centered corners lineset. lines = np.array( [ [0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [1, 5], [2, 6], [3, 7], ] ) ls = o3d.geometry.LineSet() ls.points = o3d.utility.Vector3dVector(axis_aligned_centered_corners) ls.lines = o3d.utility.Vector2iVector(lines) ls.colors = o3d.utility.Vector3dVector([[0, 0, 0]] * len(lines)) # Heading direction lineset. 
top_center = np.mean(axis_aligned_centered_corners[4:8], axis=0) top_front_center = ( axis_aligned_centered_corners[4] + axis_aligned_centered_corners[5] ) / 2 heading_ls = o3d.geometry.LineSet() heading_ls.points = o3d.utility.Vector3dVector( [top_center, top_front_center] ) heading_ls.lines = o3d.utility.Vector2iVector(np.array([[0, 1]])) heading_ls.colors = o3d.utility.Vector3dVector([[1, 0, 0]]) ls += heading_ls mesh.compute_vertex_normals() axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0) o3d.visualization.draw_geometries( [pcd, ls, axes, mesh], window_name="Recon Inputs" ) return mesh def recon( self, points: np.ndarray, axis_aligned_centered_corners: np.ndarray, ): """ Reconstruct foreground points. Args: points: (N, 3), foreground points. axis_aligned_centered_corners: (8, 3), corners of bounding boxes. This is used to normalize the points for point cloud completion. The corners are assumed to be: - axis-aligned - centered around the origin Return: o3d.geometry.TriangleMesh """ # Complete with PoinTr. points_completed = self.completer.complete( src_points=points, src_bbox_corners=axis_aligned_centered_corners, ) # Reconstruct with AlphaShape and sample points from surface. alpha_mesh = self.alph_reconstructor.reconstruct(points_completed) alpha_mesh = largest_cluster_mesh(alpha_mesh) alpha_mesh_pcd = alpha_mesh.sample_points_poisson_disk(5000) # Reconstruct with ShapeAsPoints. alpha_mesh_points = np.asarray(alpha_mesh_pcd.points) sap_mesh = self.sap_reconstructor.reconstruct(alpha_mesh_points) # Scale to fit bbox. mesh = FGReconstructor.resize_mesh_to_fit_bbox( sap_mesh, axis_aligned_centered_corners=axis_aligned_centered_corners, ) visualize_fg_recon_steps = False if visualize_fg_recon_steps: # Clone. alpha_mesh_clone = copy.deepcopy(alpha_mesh) alpha_mesh_pcd_clone = copy.deepcopy(alpha_mesh_pcd) sap_mesh_clone = copy.deepcopy(sap_mesh) mesh_clone = copy.deepcopy(mesh) # Shift up. 
shift_up = np.array( [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 4], [0, 0, 0, 1]] ) alpha_mesh_clone.transform(shift_up).transform(shift_up).transform(shift_up) alpha_mesh_pcd_clone.transform(shift_up).transform(shift_up) sap_mesh_clone.transform(shift_up) # BBox as lineset. lines = np.array( [ [0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [1, 5], [2, 6], [3, 7], ] ) ls = o3d.geometry.LineSet() ls.points = o3d.utility.Vector3dVector(axis_aligned_centered_corners) ls.lines = o3d.utility.Vector2iVector(lines) # Visualize. alpha_mesh_clone.compute_vertex_normals() sap_mesh_clone.compute_vertex_normals() mesh_clone.compute_vertex_normals() axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0) o3d.visualization.draw_geometries( [ axes, alpha_mesh_clone, alpha_mesh_pcd_clone, sap_mesh_clone, mesh_clone, ls, ] ) return mesh def recon_fg_object(self, fg_object: FGObject): """ Reconstruct an FGObject. Args: fg_object: The fg_object to reconstruct. Returns: o3d.geometry.TriangleMesh """ # Collect centered points for one foreground object. mv_canonical_points = [] # List of (X, 3) for fg_box in fg_object.fg_boxes: pseudo_pose = fg_box.compute_local_pseudo_pose() pseudo_T = ct.convert.pose_to_T(pseudo_pose) canonical_points = ct.transform.transform_points( fg_box.local_points, pseudo_T ) mv_canonical_points.append(canonical_points) # (N, 3) fused_canonical_points = np.concatenate(mv_canonical_points, axis=0) # Collect centered corners. # - `fused_centered_corners` is a list that contains the corners of the # bounding boxes of each FGBox in the FGObject. # - These corners are transformed to be centered around the origin using # the pseudo pose of each FGBox. # - The purpose of `fused_centered_corners` is to compute the average # corners of all the bounding boxes in the FGObject. # - One reason for using "corners" rather than bboxes directly is that # bboxes cannot be freely transformed. Bboxes has limited degrees of # freedom. 
But, if the bboxes are all very similar, then it might be # feasible to use averaged bboxes directly (todo in the future). fused_centered_corners = [] for fg_box in fg_object.fg_boxes: pseudo_pose = fg_box.compute_local_pseudo_pose() pseudo_T = ct.convert.pose_to_T(pseudo_pose) local_corners = bbox_to_corners(fg_box.local_bbox) centered_corners = ct.transform.transform_points(local_corners, pseudo_T) fused_centered_corners.append(centered_corners) # (M, 8, 3) fused_centered_corners = np.array(fused_centered_corners) avg_centered_corners = np.mean(fused_centered_corners, axis=0) # Reconstruct v1 or v4. if self.use_deepsdf: mesh = self.recon_deepsdf( mv_canonical_points=mv_canonical_points, fused_canonical_points=fused_canonical_points, axis_aligned_centered_corners=avg_centered_corners, ) else: mesh = self.recon( points=fused_canonical_points, axis_aligned_centered_corners=avg_centered_corners, ) # Visualize centered ls. visualize_ls = False if visualize_ls: # Centered pointcloud. pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(fused_canonical_points) # Centered lineset. center_ls = o3d.geometry.LineSet() num_ls = 0 for fg_box in fg_object.fg_boxes: ls_bbox = bbox_to_lineset(fg_box.local_bbox) ls_bbox.transform(np.linalg.inv(fg_box.compute_local_pseudo_pose())) center_ls += ls_bbox num_ls += 1 print(f"Visualizing {num_ls} linesets.") axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0) o3d.visualization.draw_geometries([pcd, center_ls, axes]) # Visualize original ls (in corresponding frames). 
visualize_original_ls = False if visualize_original_ls: sample_every_n = 4 num_samples = 5 selected_fg_boxes = fg_object.fg_boxes[::sample_every_n][:num_samples] original_ls = o3d.geometry.LineSet() num_ls = 0 for fg_box in selected_fg_boxes: ls_bbox = bbox_to_lineset(fg_box.local_bbox) ls_bbox.transform(fg_box.frame_pose) original_ls += ls_bbox num_ls += 1 # fg_box.local_points transformed to world coord with frame_pose original_pcd = o3d.geometry.PointCloud() for fg_box in selected_fg_boxes: pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(fg_box.local_points) pcd.transform(fg_box.frame_pose) original_pcd += pcd print(f"Visualizing {num_ls} linesets.") o3d.visualization.draw_geometries([original_ls, original_pcd]) # Visualize mesh. visualize_mesh = False if visualize_mesh: world_ls = o3d.geometry.LineSet() world_mesh = o3d.geometry.TriangleMesh() for fg_box in fg_object.fg_boxes: pseudo_pose = fg_box.compute_local_pseudo_pose() # Ls. Local (frame) coord -> world coord. frame_ls = bbox_to_lineset(fg_box.local_bbox) frame_ls.transform(fg_box.frame_pose) world_ls += frame_ls # Mesh. Centered coord -> local (frame) coord -> world cord. frame_mesh = copy.deepcopy(mesh) frame_mesh.transform(fg_box.frame_pose @ pseudo_pose) frame_mesh.compute_vertex_normals() world_mesh += frame_mesh o3d.visualization.draw_geometries([world_mesh, world_ls]) return mesh
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/lidar.py
Python
import pickle from abc import ABC from dataclasses import dataclass, field from pathlib import Path from typing import List import numpy as np import open3d as o3d @dataclass class LidarIntrinsics(ABC): """ Abstract class for lidar intrinsics. Ref: https://www.mathworks.com/help/lidar/ref/lidarparameters.html """ fov_up: float # Positive value. fov_down: float # Positive value. vertical_res: int horizontal_res: int max_range: float vertical_degrees: List[float] = None @dataclass class WaymoLidarIntrinsics(LidarIntrinsics): """ Waymo lidar intrinsics. Ref: https://arxiv.org/abs/1912.04838 """ fov_up: float = 2.4 # Table 2 in paper. fov_down: float = 17.6 # Table 2 in paper. vertical_res: int = 64 # Waymo range image. 200 for small lidars. horizontal_res: int = 2650 # Waymo range image. 600 for small lidars. max_range: float = 75 # Table 2 in paper. vertical_degrees: List[float] = None @dataclass class NuScenesLidarIntrinsics(LidarIntrinsics): """ NuScenes lidar intrinsics. https://velodynelidar.com/wp-content/uploads/2019/12/97-0038-Rev-N-97-0038-DATASHEETWEBHDL32E_Web.pdf - 32 Channels - Measurement Range: Up to 100 m (70 used in paper) - Range Accuracy: Up to ±2 cm (Typical)1 - Single and Dual Returns (Strongest, Last) - Field of View (Vertical): +10.67° to -30.67° (41.33°) - Angular Resolution (Vertical): 1.33° - Field of View (Horizontal): 360° - Angular Resolution (Horizontal/Azimuth): 0.08° - 0.33° - Rotation Rate: 5 Hz - 20 Hz - 5 Hz: higher resolution - 20 Hz: higher frame rate <-> 0.33° <-> 360 / 0.33 = 1090 - Integrated Web Server for Easy Monitoring and Configuration """ # As vertical_degrees is used, fov_up and fov_down are used to filter # rays that are out-of-range. fov_up: float = 11.3139 # 10.65520 + (10.65520 - 9.33780) * 0.5 fov_down: float = 31.36527 # 30.68386 + (30.68386 - 29.32104) * 0.5 # These are the values from the spec. # fov_up: float = 10.67 # Table 2 in paper (10). # fov_down: float = 30.67 # Table 2 in paper (-30). 
vertical_res: int = 32 # Table 2 in paper. horizontal_res: int = 1090 # From spec. max_range: float = 70 # Table 2 in paper. vertical_degrees: List[float] = field( default_factory=lambda: [ 10.65520, 9.33780, 7.97498, 6.65758, 5.34018, 3.97735, 2.65996, 1.34256, -0.02027, -1.33767, -2.65507, -4.01789, -5.33529, -6.65269, -8.01552, -9.33292, -10.65032, -12.01314, -13.33054, -14.64794, -16.01077, -17.32817, -18.64557, -20.00839, -21.32579, -22.64319, -24.00602, -25.32341, -26.64081, -28.00364, -29.32104, -30.68386, ] ) def __post_init__(self): assert len(self.vertical_degrees) == self.vertical_res @dataclass class NuScenesVanillaLidarIntrinsics(LidarIntrinsics): """ NuScenes lidar intrinsics without vertical_degrees. In this way, the lidar rays are evenly distributed in the vertical direction. """ # https://velodynelidar.com/wp-content/uploads/2019/12/97-0038-Rev-N-97-0038-DATASHEETWEBHDL32E_Web.pdf fov_up: float = 10.67 fov_down: float = 30.67 # These are the values from the spec. # fov_up: float = 10.67 # Table 2 in paper (10). # fov_down: float = 30.67 # Table 2 in paper (-30). vertical_res: int = 32 # Table 2 in paper. horizontal_res: int = 1090 # From spec. max_range: float = 70 # Table 2 in paper. vertical_degrees: List[float] = None @dataclass class KITTILidarIntrinsics(LidarIntrinsics): """ KITTI lidar intrinsics. Ref: - Vision meets Robotics: The KITTI Dataset https://www.cvlibs.net/publications/Geiger2013IJRR.pdf - Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite https://www.cvlibs.net/publications/Geiger2012CVPR.pdf - KITTI Setup https://www.cvlibs.net/datasets/kitti/setup.php - Lidar Distillation https://arxiv.org/abs/2203.14956 """ fov_up: float = 3.2 # Lidar Distillation. fov_down: float = 23.6 # Lidar Distillation. vertical_res: int = 64 # Vision meets Robotics: The KITTI Dataset. horizontal_res: int = 1863 # Lidar Distillation. max_range: float = 120 # Velodyne HDL-64E's theoretical range. 
vertical_degrees: List[float] = None @dataclass class Lidar: """ A Lidar object is parametrized by its intrinsics and pose. A Lidar can intersect meshes and return the intersection points. """ intrinsics: LidarIntrinsics = None pose: np.ndarray = None def __post_init__(self): pass def get_rays(self) -> np.ndarray: """ Get the lidar rays in world coordinates. Returns: rays: (N, 6) float32. - rays[:, :3]: origin in world coordinates. - rays[:, 3:]: direction in world coordinates. - Rays are normalized (norm of direction = 1). """ assert isinstance(self.intrinsics, LidarIntrinsics) assert isinstance(self.pose, np.ndarray) if ( self.intrinsics.vertical_degrees is None or len(self.intrinsics.vertical_degrees) == 0 ): rays_o, rays_d = Lidar._gen_lidar_rays( pose=self.pose, fov_up=self.intrinsics.fov_up, fov_down=self.intrinsics.fov_down, H=self.intrinsics.vertical_res, W=self.intrinsics.horizontal_res, ) else: rays_o, rays_d = Lidar._gen_lidar_rays_with_vertical_degrees( pose=self.pose, vertical_degrees=self.intrinsics.vertical_degrees, W=self.intrinsics.horizontal_res, ) rays = np.concatenate([rays_o, rays_d], axis=-1) return rays @staticmethod def _gen_lidar_rays(pose, fov_up, fov_down, H, W): """ Get lidar rays for a single pose using NumPy, with separate upward and downward fields of view. The function generates rays in row-major order, meaning that rays are ordered as they would appear in an image, with rows being contiguous. Args: pose: [4, 4] array, camera-to-world transformation matrix. fov_up: float, the upward field of view in degrees. fov_down: float, the downward field of view in degrees. H: int, vertical resolution of the lidar sensor. W: int, horizontal resolution of the lidar sensor. Returns: A tuple of (rays_o, rays_d) rays_o: [N, 3] array, the origins of the lidar rays. The ordering of the rays is row-major, i.e., rays_o[0] corresponds to the top-left corner pixel of the range image, and rays_o[N-1] to the bottom-right corner. 
rays_d: [N, 3] array, the directions of the lidar rays, corresponding to the origins in rays_o. """ # Creating a meshgrid for horizontal and vertical indices. j, i = np.meshgrid(np.arange(H), np.arange(W), indexing="ij") # Reshaping indices for ray calculation. i = i.reshape([H * W]) j = j.reshape([H * W]) # Calculating horizontal and vertical angles for each ray. total_fov = fov_up + fov_down beta = -(i - W / 2) / W * 2 * np.pi alpha = (fov_up - j / H * total_fov) / 180 * np.pi # Calculating direction vectors for each ray. directions = np.stack( [ np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta), np.sin(alpha), ], axis=-1, ) # Transforming direction vectors with the pose. rays_d = np.dot(directions, pose[:3, :3].T) # (N, 3) rays_o = pose[:3, 3] # [3] # Expanding for each ray. rays_o = np.expand_dims(rays_o, axis=0).repeat(len(directions), axis=0) return rays_o, rays_d @staticmethod def _gen_lidar_rays_with_vertical_degrees(pose, vertical_degrees, W): """ Get lidar rays using specific vertical degrees for each ray, using NumPy. The function generates rays in row-major order, where rays are ordered as they would appear in an image, with rows being contiguous. Args: pose: [4, 4] array, camera-to-world transformation matrix. vertical_degrees: List[float], the vertical angles for each ray in degrees. W: int, the horizontal resolution of the lidar sensor. Returns: A tuple of (rays_o, rays_d) rays_o: [N, 3] array, the origins of the lidar rays. The ordering of the rays is row-major, i.e., rays_o[0] corresponds to the top-left corner pixel of the range image, and rays_o[N-1] to the bottom-right corner. rays_d: [N, 3] array, the directions of the lidar rays, corresponding to the origins in rays_o. """ H = len(vertical_degrees) # Creating a meshgrid for horizontal and vertical indices with 'ij' indexing. j, i = np.meshgrid(np.arange(H), np.arange(W), indexing="ij") # Reshaping indices for ray calculation. 
i = i.reshape([H * W]) j = j.reshape([H * W]) # Calculating horizontal and vertical angles for each ray. beta = -(i - W / 2) / W * 2 * np.pi alpha = np.array(vertical_degrees) / 180 * np.pi # Mapping vertical index to its corresponding angle. alpha = alpha[j] # Calculating direction vectors for each ray. directions = np.stack( [ np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta), np.sin(alpha), ], axis=-1, ) # Transforming direction vectors with the pose. rays_d = np.dot(directions, pose[:3, :3].T) # (N, 3) rays_o = pose[:3, 3] # [3] # Expanding for each ray. rays_o = np.expand_dims(rays_o, axis=0).repeat(H * W, axis=0) return rays_o, rays_d def main(): script_dir = Path(__file__).parent.absolute().resolve() lit_root = script_dir.parent.parent test_data_dir = lit_root / "data" / "test_data" raycast_data_path = test_data_dir / "raycast_data.pkl" raycast_mesh_path = test_data_dir / "raycast_mesh.ply" with open(raycast_data_path, "rb") as f: raycast_data = pickle.load(f) raycast_mesh = o3d.io.read_triangle_mesh(str(raycast_mesh_path)) raycast_mesh.compute_vertex_normals() # Plot points = raycast_data["points"] rays = raycast_data["rays"] pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(points) o3d.visualization.draw_geometries([raycast_mesh, pcd]) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/lidar_utils.py
Python
""" Lidar modeling. """ import numpy as np import torch from packaging import version as pver def custom_meshgrid(*args): if pver.parse(torch.__version__) < pver.parse("1.10"): return torch.meshgrid(*args) else: return torch.meshgrid(*args, indexing="ij") def crop_points(points, x_min, y_min, z_min, x_max, y_max, z_max, invert=False): def get_crop_indices(points, x_min, y_min, z_min, x_max, y_max, z_max, invert): crop_indices = np.nonzero( (points[:, 0] >= x_min) & (points[:, 0] <= x_max) & (points[:, 1] >= y_min) & (points[:, 1] <= y_max) & (points[:, 2] >= z_min) & (points[:, 2] <= z_max) )[0] if invert: crop_indices = np.setdiff1d(np.arange(len(points)), crop_indices) return crop_indices indices = get_crop_indices( points, x_min, y_min, z_min, x_max, y_max, z_max, invert=invert ) return points[indices] def lidar_to_pano_with_intensities_with_bbox_mask( local_points_with_intensities: np.ndarray, lidar_H: int, lidar_W: int, lidar_K: int, bbox_local: np.ndarray, max_depth=80, max_intensity=255.0, ): """ Convert lidar frame to pano frame with intensities with bbox_mask. Lidar points are in local coordinates. Args: local_points: (N, 4), float32, in lidar frame, with intensities. lidar_H: pano height. lidar_W: pano width. lidar_K: lidar intrinsics. bbox_local: (8x4), world bbox in local. max_depth: max depth in meters. max_intensity: max intensity. Return: pano: (H, W), float32. intensities: (H, W), float32. """ # Un pack. local_points = local_points_with_intensities[:, :3] local_point_intensities = local_points_with_intensities[:, 3] fov_up, fov = lidar_K fov_down = fov - fov_up # Compute dists to lidar center. dists = np.linalg.norm(local_points, axis=1) # Fill pano and intensities. 
pano = np.zeros((lidar_H, lidar_W)) intensities = np.zeros((lidar_H, lidar_W)) # bbox mask pano[:, :] = -1 r_min, r_max, c_min, c_max = 1e5, -1, 1e5, -1 for bbox_local_point in bbox_local: x, y, z, _ = bbox_local_point beta = np.pi - np.arctan2(y, x) alpha = np.arctan2(z, np.sqrt(x**2 + y**2)) + fov_down / 180 * np.pi c = int(round(beta / (2 * np.pi / lidar_W))) r = int(round(lidar_H - alpha / (fov / 180 * np.pi / lidar_H))) # Check out-of-bounds. if r >= lidar_H or r < 0 or c >= lidar_W or c < 0: continue else: r_min, r_max, c_min, c_max = ( min(r_min, r), max(r_max, r), min(c_min, c), max(c_max, c), ) pano[r_min:r_max, c_min:c_max] = 0 # Fill pano and intensities. for local_points, dist, local_point_intensity in zip( local_points, dists, local_point_intensities, ): # Check max depth. if dist >= max_depth: continue x, y, z = local_points beta = np.pi - np.arctan2(y, x) alpha = np.arctan2(z, np.sqrt(x**2 + y**2)) + fov_down / 180 * np.pi c = int(round(beta / (2 * np.pi / lidar_W))) r = int(round(lidar_H - alpha / (fov / 180 * np.pi / lidar_H))) # Check out-of-bounds. if r >= lidar_H or r < 0 or c >= lidar_W or c < 0: continue # Set to min dist if not set. if pano[r, c] == 0.0: pano[r, c] = dist intensities[r, c] = local_point_intensity / max_intensity elif pano[r, c] > dist: pano[r, c] = dist intensities[r, c] = local_point_intensity / max_intensity return pano, intensities def lidar_to_pano_with_intensities( local_points_with_intensities: np.ndarray, lidar_H: int, lidar_W: int, lidar_K: int, max_depth=80, ): """ Convert lidar frame to pano frame with intensities. Lidar points are in local coordinates. Args: local_points: (N, 4), float32, in lidar frame, with intensities. lidar_H: pano height. lidar_W: pano width. lidar_K: lidar intrinsics. max_depth: max depth in meters. Return: pano: (H, W), float32. intensities: (H, W), float32. """ # Un pack. 
local_points = local_points_with_intensities[:, :3] local_point_intensities = local_points_with_intensities[:, 3] fov_up, fov = lidar_K fov_down = fov - fov_up # Compute dists to lidar center. dists = np.linalg.norm(local_points, axis=1) # Fill pano and intensities. pano = np.zeros((lidar_H, lidar_W)) intensities = np.zeros((lidar_H, lidar_W)) for local_points, dist, local_point_intensity in zip( local_points, dists, local_point_intensities, ): # Check max depth. if dist >= max_depth: continue x, y, z = local_points beta = np.pi - np.arctan2(y, x) alpha = np.arctan2(z, np.sqrt(x**2 + y**2)) + fov_down / 180 * np.pi c = int(round(beta / (2 * np.pi / lidar_W))) r = int(round(lidar_H - alpha / (fov / 180 * np.pi / lidar_H))) # Check out-of-bounds. if r >= lidar_H or r < 0 or c >= lidar_W or c < 0: continue # Set to min dist if not set. if pano[r, c] == 0.0: pano[r, c] = dist intensities[r, c] = local_point_intensity elif pano[r, c] > dist: pano[r, c] = dist intensities[r, c] = local_point_intensity return pano, intensities def lidar_to_pano( local_points: np.ndarray, lidar_H: int, lidar_W: int, lidar_K: int, max_dpeth=80 ): """ Convert lidar frame to pano frame. Lidar points are in local coordinates. Args: local_points: (N, 3), float32, in lidar frame. lidar_H: pano height. lidar_W: pano width. lidar_K: lidar intrinsics. max_depth: max depth in meters. Return: pano: (H, W), float32. """ # (N, 3) -> (N, 4), filled with zeros. local_points_with_intensities = np.concatenate( [local_points, np.zeros((local_points.shape[0], 1))], axis=1 ) pano, _ = lidar_to_pano_with_intensities( local_points_with_intensities=local_points_with_intensities, lidar_H=lidar_H, lidar_W=lidar_W, lidar_K=lidar_K, max_dpeth=max_dpeth, ) return pano def pano_to_lidar_with_intensities(pano: np.ndarray, intensities, lidar_K): """ Args: pano: (H, W), float32. intensities: (H, W), float32. lidar_K: lidar intrinsics (fov_up, fov) Return: local_points_with_intensities: (N, 4), float32, in lidar frame. 
""" fov_up, fov = lidar_K H, W = pano.shape i, j = np.meshgrid( np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing="xy" ) beta = -(i - W / 2) / W * 2 * np.pi alpha = (fov_up - j / H * fov) / 180 * np.pi dirs = np.stack( [ np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta), np.sin(alpha), ], -1, ) local_points = dirs * pano.reshape(H, W, 1) # local_points: (H, W, 3) # intensities : (H, W) # local_points_with_intensities: (H, W, 4) local_points_with_intensities = np.concatenate( [local_points, intensities.reshape(H, W, 1)], axis=2 ) # Filter empty points. idx = np.where(pano != 0.0) local_points_with_intensities = local_points_with_intensities[idx] return local_points_with_intensities def pano_to_lidar(pano, lidar_K): """ Args: pano: (H, W), float32. lidar_K: lidar intrinsics (fov_up, fov) Return: local_points: (N, 3), float32, in lidar frame. """ local_points_with_intensities = pano_to_lidar_with_intensities( pano=pano, intensities=np.zeros_like(pano), lidar_K=lidar_K, ) return local_points_with_intensities[:, :3] def lidar_to_pano_with_intensities_fpa( local_points_with_intensities: np.ndarray, lidar_H: int, lidar_W: int, lidar_K: int, max_depth=80, z_buffer_len=10, ): """ Convert lidar frame to pano frame with intensities with bbox_mask. Lidar points are in local coordinates. Args: local_points: (N, 4), float32, in lidar frame, with intensities. lidar_H: pano height. lidar_W: pano width. lidar_K: lidar intrinsics. max_depth: max depth in meters. z_buffer_len: length of the z_buffer. Return: rangeview image: (H, W, 3), float32. """ # Un pack. local_points = local_points_with_intensities[:, :3] local_point_intensities = local_points_with_intensities[:, 3] fov_up, fov = lidar_K fov_down = fov - fov_up # Compute dists to lidar center. dists = np.linalg.norm(local_points, axis=1) # Fill pano and intensities. 
range_view = np.zeros((lidar_H, lidar_W, 3, z_buffer_len + 1)) for local_point, dist, local_point_intensity in zip( local_points, dists, local_point_intensities, ): # Check max depth. if dist >= max_depth: continue x, y, z = local_point beta = np.pi - np.arctan2(y, x) alpha = np.arctan2(z, np.sqrt(x**2 + y**2)) + fov_down / 180 * np.pi c = int(round(beta / (2 * np.pi / lidar_W))) r = int(round(lidar_H - alpha / (fov / 180 * np.pi / lidar_H))) if r >= lidar_H or r < 0 or c >= lidar_W or c < 0: continue position = range_view[r, c, 2, 0] + 1 if position > z_buffer_len: depth_z_buffer = list(range_view[r, c, 2][1:]) + [dist] intensity_z_buffer = list(range_view[r, c, 1][1:]) + [local_point_intensity] position = position - 1 sort_index = np.argsort(depth_z_buffer) depth_z_buffer = np.insert( np.array(depth_z_buffer)[sort_index][:z_buffer_len], 0, position ) intensity_z_buffer = np.insert( np.array(intensity_z_buffer)[sort_index][:z_buffer_len], 0, position ) range_view[r, c, 2] = depth_z_buffer range_view[r, c, 1] = intensity_z_buffer else: range_view[r, c, 2, int(position)] = dist range_view[r, c, 1, int(position)] = local_point_intensity range_view[r, c, 2, 0] = position range_view = parse_z_buffer(range_view, lidar_H, lidar_W) return range_view[:, :, 2], range_view[:, :, 1] def parse_z_buffer(novel_pano, lidar_H, lidar_W, threshold=0.2): range_view = np.zeros((lidar_H, lidar_W, 3)) for i in range(lidar_H): for j in range(lidar_W): range_pixel = novel_pano[i, j, 2] intensity_pixel = novel_pano[i, j, 1] z_buffer_num = int(range_pixel[0]) if z_buffer_num == 0: continue if z_buffer_num == 1: range_view[i][j][2] = range_pixel[1] range_view[i][j][1] = intensity_pixel[1] continue depth_z_buffer = range_pixel[1:z_buffer_num] cloest_points = min(depth_z_buffer) index = depth_z_buffer <= (cloest_points + threshold) final_depth_z_buffer = np.array(depth_z_buffer)[index] final_dis = np.average( final_depth_z_buffer, weights=1 / final_depth_z_buffer ) range_view[i][j][2] = 
final_dis intensity_z_buffer = intensity_pixel[1:z_buffer_num] final_intensity_z_buffer = np.array(intensity_z_buffer)[index] final_intensity = np.average( final_intensity_z_buffer, weights=1 / final_depth_z_buffer ) range_view[i][j][1] = final_intensity return range_view def main(): pass if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/mvfg_utils.py
Python
import pickle from pathlib import Path import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.utils.data from torch.utils.data import DataLoader, Dataset from pcdet.ops.pointnet2.pointnet2_stack.pointnet2_utils import furthest_point_sample def fps_sampling(points: np.ndarray, num_fps_samples: int, allow_fewer_points=True): """ Wrapper function for furthest point sampling on NumPy arrays. Args: points: (N, 3) NumPy array of points. num_fps_samples: int, number of points to sample. allow_fewer_points: bool, if True, allow fewer points to be sampled if the input points have fewer points than num_fps_samples, directly returning a clone of the points if there are not enough points to sample from. If False, first we augment the points by duplicating some of them, and this will guarantee (num_fps_samples, 3) is returned. Returns: sampled_points: (num_fps_samples, 3) NumPy array of the sampled points or the original points if allow_fewer_points is True and points have fewer points than num_fps_samples. 
""" assert isinstance(points, np.ndarray), "points must be a NumPy array" assert points.ndim == 2 and points.shape[1] == 3, "points must be (N, 3)" N = points.shape[0] if N <= num_fps_samples: if allow_fewer_points: return points.copy() else: additional_indices = np.random.choice(N, num_fps_samples - N, replace=True) points = np.concatenate([points, points[additional_indices]], axis=0) points_tensor = torch.from_numpy(points).float().cuda() points_tensor = points_tensor.unsqueeze(0).contiguous() # Add batch dimension sampled_indices_tensor = furthest_point_sample( points_tensor, num_fps_samples ).long() sampled_indices = sampled_indices_tensor[0].cpu().numpy() sampled_points = points[sampled_indices] return sampled_points class MVFGDataset(Dataset): def __init__(self, mvfg_dir): self.mvfg_dir = Path(mvfg_dir) self.mvfg_files = [] for scene_dir in self.mvfg_dir.iterdir(): if scene_dir.is_dir(): self.mvfg_files.extend(scene_dir.glob("*.pkl")) def __len__(self): return len(self.mvfg_files) def __getitem__(self, idx): mvfg_path = self.mvfg_files[idx] with open(mvfg_path, "rb") as f: fgmv = pickle.load(f) # Return as a dictionary return { "mv_enforced_fps_deepsdf_points": fgmv["mv_enforced_fps_deepsdf_points"], "fused_deepsdf_latent": fgmv["fused_deepsdf_latent"], "gt_latent": fgmv["gt_latent"], } def collate_fn(batch): mv_enforced_fps_deepsdf_points = torch.tensor( np.array([item["mv_enforced_fps_deepsdf_points"] for item in batch]), dtype=torch.float32, ) fused_deepsdf_latent = torch.stack( [ torch.tensor(item["fused_deepsdf_latent"], dtype=torch.float32) for item in batch ] ) gt_latent = torch.stack( [torch.tensor(item["gt_latent"], dtype=torch.float32) for item in batch] ) return { "mv_enforced_fps_deepsdf_points": mv_enforced_fps_deepsdf_points, "fused_deepsdf_latent": fused_deepsdf_latent, "gt_latent": gt_latent, } def get_mvfg_dataloader(mvfg_dir, batch_size=1, shuffle=True, num_workers=0): dataset = MVFGDataset(mvfg_dir) data_loader = DataLoader( dataset, 
batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn, ) return data_loader class SharedPCNEncoderV1(nn.Module): # 694,272 trainable params def __init__(self): super(SharedPCNEncoderV1, self).__init__() self.mlp1 = nn.Conv1d(3, 128, 1) self.mlp2 = nn.Conv1d(128, 256, 1) self.mlp3 = nn.Conv1d(256, 512, 1) self.mlp4 = nn.Conv1d(512, 1024, 1) self.bn1 = nn.BatchNorm1d(128) self.bn2 = nn.BatchNorm1d(256) self.bn3 = nn.BatchNorm1d(512) self.bn4 = nn.BatchNorm1d(1024) self.tanh = nn.Tanh() def forward(self, x): x = F.relu(self.bn1(self.mlp1(x))) x = F.relu(self.bn2(self.mlp2(x))) x = F.relu(self.bn3(self.mlp3(x))) x = F.relu(self.bn4(self.mlp4(x))) x = torch.max(x, 2, keepdim=True)[0] x = x.view(-1, 1024) x = self.tanh(x) return x class SharedPCNEncoderV2(nn.Module): # 822,784 trainable params def __init__(self): super(SharedPCNEncoderV2, self).__init__() self.mlp1 = nn.Conv1d(3, 128, 1) self.bn1 = nn.BatchNorm1d(128) self.mlp2 = nn.Conv1d(128, 256, 1) self.mlp3 = nn.Conv1d(512, 512, 1) self.bn3 = nn.BatchNorm1d(512) self.mlp4 = nn.Conv1d(512, 1024, 1) self.tanh = nn.Tanh() def forward(self, x): """ x: (B, 3, N) """ # (B, 128, N) x = F.relu(self.bn1(self.mlp1(x))) # (B, 256, N) x = self.mlp2(x) # (B, 256, 1) x_max_pooled = torch.max(x, 2, keepdim=True)[0] # (B, 256, N) x_repeated = x_max_pooled.repeat(1, 1, x.size(2)) # (B, 512, N) x = torch.cat((x, x_repeated), dim=1) # (B, 512, N) x = F.relu(self.bn3(self.mlp3(x))) # (B, 1024, N) x = self.mlp4(x) # MLP4 without BN and ReLU # (B, 1024) x = torch.max(x, 2, keepdim=False)[0] # (B, 1024) x = self.tanh(x) return x class MVDeepSDFModel(nn.Module): def __init__(self, ckpt_path=None): super(MVDeepSDFModel, self).__init__() self.shared_pc_encoder = SharedPCNEncoderV2() self.fc = nn.Linear(1280, 256) if ckpt_path: self._initialize_from_ckpt(ckpt_path) def forward(self, fused_deepsdf_latent, mv_points): """ Args: fused_deepsdf_latent: (256,) points: (B, 3, N) """ # Extract global features from 
mv_points global_features = self.shared_pc_encoder(mv_points) # Concatenate repeated deepsdf_latent with global features B = mv_points.size(0) fused_deepsdf_latent_repeated = fused_deepsdf_latent.repeat(B, 1) concatenated_features = torch.cat( (fused_deepsdf_latent_repeated, global_features), dim=1 ) # Average pooling across the batch dimension pooled_features = torch.mean(concatenated_features, dim=0, keepdim=True) # Get predicted latent code (pd_latent) pd_latent = self.fc(pooled_features) pd_latent = pd_latent.squeeze(0) # (1, 256) -> (256,) return pd_latent def _initialize_from_ckpt(self, ckpt_path): """ Initializes model weights from a given checkpoint path. """ try: print(f"Loading checkpoint '{ckpt_path}'") ckpt = torch.load(ckpt_path, map_location="cpu") if "state_dict" in ckpt: model_state_dict = self.state_dict() loaded_state_dict = ckpt["state_dict"] for name, param in loaded_state_dict.items(): if name not in model_state_dict: raise KeyError( f"Param '{name}' found in checkpoint but not in model parameters." ) if model_state_dict[name].shape != param.shape: raise ValueError( f"Shape mismatch for '{name}': model param " f"{model_state_dict[name].shape}, " f"checkpoint param {param.shape}." ) if model_state_dict[name].dtype != param.dtype: raise TypeError( f"Type mismatch for '{name}': model param " f"{model_state_dict[name].dtype}, " f"checkpoint param {param.dtype}." ) self.load_state_dict(loaded_state_dict) print("Loaded model weights from checkpoint") else: raise KeyError("Checkpoint does not contain 'state_dict' key") except Exception as e: raise RuntimeError( f"Failed to load checkpoint from '{ckpt_path}'. Error: {e}" )
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/network_utils.py
Python
import numpy as np import sklearn import torch class Embedder: def __init__(self, **kwargs): self.kwargs = kwargs self.create_embedding_fn() def create_embedding_fn(self): embed_fns = [] d = self.kwargs["input_dims"] out_dim = 0 if self.kwargs["include_input"]: embed_fns.append(lambda x: x) out_dim += d max_freq = self.kwargs["max_freq_log2"] N_freqs = self.kwargs["num_freqs"] if self.kwargs["log_sampling"]: freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs) else: freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs) for freq in freq_bands: for p_fn in self.kwargs["periodic_fns"]: embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) out_dim += d self.embed_fns = embed_fns self.out_dim = out_dim def embed(self, inputs): return torch.cat([fn(inputs) for fn in self.embed_fns], -1) def get_embed_fn(multires, input_dims=3): embed_kwargs = { "include_input": True, "input_dims": input_dims, "max_freq_log2": multires - 1, "num_freqs": multires, "log_sampling": True, "periodic_fns": [torch.sin, torch.cos], } embedder = Embedder(**embed_kwargs) embed_fn = lambda x, eo=embedder: eo.embed(x) out_dim = embedder.out_dim return embed_fn, out_dim def load_raykeep_dataset(file_path, split_ratio=0.8, balance=False): """ Load and split the ray keep dataset with an option to balance the dataset. Args: file_path (str): Path to the dataset file. split_ratio (float): Ratio of the dataset to use for training. balance (bool): Whether to balance the dataset by undersampling the majority class. """ # Save random state. np_random_state = np.random.get_state() np.random.seed(0) # Load dataset data = np.load(file_path) network_inputs = data["network_inputs"] network_outputs = data["network_outputs"] # Shuffle dataset shuffle_indices = np.random.permutation(len(network_inputs)) network_inputs = network_inputs[shuffle_indices] network_outputs = network_outputs[shuffle_indices] # Assert: check that the network_outputs is either 0 or 1. 
assert np.isin(network_outputs, [0, 1]).all() # Split train/val split_index = int(len(network_inputs) * split_ratio) train_inputs = network_inputs[:split_index] train_outputs = network_outputs[:split_index] val_inputs = network_inputs[split_index:] val_outputs = network_outputs[split_index:] # Balance training dataset if required if balance: keep_indices = np.where(train_outputs == 1)[0] drop_indices = np.where(train_outputs == 0)[0] if len(keep_indices) < len(drop_indices): drop_indices = np.random.choice( drop_indices, len(keep_indices), replace=False ) else: keep_indices = np.random.choice( keep_indices, len(drop_indices), replace=False ) balanced_indices = np.concatenate([keep_indices, drop_indices]) np.random.shuffle(balanced_indices) train_inputs = train_inputs[balanced_indices] train_outputs = train_outputs[balanced_indices] # Restore random state np.random.set_state(np_random_state) return train_inputs, train_outputs, val_inputs, val_outputs def eval_raykeep_metrics(pd_raykeeps: np.ndarray, gt_raykeeps: np.ndarray): """ Evaluate the performance of a ray keep prediction model. Args: pd_raykeeps (np.ndarray): Predicted ray keep values (probabilities rounded to 0 or 1). gt_raykeeps (np.ndarray): Ground truth ray keep values (0 or 1). Raises: ValueError: If input arrays are not 1D, have mismatched shapes, or contain incorrect data. 
""" # Check if inputs are numpy arrays and are 1D if not isinstance(pd_raykeeps, np.ndarray): raise ValueError("Inputs must be numpy arrays") if not isinstance(gt_raykeeps, np.ndarray): raise ValueError("Inputs must be numpy arrays") if pd_raykeeps.ndim != 1 or gt_raykeeps.ndim != 1: raise ValueError("Inputs must be 1-dimensional arrays") # Check if shapes of the input arrays match if pd_raykeeps.shape != gt_raykeeps.shape: raise ValueError("Shape of predicted and ground truth arrays must match") # Check if inputs are binary (0 or 1) if not (np.isin(pd_raykeeps, [0, 1]).all() and np.isin(gt_raykeeps, [0, 1]).all()): raise ValueError("Values in both arrays must be either 0 or 1") # Ensure inputs are integer type pd_raykeeps = pd_raykeeps.astype(np.int_) gt_raykeeps = gt_raykeeps.astype(np.int_) # Confusion Matrix and other metrics tn, fp, fn, tp = sklearn.metrics.confusion_matrix(gt_raykeeps, pd_raykeeps).ravel() precision = tp / (tp + fp) if (tp + fp) > 0 else 0 recall = tp / (tp + fn) if (tp + fn) > 0 else 0 f1_score = ( 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0 ) accuracy = sklearn.metrics.accuracy_score(gt_raykeeps, pd_raykeeps) gt_val_drop_ratio = 1 - np.mean(gt_raykeeps) pd_val_drop_ratio = 1 - np.mean(pd_raykeeps) # Print the evaluation metrics print(f"Precision : {precision:.03f}") print(f"Recall : {recall:.03f}") print(f"F1 score : {f1_score:.03f}") print(f"Accuracy : {accuracy:.03f}") print(f"Gt drop ratio : {gt_val_drop_ratio:.03f}") print(f"Pred drop ratio: {pd_val_drop_ratio:.03f}") def main(): pass if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/path_utils.py
Python
from dataclasses import dataclass, field from pathlib import Path from types import SimpleNamespace from typing import List, Union _script_dir = Path(__file__).parent.resolve().absolute() @dataclass class LitPaths: data_version: str = None data_domain: str = None scene_names: List[str] = field(default_factory=list) lit_data_root: Path = None scene_dir: Path = None fg_dir: Path = None bg_dir: Path = None sim_waymo_dir: Path = None sim_nuscenes_dir: Path = None sim_kitti_dir: Path = None # Private and static fields. _lit_root = _script_dir.parent _waymo_lit_data_root = _lit_root / "lit_data" / "waymo" _nuscenes_lit_data_root = _lit_root / "lit_data" / "nuscenes" _lit_split_dir = _lit_root / "lit_split" def __repr__(self): return ( f"LitPaths(\n" f" data_version={self.data_version}, \n" f" data_domain={self.data_domain}, \n" f" scene_names=[List of {len(self.scene_names)} scene names], \n" f" lit_data_root={self.lit_data_root}, \n" f" scene_dir={self.scene_dir}, \n" f" fg_dir={self.fg_dir}, \n" f" bg_dir={self.bg_dir}, \n" f" sim_waymo_dir={self.sim_waymo_dir}, \n" f" sim_nuscenes_dir={self.sim_nuscenes_dir}, \n" f" sim_kitti_dir={self.sim_kitti_dir},\n" f")" ) @staticmethod def _load_scene_names(lit_split_path: Path) -> List[str]: with open(lit_split_path, "r") as f: scene_names = f.read().splitlines() return scene_names @classmethod def from_relative_paths( cls, data_version: str, data_domain: str, scene_list_path_rel: Union[Path, str], scene_dir_rel: Union[Path, str], fg_dir_rel: Union[Path, str], bg_dir_rel: Union[Path, str], sim_waymo_dir_rel: Union[Path, str], sim_nuscenes_dir_rel: Union[Path, str], sim_kitti_dir_rel: Union[Path, str], ): if data_domain == "waymo": lit_data_root = LitPaths._waymo_lit_data_root elif data_domain == "nuscenes": lit_data_root = LitPaths._nuscenes_lit_data_root else: raise ValueError(f"Unknown data_domain: {data_domain}") # Load scene names. 
scene_list_path = LitPaths._lit_split_dir / scene_list_path_rel scene_names = LitPaths._load_scene_names(scene_list_path) # Construct LitPaths. lit_paths = cls( data_version=data_version, data_domain=data_domain, scene_names=scene_names, lit_data_root=lit_data_root, scene_dir=lit_data_root / scene_dir_rel, fg_dir=lit_data_root / fg_dir_rel, bg_dir=lit_data_root / bg_dir_rel, sim_waymo_dir=lit_data_root / sim_waymo_dir_rel, sim_nuscenes_dir=lit_data_root / sim_nuscenes_dir_rel, sim_kitti_dir=lit_data_root / sim_kitti_dir_rel, ) return lit_paths _lit_paths_versions = { # fmt: off # v0: full waymo/nuscenes scenes, with default reconstruction # - # Waymo scenes : 1000 # - # NuScenes scenes: 840 "v0": { "waymo": LitPaths.from_relative_paths( data_version = "v0", data_domain = "waymo", scene_list_path_rel = "waymo_scene_list_v0.txt", scene_dir_rel = "scene", fg_dir_rel = "fg_v0", bg_dir_rel = "bg_v0", sim_waymo_dir_rel = "sim_waymo_v0", sim_nuscenes_dir_rel = "sim_nuscenes_v0", sim_kitti_dir_rel = "sim_kitti_v0", ), "nuscenes": LitPaths.from_relative_paths( data_version = "v0", data_domain = "nuscenes", scene_list_path_rel = "nuscenes_scene_list_v0.txt", scene_dir_rel = "scene", fg_dir_rel = "fg_v0", bg_dir_rel = "bg_v0", sim_waymo_dir_rel = "sim_waymo_v0", sim_nuscenes_dir_rel = "sim_nuscenes_v0", sim_kitti_dir_rel = "sim_kitti_v0", ), }, # v1: a subset of scenes # - # Waymo scenes : 350 # - # NuScenes scenes: 350 "v1": { "waymo": LitPaths.from_relative_paths( data_version = "v1", data_domain = "waymo", scene_list_path_rel = "waymo_scene_list_v1.txt", scene_dir_rel = "scene", fg_dir_rel = "fg_v1", bg_dir_rel = "bg_v1", sim_waymo_dir_rel = "sim_waymo_v1", sim_nuscenes_dir_rel = "sim_nuscenes_v1", sim_kitti_dir_rel = "sim_kitti_v1", ), "nuscenes": LitPaths.from_relative_paths( data_version = "v1", data_domain = "nuscenes", scene_list_path_rel = "nuscenes_scene_list_v1.txt", scene_dir_rel = "scene", fg_dir_rel = "fg_v1", bg_dir_rel = "bg_v1", sim_waymo_dir_rel = 
"sim_waymo_v1", sim_nuscenes_dir_rel = "sim_nuscenes_v1", sim_kitti_dir_rel = "sim_kitti_v1", ), }, # fmt: on } def get_lit_paths(data_version: str, data_domain: str) -> SimpleNamespace: """ Return the lit_paths for a given data_version and data_domain. """ if ( data_version not in _lit_paths_versions or data_domain not in _lit_paths_versions[data_version] ): raise ValueError( f"data_version={data_version}, " f"data_domain={data_domain} is not supported." ) lit_paths = _lit_paths_versions[data_version][data_domain] print(f"Loaded lit_paths:\n{lit_paths}") return lit_paths
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/raycast_engine.py
Python
""" Base class for Raycast engines for mesh-ray intersection. """ from abc import ABC, abstractmethod import numpy as np import open3d as o3d from lit.lidar import Lidar class RaycastEngine(ABC): """ Abstract class for raycast engine. Notes: - Currently we assume a scene is only used for raycasting once. To support multiple raycastings per scene, APIs need to be changed to store the raycasting scene in the class. """ @abstractmethod def __init__(self): pass @abstractmethod def rays_intersect_mesh( self, rays: np.ndarray, mesh: o3d.geometry.TriangleMesh, ): """ Intersect the mesh with the given rays. Args: rays: (N, 6) float32 numpy array mesh: o3d.geometry.TriangleMesh Returns: points: (N, 3) float32 numpy array """ @abstractmethod def lidar_intersect_mesh( self, lidar: Lidar, mesh: o3d.geometry.TriangleMesh, ): """ Intersect the mesh with the lidar rays. Args: lidar: Lidar mesh: o3d.geometry.TriangleMesh Returns: points: (N, 3) float32 numpy array """
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/raycast_engine_cpu.py
Python
""" CPU Raycast engine for mesh-ray intersection. """ import numpy as np import open3d as o3d from lit.lidar import Lidar from lit.raycast_engine import RaycastEngine class RaycastEngineCPU(RaycastEngine): """ CPU implementation of raycast engine based on Open3D and Embree. """ def __init__(self): super().__init__() def rays_intersect_mesh( self, rays: np.ndarray, mesh: o3d.geometry.TriangleMesh, ): """ Intersect the mesh with the given rays. Args: rays: (N, 6) float32 numpy array mesh: o3d.geometry.TriangleMesh Returns: points: (N, 3) float32 numpy array """ # Sanity checks. if not isinstance(rays, np.ndarray): raise TypeError("rays must be a numpy array.") if rays.ndim != 2 or rays.shape[1] != 6: raise ValueError("rays must be a (N, 6) array.") # Convert mesh to raycasting_scene. raycasting_scene = o3d.t.geometry.RaycastingScene() raycasting_scene.add_triangles(o3d.t.geometry.TriangleMesh.from_legacy(mesh)) # Run ray cast. rays = rays.astype(np.float32) ray_cast_results = raycasting_scene.cast_rays(o3d.core.Tensor(rays)) normals = ray_cast_results["primitive_normals"].numpy() depths = ray_cast_results["t_hit"].numpy() masks = depths != np.inf rays_o = rays[:, :3] rays_d = rays[:, 3:] rays_d = rays_d / np.linalg.norm(rays_d, axis=1, keepdims=True) points = rays_o + rays_d * depths[:, None] # Filter by hit masks. hit_dict = { "masks": masks, "depths": depths, "points": points, "normals": normals, } points = hit_dict["points"][hit_dict["masks"]] return points def lidar_intersect_mesh( self, lidar: Lidar, mesh: o3d.geometry.TriangleMesh, ): """ Intersect the mesh with the lidar rays. Args: lidar: Lidar mesh: o3d.geometry.TriangleMesh Returns: points: (N, 3) float32 numpy array """ rays = lidar.get_rays() points = self.rays_intersect_mesh(mesh=mesh, rays=rays) # Post-processing: filter points by range. lidar_center = lidar.pose[:3, 3] point_dists = np.linalg.norm(points - lidar_center, axis=1) points = points[point_dists < lidar.intrinsics.max_range] return points
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/raycast_engine_gpu.cu
CUDA
// // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// #include <math_constants.h> #include <optix.h> #include "vector_math.h" struct Params { unsigned int width; unsigned int height; float3* hits; // (W, H, 3), hit coordinates float* incident_angles; // (W, H), incident angle float3* rays_o; float3* rays_d; OptixTraversableHandle gas_handle; }; struct RayGenData { // No data needed }; struct MissData { float3 bg_color; }; struct HitGroupData { float3* vertex_buffer; int3* triangle_buffer; }; extern "C" { __constant__ Params params; } static __forceinline__ __device__ void computeRay(uint3 idx, uint3 dim, float3& origin, float3& direction) { origin = params.rays_o[idx.y * params.width + idx.x]; direction = params.rays_d[idx.y * params.width + idx.x]; } extern "C" __global__ void __raygen__rg() { // Lookup our location within the launch grid. const uint3 idx = optixGetLaunchIndex(); const uint3 dim = optixGetLaunchDimensions(); // Map our launch idx to a screen location and create a ray from the camera // location through the screen float3 ray_origin, ray_direction; computeRay(make_uint3(idx.x, idx.y, 0), dim, ray_origin, ray_direction); // Trace the ray against our scene hierarchy unsigned int p0; unsigned int p1; unsigned int p2; unsigned int p3; optixTrace(params.gas_handle, // Traversable handle ray_origin, // float3 ray_direction, // float3 0.0f, // Min intersection distance 1e16f, // Max intersection distance 0.0f, // rayTime -- used for motion blur OptixVisibilityMask(255), // Specify always visible OPTIX_RAY_FLAG_NONE, 0, // SBT offset -- See SBT discussion 1, // SBT stride -- See SBT discussion 0, // missSBTIndex -- See SBT discussion p0, // optixSetPayload_0, returned from hit or miss kernel p1, // optixSetPayload_1, returned from hit or miss kernel p2, // optixSetPayload_2, returned from hit or miss kernel p3 // optixSetPayload_3, returned from hit or miss kernel ); // Convert the ray cast result values back to floats. 
float3 hit = make_float3(0); hit.x = int_as_float(p0); hit.y = int_as_float(p1); hit.z = int_as_float(p2); float incident_angle = int_as_float(p3); // Record results in our output raster params.hits[idx.y * params.width + idx.x] = hit; params.incident_angles[idx.y * params.width + idx.x] = incident_angle; } extern "C" __global__ void __miss__ms() { MissData* miss_data = reinterpret_cast<MissData*>(optixGetSbtDataPointer()); // https://stackoverflow.com/a/15514595/1255535 optixSetPayload_0(float_as_int(CUDART_INF_F)); optixSetPayload_1(float_as_int(CUDART_INF_F)); optixSetPayload_2(float_as_int(CUDART_INF_F)); optixSetPayload_3(float_as_int(0)); } extern "C" __global__ void __closesthit__ch() { // Compute intersection point coordinates. const float3 ray_origin = optixGetWorldRayOrigin(); const float3 ray_direction = optixGetWorldRayDirection(); // Get the hit distance. const float t = optixGetRayTmax(); // Compute the intersection point. const float3 p = ray_origin + t * ray_direction; // Get the SBT data pointer and cast to the proper type. const HitGroupData* sbt_data = reinterpret_cast<const HitGroupData*>(optixGetSbtDataPointer()); // Cast the integer back to a pointer. float3* vertex_buffer = reinterpret_cast<float3*>(sbt_data->vertex_buffer); int3* triangle_buffer = reinterpret_cast<int3*>(sbt_data->triangle_buffer); // Retrieve the index of the hit triangle. const unsigned int primitive_index = optixGetPrimitiveIndex(); // Access the indices of the vertices that form this triangle. int3 vertex_indices = triangle_buffer[primitive_index]; // Access vertex data. const float3 v0 = vertex_buffer[vertex_indices.x]; const float3 v1 = vertex_buffer[vertex_indices.y]; const float3 v2 = vertex_buffer[vertex_indices.z]; // Compute normal. const float3 edge0 = v1 - v0; const float3 edge1 = v2 - v0; const float3 normal = normalize(cross(edge0, edge1)); // Normalize the ray direction. 
const float3 normalized_ray_direction = normalize(ray_direction); // Compute the dot product of the normalized ray direction and the // normalized normal. const float dot_product = dot(normalized_ray_direction, normal); // Compute the incident angle using arccos. // Clamping the dot product to the range [-1, 1] to avoid numerical issues const float incident_angle = acosf(fmaxf(-1.0f, fminf(dot_product, 1.0f))); // Set payloads. optixSetPayload_0(float_as_int(p.x)); optixSetPayload_1(float_as_int(p.y)); optixSetPayload_2(float_as_int(p.z)); optixSetPayload_3(float_as_int(incident_angle)); }
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/raycast_engine_gpu.py
Python
""" GPU Raycast engine for mesh-ray intersection. """ import ctypes import os import pickle from pathlib import Path import camtools as ct import cupy as cp import numpy as np import open3d as o3d import optix from pynvrtc.compiler import Program from lit.lidar import Lidar from lit.path_utils import LitPaths from lit.raycast_engine import RaycastEngine from lit.raycast_engine_cpu import RaycastEngineCPU class RaycastEngineGPU(RaycastEngine): """ Wrapper to store "global" variables and provide a better API. """ # All possible OptiX include paths. The first one that exists will be used. otk_pyoptix_root = LitPaths._lit_root / "lit" / "extern" / "otk-pyoptix" _optix_include_paths = [ otk_pyoptix_root / "sdk/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/include", Path.home() / "bin/NVIDIA-OptiX-SDK-7.4.0-linux64-x86_64/include", ] # System-wide CUDA include path. _cuda_include_path = Path("/usr/local/cuda/include") # Additional include paths for stddef.h. This is needed for Optix 7.0. _stddef_include_path = None # Path to OptiX kernel file, including our custom kernels. _kernel_file_path = Path(__file__).parent / "raycast_engine_gpu.cu" class Logger: def __init__(self): self.num_messages = 0 def __call__(self, level, tag, mssg): print("[{:>2}][{:>12}]: {}".format(level, tag, mssg)) self.num_messages += 1 def __init__(self, verbose=False) -> None: # Create OptiX context. self.verbose = verbose self.logger = RaycastEngineGPU.Logger() self.ctx = self._create_ctx( self.logger, verbosity_level=4 if self.verbose else 0 ) # Compile OptiX pipeline. 
self.pipeline_options = RaycastEngineGPU._set_pipeline_options() kernel_ptx = RaycastEngineGPU._compile_cuda( str(RaycastEngineGPU._kernel_file_path) ) self.module = self._create_module( self.ctx, self.pipeline_options, kernel_ptx, ) self.prog_groups = self._create_program_groups( self.ctx, self.module, ) self.pipeline = self._create_pipeline( self.ctx, self.prog_groups, self.pipeline_options, ) # Filled by self._create_sbt(), which is called by set_geometry. self.d_raygen_sbt = None self.d_miss_sbt = None self.d_hitgroup_sbt = None self.sbt = None # These properties are set by set_geometry(). self.gas_handle = None self.d_gas_output_buffer = None self.cp_vertices = None self.cp_triangles = None def _create_ctx(self, logger, verbosity_level): # OptiX param can be set with optional keyword constructor arguments. ctx_options = optix.DeviceContextOptions( logCallbackFunction=logger, logCallbackLevel=verbosity_level ) # They can also be set and queried as properties on the struct. if optix.version()[1] >= 2: ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL cu_ctx = 0 return optix.deviceContextCreate(cu_ctx, ctx_options) def _create_accel(self, ctx, np_vertices, np_triangles): """ Args: ctx: Optix context. np_vertices: (N, 3) array of vertices. np_triangles: (M, 3) array of triangle indices. 
""" ct.sanity.assert_shape_nx3(np_vertices, name="np_vertices") ct.sanity.assert_shape_nx3(np_triangles, name="np_triangles") accel_options = optix.AccelBuildOptions( buildFlags=int(optix.BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS), operation=optix.BUILD_OPERATION_BUILD, ) np_vertices = np_vertices.ravel() np_triangles = np_triangles.ravel() self.cp_vertices = cp.array(np_vertices, dtype="f4") self.cp_triangles = cp.array(np_triangles, dtype="u4") triangle_input_flags = [optix.GEOMETRY_FLAG_NONE] # One flag is sufficient triangle_input = optix.BuildInputTriangleArray() triangle_input.vertexFormat = optix.VERTEX_FORMAT_FLOAT3 triangle_input.numVertices = len(self.cp_vertices) // 3 triangle_input.vertexBuffers = [self.cp_vertices.data.ptr] triangle_input.indexFormat = optix.INDICES_FORMAT_UNSIGNED_INT3 triangle_input.numIndexTriplets = len(self.cp_triangles) // 3 triangle_input.indexBuffer = self.cp_triangles.data.ptr triangle_input.flags = triangle_input_flags triangle_input.numSbtRecords = 1 gas_buffer_sizes = ctx.accelComputeMemoryUsage( [accel_options], [triangle_input] ) d_temp_buffer_gas = cp.cuda.alloc(gas_buffer_sizes.tempSizeInBytes) d_gas_output_buffer = cp.cuda.alloc(gas_buffer_sizes.outputSizeInBytes) gas_handle = ctx.accelBuild( 0, # CUDA stream [accel_options], [triangle_input], d_temp_buffer_gas.ptr, gas_buffer_sizes.tempSizeInBytes, d_gas_output_buffer.ptr, gas_buffer_sizes.outputSizeInBytes, [], # emitted properties ) return (gas_handle, d_gas_output_buffer) def _create_module(self, ctx, pipeline_options, triangle_ptx): module_options = optix.ModuleCompileOptions( maxRegisterCount=optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT, optLevel=optix.COMPILE_OPTIMIZATION_DEFAULT, debugLevel=optix.COMPILE_DEBUG_LEVEL_DEFAULT, ) module, log = ctx.moduleCreateFromPTX( module_options, pipeline_options, triangle_ptx ) return module def _create_program_groups(self, ctx, module): raygen_prog_group_desc = optix.ProgramGroupDesc() raygen_prog_group_desc.raygenModule = module 
raygen_prog_group_desc.raygenEntryFunctionName = "__raygen__rg" raygen_prog_group, log = ctx.programGroupCreate([raygen_prog_group_desc]) miss_prog_group_desc = optix.ProgramGroupDesc() miss_prog_group_desc.missModule = module miss_prog_group_desc.missEntryFunctionName = "__miss__ms" miss_prog_group, log = ctx.programGroupCreate([miss_prog_group_desc]) hitgroup_prog_group_desc = optix.ProgramGroupDesc() hitgroup_prog_group_desc.hitgroupModuleCH = module hitgroup_prog_group_desc.hitgroupEntryFunctionNameCH = "__closesthit__ch" hitgroup_prog_group, log = ctx.programGroupCreate([hitgroup_prog_group_desc]) return [raygen_prog_group[0], miss_prog_group[0], hitgroup_prog_group[0]] def _create_pipeline(self, ctx, program_groups, pipeline_compile_options): max_trace_depth = 1 pipeline_link_options = optix.PipelineLinkOptions() pipeline_link_options.maxTraceDepth = max_trace_depth pipeline_link_options.debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL log = "" pipeline = ctx.pipelineCreate( pipeline_compile_options, pipeline_link_options, program_groups, log ) stack_sizes = optix.StackSizes() for prog_group in program_groups: optix.util.accumulateStackSizes(prog_group, stack_sizes) ( dc_stack_size_from_trav, dc_stack_size_from_state, cc_stack_size, ) = optix.util.computeStackSizes( stack_sizes, max_trace_depth, 0, # maxCCDepth 0, # maxDCDepth ) pipeline.setStackSize( dc_stack_size_from_trav, dc_stack_size_from_state, cc_stack_size, 1, # maxTraversableDepth ) return pipeline def _create_sbt(self, prog_groups): """ TODO: this relies on self.cp_vertices and self.triangles, which are set by self._create_accel(). This is not ideal. 
""" raygen_prog_group, miss_prog_group, hitgroup_prog_group = prog_groups header_format = "{}B".format(optix.SBT_RECORD_HEADER_SIZE) # Raygen record formats = [header_format] itemsize = RaycastEngineGPU._get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT ) dtype = np.dtype( { "names": ["header"], "formats": formats, "itemsize": itemsize, "align": True, } ) h_raygen_sbt = np.array([0], dtype=dtype) optix.sbtRecordPackHeader(raygen_prog_group, h_raygen_sbt) d_raygen_sbt = RaycastEngineGPU._array_to_device_memory(h_raygen_sbt) # Miss record formats = [header_format, "f4", "f4", "f4"] itemsize = RaycastEngineGPU._get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT ) dtype = np.dtype( { "names": ["header", "r", "g", "b"], "formats": formats, "itemsize": itemsize, "align": True, } ) h_miss_sbt = np.array([(0, 0.3, 0.1, 0.2)], dtype=dtype) # MissData optix.sbtRecordPackHeader(miss_prog_group, h_miss_sbt) d_miss_sbt = RaycastEngineGPU._array_to_device_memory(h_miss_sbt) # Hitgroup record. 
formats = [header_format, ctypes.c_void_p, ctypes.c_void_p] itemsize = RaycastEngineGPU._get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT ) dtype = np.dtype( { "names": ["header", "vertex_buffer", "triangle_buffer"], "formats": formats, "itemsize": itemsize, "align": True, } ) assert isinstance(self.cp_vertices, cp.ndarray) vertex_buffer_ptr_value = ctypes.c_void_p(self.cp_vertices.data.ptr).value assert isinstance(self.cp_triangles, cp.ndarray) triangle_buffer_ptr_value = ctypes.c_void_p(self.cp_triangles.data.ptr).value h_hitgroup_sbt = np.array( [(0, vertex_buffer_ptr_value, triangle_buffer_ptr_value)], dtype=dtype, ) optix.sbtRecordPackHeader(hitgroup_prog_group, h_hitgroup_sbt) d_hitgroup_sbt = RaycastEngineGPU._array_to_device_memory(h_hitgroup_sbt) sbt = optix.ShaderBindingTable( raygenRecord=d_raygen_sbt.ptr, missRecordBase=d_miss_sbt.ptr, missRecordStrideInBytes=h_miss_sbt.dtype.itemsize, missRecordCount=1, hitgroupRecordBase=d_hitgroup_sbt.ptr, hitgroupRecordStrideInBytes=h_hitgroup_sbt.dtype.itemsize, hitgroupRecordCount=1, ) return d_raygen_sbt, d_miss_sbt, d_hitgroup_sbt, sbt @staticmethod def _set_pipeline_options(): if optix.version()[1] >= 2: return optix.PipelineCompileOptions( usesMotionBlur=False, traversableGraphFlags=int( optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS ), numPayloadValues=4, # Check __raygen__rg() to see num payload values. numAttributeValues=3, exceptionFlags=int(optix.EXCEPTION_FLAG_NONE), pipelineLaunchParamsVariableName="params", usesPrimitiveTypeFlags=optix.PRIMITIVE_TYPE_FLAGS_TRIANGLE, ) else: return optix.PipelineCompileOptions( usesMotionBlur=False, traversableGraphFlags=int( optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS ), numPayloadValues=4, # Check __raygen__rg() to see num payload values. 
numAttributeValues=3, exceptionFlags=int(optix.EXCEPTION_FLAG_NONE), pipelineLaunchParamsVariableName="params", ) @staticmethod def _get_aligned_itemsize(formats, alignment): def round_up(val, mult_of): if val % mult_of == 0: return val else: return val + mult_of - val % mult_of names = [] for i in range(len(formats)): names.append("x" + str(i)) temp_dtype = np.dtype({"names": names, "formats": formats, "align": True}) return round_up(temp_dtype.itemsize, alignment) @staticmethod def _optix_version_gte(version): if optix.version()[0] > version[0]: return True if optix.version()[0] == version[0] and optix.version()[1] >= version[1]: return True return False @staticmethod def _array_to_device_memory(numpy_array, stream=cp.cuda.Stream()): byte_size = numpy_array.size * numpy_array.dtype.itemsize h_ptr = ctypes.c_void_p(numpy_array.ctypes.data) d_mem = cp.cuda.memory.alloc(byte_size) d_mem.copy_from_async(h_ptr, byte_size, stream) return d_mem @staticmethod def _compile_cuda(cuda_file_path): cuda_file_path = str(cuda_file_path) with open(cuda_file_path, "rb") as f: src = f.read() nvrtc_dll = os.environ.get("NVRTC_DLL") if nvrtc_dll is None: nvrtc_dll = "" else: print(f"NVRTC_DLL = {nvrtc_dll}") prog = Program(src.decode(), cuda_file_path, lib_name=nvrtc_dll) # Check the fist existing OptiX include path. optix_include_path = None for path in RaycastEngineGPU._optix_include_paths: if path.exists(): optix_include_path = str(path) break if optix_include_path is None: raise RuntimeError(f"OptiX include path does not exist: {path}") # Check CUDA include path. if not RaycastEngineGPU._cuda_include_path.exists(): raise RuntimeError( f"CUDA include path does not exist: {RaycastEngineGPU._cuda_include_path}" ) cuda_include_path = str(RaycastEngineGPU._cuda_include_path) compile_options = [ "-use_fast_math", "-lineinfo", "-default-device", "-std=c++11", "-rdc", "true", f"-I{cuda_include_path}", f"-I{optix_include_path}", ] # Optix 7.0 compiles need path to system <stddef.h>. 
The value of # optix.stddef_path is compiled in constant. if optix.version()[1] == 0: compile_options.append(f"-I{RaycastEngineGPU._stddef_include_path}") ptx = prog.compile(compile_options) return ptx def set_geometry(self, vertices: np.ndarray, triangles: np.ndarray) -> None: """ Set the geometry of the scene. This is useful to raycast the same scene for multiple lidar poses. """ self.gas_handle, self.d_gas_output_buffer = self._create_accel( self.ctx, np_vertices=vertices, np_triangles=triangles, ) ( self.d_raygen_sbt, self.d_miss_sbt, self.d_hitgroup_sbt, self.sbt, ) = self._create_sbt(self.prog_groups) def rays_intersect_mesh( self, rays: np.ndarray, mesh: o3d.geometry.TriangleMesh, ): """ Intersect the mesh with the given rays. Args: rays: (N, 6) float32 numpy array mesh: o3d.geometry.TriangleMesh Returns: points: (N, 3) float32 numpy array """ # Sanity checks. if not isinstance(rays, np.ndarray): raise TypeError("rays must be a numpy array.") if rays.ndim != 2 or rays.shape[1] != 6: raise ValueError("rays must be a (N, 6) array.") raise NotImplementedError() def lidar_intersect_mesh( self, lidar: Lidar, mesh: o3d.geometry.TriangleMesh, ): """ Intersect the mesh with the lidar rays. Args: lidar: Lidar mesh: o3d.geometry.TriangleMesh Returns: points: (N, 3) float32 numpy array incident_angles: (N,) float32 numpy array """ self.set_geometry( vertices=np.asarray(mesh.vertices, dtype=np.float32), triangles=np.asarray(mesh.triangles, dtype=np.int32), ) # Copy rays to device. # This can be done more efficiently by calculating rays on GPU. rays = lidar.get_rays() rays_o = rays[:, :3] rays_d = rays[:, 3:] d_rays_o = cp.array(rays_o, dtype="f4") d_rays_d = cp.array(rays_d, dtype="f4") # Allocate output buffer. # This can be done more efficiently by reusing the same buffer. 
lidar_width = lidar.intrinsics.horizontal_res lidar_height = lidar.intrinsics.vertical_res d_hits = cp.empty((lidar_width, lidar_height, 3), dtype="f4") d_incident_angles = cp.empty((lidar_width, lidar_height), dtype="f4") # Prepare params. params = [ ("u4", "width", lidar_width), # uint32_t int ("u4", "height", lidar_height), # uint32_t int ("u8", "hits", d_hits.data.ptr), # uint64_t pointer ("u8", "incident_angles", d_incident_angles.data.ptr), # uint64_t pointer ("u8", "rays_o", d_rays_o.data.ptr), # uint64_t pointer ("u8", "rays_d", d_rays_d.data.ptr), # uint64_t pointer ("u8", "gas_handle", self.gas_handle), # uint64_t pointer ] formats = [x[0] for x in params] names = [x[1] for x in params] values = [x[2] for x in params] itemsize = RaycastEngineGPU._get_aligned_itemsize(formats, 8) params_dtype = np.dtype( { "names": names, "formats": formats, "itemsize": itemsize, "align": True, } ) h_params = np.array([tuple(values)], dtype=params_dtype) d_params = RaycastEngineGPU._array_to_device_memory(h_params) # Launch! stream = cp.cuda.Stream() optix.launch( self.pipeline, stream.ptr, d_params.ptr, h_params.dtype.itemsize, self.sbt, lidar_width, lidar_height, 1, # depth ) stream.synchronize() h_hits = cp.asnumpy(d_hits) h_incident_angles = cp.asnumpy(d_incident_angles) # Filter out missed points. h_hits = h_hits.reshape((lidar_height * lidar_width, 3)) h_incident_angles = h_incident_angles.reshape((lidar_height * lidar_width,)) im_render_mask = np.isfinite(h_hits[:, 0]) points = h_hits[im_render_mask] incident_angles = h_incident_angles[im_render_mask] # Filter out out-of-range points. max_range = lidar.intrinsics.max_range lidar_center = ct.convert.pose_to_C(lidar.pose) point_dists = np.linalg.norm(points - lidar_center, axis=1) points = points[point_dists < max_range] incident_angles = incident_angles[point_dists < max_range] assert len(points) == len(incident_angles) return points, incident_angles def main(): # Load test data. 
script_dir = Path(__file__).parent.absolute().resolve() lit_root = script_dir.parent.parent data_dir = lit_root / "data" raycast_data_path = data_dir / "test_data" / "raycast_data.pkl" raycast_mesh_path = data_dir / "test_data" / "raycast_mesh.ply" # Read lidar data. with open(raycast_data_path, "rb") as f: raycast_data = pickle.load(f) lidar = Lidar( intrinsics=raycast_data["lidar_intrinsics"], pose=raycast_data["pose"], ) # Read mesh. mesh = o3d.io.read_triangle_mesh(str(raycast_mesh_path)) mesh.compute_vertex_normals() # Ray cast GPU. raycast_engine_gpu = RaycastEngineGPU() points_gpu = raycast_engine_gpu.lidar_intersect_mesh(lidar=lidar, mesh=mesh) # Ray cast CPU for comparison. raycast_engine_cpu = RaycastEngineCPU() points_cpu = raycast_engine_cpu.lidar_intersect_mesh(lidar=lidar, mesh=mesh) is_all_close = np.allclose(points_cpu, points_gpu, rtol=1e-03, atol=1e-03) print(f"len(points_cpu) = {len(points_cpu)}, len(points_gpu) = {len(points_gpu)}") print(f"np.allclose(points_cpu, points_gpu) = {is_all_close}") # Visualize pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(points_gpu) o3d.visualization.draw_geometries([pcd, mesh]) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/recon_utils.py
Python
import time

import camtools as ct
import igraph as ig
import numpy as np
import open3d as o3d
import torch

from lit.ext import lit_ext
from pcdet.utils.box_utils import box_to_corners_3d

# Per-axis (dx, dy, dz) bbox scale factors between lidar domains, estimated
# from dataset statistics. Shared by scale_bboxes_by_domain() and
# scale_points_with_bbox_by_domain(), which previously each carried an
# identical private copy of this table (a consistency hazard if one copy
# were ever updated without the other).
_DOMAIN_BBOX_SCALES = {
    "waymo_to_kitti_bbox_scale": [
        0.8086602687835693,
        0.7795897722244263,
        0.854170024394989,
    ],
    "waymo_to_nuscenes_bbox_scale": [
        0.9662209153175354,
        0.9140116572380066,
        0.9629638195037842,
    ],
    "nuscenes_to_kitti_bbox_scale": [
        0.836931049823761,
        0.8529319763183594,
        0.8870219588279724,
    ],
}


def scale_bbox(
    bbox: np.ndarray,
    src_to_dst_scales: np.ndarray,
):
    """
    Scale a single bbox's extents while keeping it grounded (same z_min).

    Args:
        bbox: (8,) or (7,), in OpenPCDet format.
              x, y, z, dx, dy, dz, heading, (label)
        src_to_dst_scales: (3,) scales.

    Notes:
        x: unchanged.
        y: unchanged.
        z: lowered by dz_gap so the scaled box still touches the ground.
        dx: scaled by src_to_dst_scales[0].
        dy: scaled by src_to_dst_scales[1].
        dz: scaled by src_to_dst_scales[2].
        heading: unchanged.
        label: unchanged.

    Return:
        bbox: (8,) or (7,), in OpenPCDet format.
              x, y, z, dx, dy, dz, heading, (label)
    """
    assert isinstance(bbox, np.ndarray)
    assert bbox.ndim == 1
    assert bbox.shape[0] in [7, 8]
    src_to_dst_scales = np.asarray(src_to_dst_scales)
    assert src_to_dst_scales.ndim == 1
    assert src_to_dst_scales.shape[0] == 3

    scaled_bbox = bbox.copy()

    # Lower z by half of the height shrinkage so z_min stays fixed.
    dz = bbox[5]
    scaled_dz = dz * src_to_dst_scales[2]
    dz_gap = (dz - scaled_dz) / 2
    scaled_bbox[2] -= dz_gap

    # Scale dx, dy, dz.
    scaled_bbox[3:6] *= src_to_dst_scales

    return scaled_bbox


def scale_bboxes(
    bboxes: np.ndarray,
    src_to_dst_scales: np.ndarray,
):
    """
    Vectorized version of scale_bbox() for (N, 7) or (N, 8) bboxes.

    Args:
        bboxes: (N, 8) or (N, 7), in OpenPCDet format.
                x, y, z, dx, dy, dz, heading, (label)
        src_to_dst_scales: (3,) scales.

    Notes:
        x: unchanged.
        y: unchanged.
        z: lowered by dz_gap so each scaled box still touches the ground.
        dx: scaled by src_to_dst_scales[0].
        dy: scaled by src_to_dst_scales[1].
        dz: scaled by src_to_dst_scales[2].
        heading: unchanged.
        label: unchanged.

    Return:
        scaled_bboxes: (N, 8) or (N, 7), in OpenPCDet format.
                       x, y, z, dx, dy, dz, heading, (label)
    """
    assert isinstance(bboxes, np.ndarray)
    assert bboxes.ndim == 2
    assert bboxes.shape[1] in [7, 8]
    src_to_dst_scales = np.asarray(src_to_dst_scales)
    assert src_to_dst_scales.ndim == 1
    assert src_to_dst_scales.shape[0] == 3

    scaled_bboxes = bboxes.copy()

    # Lower z by half of the height shrinkage so z_min stays fixed.
    dz = bboxes[:, 5]
    scaled_dz = dz * src_to_dst_scales[2]
    dz_gap = (dz - scaled_dz) / 2
    scaled_bboxes[:, 2] -= dz_gap

    # Scale dx, dy, dz.
    scaled_bboxes[:, 3:6] *= src_to_dst_scales

    return scaled_bboxes


def scale_bboxes_by_domain(
    bboxes: np.ndarray,
    src_domain: str,
    dst_domain: str,
):
    """
    Scale bboxes using the precomputed per-domain scale table.

    Args:
        bboxes: (N, 8) or (N, 7), in OpenPCDet format.
                x, y, z, dx, dy, dz, heading, (label)
        src_domain: "waymo", "nuscenes", or "kitti".
        dst_domain: "waymo", "nuscenes", or "kitti".

    Return:
        scaled_bboxes: (N, 8) or (N, 7), in OpenPCDet format.
                       x, y, z, dx, dy, dz, heading, (label)
    """
    # Only "larger -> smaller" domain pairs are supported.
    assert (src_domain, dst_domain) in [
        ("waymo", "nuscenes"),
        ("waymo", "kitti"),
        ("nuscenes", "kitti"),
    ]

    # Read scales from the shared module-level table.
    src_to_dst_scales = _DOMAIN_BBOX_SCALES[f"{src_domain}_to_{dst_domain}_bbox_scale"]

    return scale_bboxes(
        bboxes=bboxes,
        src_to_dst_scales=src_to_dst_scales,
    )


def scale_points_with_bbox(
    points: np.ndarray,
    bbox: np.ndarray,
    src_to_dst_scales: np.ndarray,
):
    """
    Scale points located inside a bbox, such that:
        - The extent of the bbox is scaled by src_to_dst_scales.
        - The x and y centers of the bbox remain unchanged.
        - The bbox still touches the ground. That is, the src bbox and dst
          bbox share the same z_min.

    Args:
        points: (N, 3).
        bbox: (8,) or (7,), in OpenPCDet format.
              x, y, z, dx, dy, dz, heading, (label)
        src_to_dst_scales: (3,) scales.

    Return:
        scaled_points, scaled_bbox
    """
    assert isinstance(points, np.ndarray)
    assert points.ndim == 2
    assert points.shape[1] == 3
    assert isinstance(bbox, np.ndarray)
    assert bbox.ndim == 1
    assert bbox.shape[0] in [7, 8]
    src_to_dst_scales = np.asarray(src_to_dst_scales)
    assert src_to_dst_scales.ndim == 1
    assert src_to_dst_scales.shape[0] == 3

    # Center points to bbox center.
    bbox_center = bbox[:3]
    scaled_points = points.copy()
    scaled_points = scaled_points - bbox_center

    # Scale points by src_to_dst_scales.
    scaled_points = scaled_points * src_to_dst_scales

    # Put points back to the original bbox center.
    scaled_points = scaled_points + bbox_center

    # Lower points by dz_gap so the scaled car still sits on the ground.
    dz = bbox[5]
    scaled_dz = dz * src_to_dst_scales[2]
    dz_gap = (dz - scaled_dz) / 2
    scaled_points[:, 2] -= dz_gap
    scaled_points = scaled_points.astype(points.dtype)

    # Compute scaled bbox.
    scaled_bbox = bbox.copy()
    scaled_bbox[2] -= dz_gap  # Lower z.
    scaled_bbox[3:6] *= src_to_dst_scales  # Scale dx, dy, dz.
    scaled_bbox = scaled_bbox.astype(bbox.dtype)

    return scaled_points, scaled_bbox


def scale_points_with_bbox_by_domain(
    points: np.ndarray,
    bbox: np.ndarray,
    src_domain: str,
    dst_domain: str,
):
    """
    Scale in-bbox points using the precomputed per-domain scale table.

    Args:
        points: (N, 3).
        bbox: (8,) or (7,), in OpenPCDet format.
              x, y, z, dx, dy, dz, heading, (label)
        src_domain: "waymo", "nuscenes", or "kitti".
        dst_domain: "waymo", "nuscenes", or "kitti".

    Return:
        scaled_points, scaled_bbox
    """
    # Only "larger -> smaller" domain pairs are supported.
    assert (src_domain, dst_domain) in [
        ("waymo", "nuscenes"),
        ("waymo", "kitti"),
        ("nuscenes", "kitti"),
    ]

    # Read scales from the shared module-level table.
    src_to_dst_scales = _DOMAIN_BBOX_SCALES[f"{src_domain}_to_{dst_domain}_bbox_scale"]

    return scale_points_with_bbox(
        points=points,
        bbox=bbox,
        src_to_dst_scales=src_to_dst_scales,
    )


def mesh_to_wire_frame(mesh: o3d.geometry.TriangleMesh):
    """
    Convert mesh to wire frame lineset (one line per triangle edge;
    shared edges are duplicated).
    """
    assert isinstance(mesh, o3d.geometry.TriangleMesh)
    vertices = np.asarray(mesh.vertices)
    triangles = np.asarray(mesh.triangles)
    edges_ab = triangles[:, [0, 1]]
    edges_bc = triangles[:, [1, 2]]
    edges_ca = triangles[:, [2, 0]]
    lines = np.concatenate([edges_ab, edges_bc, edges_ca], axis=0)
    ls = o3d.geometry.LineSet()
    ls.points = o3d.utility.Vector3dVector(vertices)
    ls.lines = o3d.utility.Vector2iVector(lines)
    return ls


def largest_cluster_mesh(mesh: o3d.geometry.TriangleMesh):
    """
    Args:
        mesh: open3d.geometry.TriangleMesh.

    Returns:
        mesh: open3d.geometry.TriangleMesh, of the largest cluster
              (connected component by shared vertices).
    """
    vertices = np.asarray(mesh.vertices)
    triangles = np.asarray(mesh.triangles)

    # Get the vertex indices of the biggest cluster via an igraph
    # connected-components search over the triangle edge graph.
    g = ig.Graph()
    g.add_vertices(len(vertices))
    edges_a_b = triangles[:, [0, 1]]
    edges_b_c = triangles[:, [1, 2]]
    edges_c_a = triangles[:, [2, 0]]
    edges = np.concatenate([edges_a_b, edges_b_c, edges_c_a], axis=0)
    g.add_edges(edges)
    clusters = g.clusters()
    biggest_cluster_id = np.argmax(clusters.sizes())
    vert_ids = clusters[biggest_cluster_id]

    # Create mesh and select the biggest cluster.
    mesh = o3d.geometry.TriangleMesh()
    mesh.vertices = o3d.utility.Vector3dVector(vertices)
    mesh.triangles = o3d.utility.Vector3iVector(triangles)
    mesh = mesh.select_by_index(vert_ids)

    return mesh


# Context manager for temporarily changing the verbosity level to ERROR for Open3D.
# This is a bug in Open3D.
# [Open3D WARNING] invalid color in PaintUniformColor, clipping to [0, 1]
class SuppressOpen3DWarning:
    def __enter__(self):
        self.old_verbosity_level = o3d.utility.get_verbosity_level()
        o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)

    def __exit__(self, exc_type, exc_val, exc_tb):
        o3d.utility.set_verbosity_level(self.old_verbosity_level)


def _bbox_to_lineset_with_open3d(bbox, frame_pose=None):
    """
    Deprecated. This may generate differently ordered points compared to
    bbox_to_lineset().

    Args:
        bbox: (8,) or (7,), in OpenPCDet format.
        frame_pose: (4, 4) pose.
            The bbox will be transformed to world coordinate.

    Returns:
        An Open3D lineset.

    Notes:
        bbox: [x, y, z, dx, dy, dz, heading, class]
        - x      : center x.
        - y      : center y.
        - z      : center z.
        - dx     : full length in x direction before rotation.
        - dy     : full length in y direction before rotation.
        - dz     : full length in z direction before rotation.
        - heading: rotation angle around z axis in radian, positive is
                   counter-clockwise.

        It is not possible to transform bbox to another bbox with arbitrary
        pose, as bbox can only be rotated around z axis. Therefore, bbox is
        always used to represent a bbox in the local coordinate of the frame.
    """
    assert isinstance(bbox, np.ndarray)
    assert bbox.ndim == 1
    assert bbox.shape[0] in [7, 8]

    center = bbox[0:3]
    lwh = bbox[3:6]
    # Epsilon avoids a degenerate zero rotation vector.
    axis_angles = np.array([0, 0, bbox[6] + 1e-10])
    rot = o3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
    box3d = o3d.geometry.OrientedBoundingBox(center, rot, lwh)
    with SuppressOpen3DWarning():
        ls = o3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)

    # Transform points.
    if frame_pose is not None:
        points = np.asarray(ls.points)
        points = ct.transform.transform_points(points, frame_pose)
        ls.points = o3d.utility.Vector3dVector(points)

    # Assign colors.
    ls.colors = o3d.utility.Vector3dVector(np.zeros_like(ls.points))

    return ls


def obb_to_lineset(obb: o3d.geometry.OrientedBoundingBox, frame_pose=None):
    """
    Convert an Open3D OrientedBoundingBox to a lineset, optionally
    transforming the line endpoints by frame_pose.
    """
    with SuppressOpen3DWarning():
        ls = o3d.geometry.LineSet.create_from_oriented_bounding_box(obb)

    # Transform points.
    if frame_pose is not None:
        points = np.asarray(ls.points)
        points = ct.transform.transform_points(points, frame_pose)
        ls.points = o3d.utility.Vector3dVector(points)

    return ls


def bbox_to_lineset(bbox, frame_pose=None):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        bbox: (8,) or (7,), in OpenPCDet format.
        frame_pose: (4, 4) pose.
            The bbox will be transformed to world coordinate.

    Returns:
        An Open3D lineset (black box edges plus a red heading line on top).

    Notes:
        bbox: [x, y, z, dx, dy, dz, heading, class]
        - x      : center x.
        - y      : center y.
        - z      : center z.
        - dx     : full length in x direction before rotation.
        - dy     : full length in y direction before rotation.
        - dz     : full length in z direction before rotation.
        - heading: rotation angle around z axis in radian, positive is
                   counter-clockwise.

        It is not possible to transform bbox to another bbox with arbitrary
        pose, as bbox can only be rotated around z axis. Therefore, bbox is
        always used to represent a bbox in the local coordinate of the frame.
    """
    corners = box_to_corners_3d(bbox)
    lines = np.array(
        [
            # Bottom plane
            [0, 1],
            [1, 2],
            [2, 3],
            [3, 0],
            # Top plane
            [4, 5],
            [5, 6],
            [6, 7],
            [7, 4],
            # Vertical lines
            [0, 4],
            [1, 5],
            [2, 6],
            [3, 7],
        ]
    )

    # Calculate the center of the top plane
    top_center = np.mean(corners[4:8], axis=0)

    # Determine the front of the bbox (midpoint between corners 4 and 5)
    top_front_midpoint = (corners[4] + corners[5]) / 2

    # Create the main bbox LineSet
    main_ls = o3d.geometry.LineSet()
    main_ls.points = o3d.utility.Vector3dVector(corners)
    main_ls.lines = o3d.utility.Vector2iVector(lines)
    main_ls.colors = o3d.utility.Vector3dVector(np.zeros((lines.shape[0], 3)))

    # Create a separate LineSet for the heading line
    heading_ls = o3d.geometry.LineSet()
    heading_ls.points = o3d.utility.Vector3dVector([top_center, top_front_midpoint])
    heading_ls.lines = o3d.utility.Vector2iVector(np.array([[0, 1]]))
    heading_ls.colors = o3d.utility.Vector3dVector([[1, 0, 0]])

    # Combine the two LineSets
    combined_ls = main_ls + heading_ls

    # Apply transformation to the combined LineSet if frame_pose is provided
    if frame_pose is not None:
        combined_ls.transform(frame_pose)

    return combined_ls


def bbox_to_corners(bbox, frame_pose=None):
    """
    Args:
        bbox: (8,) or (7,), in OpenPCDet format.
        frame_pose: (4, 4) pose.
            The bbox will be transformed to world coordinate.

    Returns:
        Array of shape (8, 3), corner vertices of the bbox.
    """
    corners = box_to_corners_3d(bbox)
    if frame_pose is not None:
        corners = ct.transform.transform_points(corners, frame_pose)
    return corners


def bboxes_to_lineset(bboxes, frame_pose):
    """
    Args:
        bboxes: (N, 8) or (N, 7) boxes.
        frame_pose: (4, 4) pose. All boxes are in the same frame of the pose.
            The bbox will be transformed to world coordinate.

    Returns:
        A fused Open3D line set.

    Ref:
        tools/visual_utils/open3d_vis_utils.py::translate_boxes_to_open3d_instance()
    """
    assert isinstance(bboxes, np.ndarray)
    assert isinstance(frame_pose, np.ndarray)
    assert bboxes.ndim == 2
    assert bboxes.shape[1] in [7, 8]
    assert frame_pose.shape == (4, 4)

    frame_lineset = o3d.geometry.LineSet()
    for bbox in bboxes:
        frame_lineset += bbox_to_lineset(bbox, frame_pose=frame_pose)

    return frame_lineset


def bbox_to_open3d_obb(bbox):
    """
    Convert a bbox to Open3D OrientedBoundingBox.
    """
    assert isinstance(bbox, np.ndarray)
    assert bbox.ndim == 1
    assert bbox.shape[0] in [7, 8]

    center = bbox[:3]
    lwh = bbox[3:6]
    # Epsilon avoids a degenerate zero rotation vector.
    axis_angles = np.array([0, 0, bbox[6] + 1e-10])
    rot = o3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
    obb = o3d.geometry.OrientedBoundingBox(center, rot, lwh)

    return obb


def get_indices_inside_bbox(points, bbox):
    """
    Return point indices within a bbox.

    TODO: consider class info in bbox.

    Args:
        points: (N, 3).
        bbox: (8,) or (7,), in OpenPCDet format.

    Returns:
        1D array of indices of points inside the bbox.
    """
    assert isinstance(points, np.ndarray)
    assert isinstance(bbox, np.ndarray)
    assert points.ndim == 2
    assert points.shape[1] == 3
    assert bbox.ndim == 1
    assert bbox.shape[0] in [7, 8]

    obb = bbox_to_open3d_obb(bbox)
    points_o3d = o3d.utility.Vector3dVector(points)
    inside_indices = np.array(obb.get_point_indices_within_bounding_box(points_o3d))

    return inside_indices


def get_indices_inside_bboxes(points, bboxes):
    """
    Return a list of point indices within multiple bboxes.

    Args:
        points: (N, 3).
        bboxes: (M, 8) or (M, 7), in OpenPCDet format.

    Returns:
        List[1D array]. Each array contains point indices in a bbox.

    Notes:
        1. This will return a list of 1D arrays, where
           get_indices_outside_bboxes() returns a single 1D array.
        2. If we call indices_inside_bbox() multiple times,
           o3d.utility.Vector3dVector() will be called multiple times, which
           is slow. Therefore, use indices_inside_bboxes() instead.
    """
    assert isinstance(points, np.ndarray)
    assert isinstance(bboxes, np.ndarray)
    assert points.ndim == 2
    assert points.shape[1] == 3
    assert bboxes.ndim == 2
    assert bboxes.shape[1] in [7, 8]

    # Convert once, reuse for every bbox query.
    points_o3d = o3d.utility.Vector3dVector(points)
    indices_inside_bboxes = []
    for bbox in bboxes:
        obb = bbox_to_open3d_obb(bbox)
        indices_inside_bboxes.append(
            np.array(obb.get_point_indices_within_bounding_box(points_o3d))
        )

    return indices_inside_bboxes


def get_indices_outside_bboxes(points, bboxes):
    """
    Return (combined, i.e. shape (N,)) point indices outside all bboxes.

    Args:
        points: (N, 3).
        bboxes: (M, 8) or (M, 7), in OpenPCDet format.
    """
    assert isinstance(points, np.ndarray)
    assert isinstance(bboxes, np.ndarray)
    assert points.ndim == 2
    assert points.shape[1] == 3
    assert bboxes.ndim == 2
    assert bboxes.shape[1] in [7, 8]

    points_o3d = o3d.utility.Vector3dVector(points)
    inside_indices = set()
    for bbox in bboxes:
        obb = bbox_to_open3d_obb(bbox)
        inside_indices.update(
            np.array(obb.get_point_indices_within_bounding_box(points_o3d))
        )
    inside_indices = np.array(sorted(list(inside_indices)))
    all_indices = np.arange(len(points))
    # np.isin replaces the NumPy-2.0-deprecated np.in1d.
    outside_indices = all_indices[
        ~np.isin(all_indices, inside_indices, assume_unique=True)
    ]

    return outside_indices


def bbox_corners_to_lineset(bbox_corners):
    """
    Args:
        bbox_corners: (8, 3), corners of a bbox.

    Returns:
        ls: open3d.geometry.LineSet, lines of the bbox (red).

           7 -------- 6
          /|         /|
         4-------- 5  .
         |  |      |  |
         .  3 ------- 2
         |/        |/
         0 -------- 1

               Z
               ^
               |  / X
               | /
        Y <----+

        - The origin is the lidar center.
        - The origin is above the ground.
        - Typically:
            - Edge 0-3 is the longest (the length of the car).
            - Edge 0-1 is in the middle (the width of the car).
            - Edge 0-4 is the shortest (the height of the car).
    """
    lines = [
        [0, 1],
        [1, 2],
        [2, 3],
        [3, 0],
        [4, 5],
        [5, 6],
        [6, 7],
        [7, 4],
        [0, 4],
        [1, 5],
        [2, 6],
        [3, 7],
    ]
    colors = [[1, 0, 0] for i in range(len(lines))]
    ls = o3d.geometry.LineSet()
    ls.points = o3d.utility.Vector3dVector(bbox_corners)
    ls.lines = o3d.utility.Vector2iVector(lines)
    ls.colors = o3d.utility.Vector3dVector(colors)
    return ls


def extract_points_from_box(points, bbox):
    """
    Extract points from a bbox.

    TODO: consider class info in a bbox.

    Args:
        points: (N, 3).
        bbox: (8,) or (7,), in OpenPCDet format.

    Returns:
        (M, 3) points in the bbox.
    """
    assert isinstance(points, np.ndarray)
    assert isinstance(bbox, np.ndarray)
    assert points.ndim == 2
    assert points.shape[1] == 3
    assert bbox.ndim == 1
    assert bbox.shape[0] in [7, 8]

    # Reuse the module-level helper instead of re-defining it locally.
    obb = bbox_to_open3d_obb(bbox)
    points_v3d = o3d.utility.Vector3dVector(points)
    ins_indices = np.array(obb.get_point_indices_within_bounding_box(points_v3d))
    # out_indices = np.setdiff1d(np.arange(points.shape[0]), ins_indices)

    return points[ins_indices]


def remove_statistical_outlier(
    points,
    lidar_centers=None,
    nb_neighbors=80,
    std_ratio=2.0,
    verbose=True,
):
    """
    Remove statistical outliers from a point cloud.

    Set nb_neighbors = 0 (or None) to disable.

    Args:
        points: (N, 3).
        lidar_centers: optional (N, 3), filtered with the same mask.
        nb_neighbors: number of neighbors for the statistical test.
        std_ratio: standard-deviation threshold ratio.
        verbose: print before/after point counts.

    Returns:
        points, or (points, lidar_centers) when lidar_centers is given.
    """
    if nb_neighbors == 0 or nb_neighbors is None:
        if lidar_centers is None:
            return points
        else:
            return points, lidar_centers

    if lidar_centers is not None and len(points) != len(lidar_centers):
        raise ValueError(
            f"len(points) != len(lidar_centers): {len(points)} != {len(lidar_centers)}"
        )

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)
    num_src_points = len(pcd.points)
    pcd, select_indices = pcd.remove_statistical_outlier(
        nb_neighbors=nb_neighbors,
        std_ratio=std_ratio,
    )
    num_dst_points = len(pcd.points)
    if verbose:
        print(f"remove_statistical_outlier: {num_src_points} -> {num_dst_points}")

    points = points[select_indices]
    if lidar_centers is not None:
        lidar_centers = lidar_centers[select_indices]
        return points, lidar_centers
    else:
        return points


def voxel_downsample(
    points,
    lidar_centers,
    voxel_size=0.25,
    verbose=False,
):
    """
    Voxel-downsample points; each output point's lidar center is chosen by
    majority vote among its k nearest source points.

    Args:
        points: (N, 3).
        lidar_centers: (N, 3), one lidar center per point.
        voxel_size: voxel edge length.
        verbose: print before/after point counts.

    Returns:
        (dst_points, dst_lidar_centers)
    """
    if len(points) != len(lidar_centers):
        raise ValueError(
            f"len(points) != len(lidar_centers): {len(points)} != {len(lidar_centers)}"
        )

    src_pcd = o3d.geometry.PointCloud()
    src_pcd.points = o3d.utility.Vector3dVector(points)
    dst_pcd = src_pcd.voxel_down_sample(voxel_size=voxel_size)
    dst_points = np.asarray(dst_pcd.points)

    k = 20
    dst_lidar_centers = []
    src_kdtree = o3d.geometry.KDTreeFlann(src_pcd)
    for dst_point in dst_points:
        [_, indices, _] = src_kdtree.search_knn_vector_3d(dst_point, k)
        k_src_lidar_centers = lidar_centers[indices]
        # Majority vote of lidar centers by row.
        unique_rows, counts = np.unique(
            k_src_lidar_centers,
            axis=0,
            return_counts=True,
        )
        dst_lidar_center = unique_rows[np.argmax(counts)]
        dst_lidar_centers.append(dst_lidar_center)
    dst_lidar_centers = np.asarray(dst_lidar_centers)

    if verbose:
        print(f"voxel_downsample: {len(points)} -> {len(dst_points)}")

    return dst_points, dst_lidar_centers


def rotate_points_y_front_to_x_front(points, inverse=False):
    """
    Rotate points 90 degrees, counter-clockwise, around z axes. This
    effectively rotates points from y-pointing-to-road_front to
    x-pointing-to-road_front.

    Args:
        points: [N, 3]
        inverse: If True, rotate from x-front to y-front.

    Returns:
        points: [N, 3] of rotated points.
    """
    points = np.asarray(points)
    assert points.ndim == 2
    assert points.shape[1] == 3

    if inverse:
        rot_matrix = np.array(
            [
                [0, -1, 0],
                [1, 0, 0],
                [0, 0, 1],
            ]
        )
    else:
        rot_matrix = np.array(
            [
                [0, 1, 0],
                [-1, 0, 0],
                [0, 0, 1],
            ]
        )
    t_matrix = np.array([0, 0, 0])
    points = points @ rot_matrix.T + t_matrix

    return points


def rotate_bbox_y_front_to_x_front(bbox):
    """
    Rotate a bbox from y-front to x-front: center rotates, dx/dy swap.

    Args:
        bbox: [x, y, z, dx, dy, dz, heading, class]

    Returns:
        bbox: [x, y, z, dx, dy, dz, heading, class] of rotated bbox.
    """
    bbox = np.asarray(bbox)
    assert bbox.ndim == 1
    if bbox.shape[0] == 7:
        x, y, z, dx, dy, dz, heading = bbox
    elif bbox.shape[0] == 8:
        x, y, z, dx, dy, dz, heading, label = bbox
    else:
        raise ValueError(f"Unknown bbox shape: {bbox.shape}")

    new_x, new_y, new_z = rotate_points_y_front_to_x_front(np.array([[x, y, z]]))[0]
    # After a 90-degree frame rotation, box length/width swap roles.
    new_dx = dy
    new_dy = dx
    new_dz = dz
    new_heading = heading

    if bbox.shape[0] == 7:
        new_bbox = np.array([new_x, new_y, new_z, new_dx, new_dy, new_dz, new_heading])
    elif bbox.shape[0] == 8:
        new_bbox = np.array(
            [new_x, new_y, new_z, new_dx, new_dy, new_dz, new_heading, label]
        )
    else:
        raise ValueError(f"Unknown bbox shape: {bbox.shape}")

    return new_bbox


def rotate_bboxes_y_front_to_x_front(bboxes, inverse=False):
    """
    Similar to rotate_bbox_y_front_to_x_front, but for multiple bboxes at
    the same time in a vectorized manner.

    Args:
        bboxes: [N, 7]
        inverse: If True, rotate from x-front to y-front.

    Returns:
        bboxes: [N, 7] of rotated bboxes.
    """
    bboxes = np.asarray(bboxes)
    assert bboxes.ndim == 2
    assert bboxes.shape[1] == 7

    # Rotate.
    new_xyzs = bboxes[:, :3]
    new_xyzs = rotate_points_y_front_to_x_front(new_xyzs, inverse=inverse)
    new_dx = bboxes[:, 4]  # Changed to dy.
    new_dy = bboxes[:, 3]  # Changed to dx.
    new_dz = bboxes[:, 5]  # Unchanged.
    new_headings = bboxes[:, 6]  # Unchanged.

    # Pack.
    new_bboxes = np.hstack(
        [
            new_xyzs,
            new_dx[:, None],
            new_dy[:, None],
            new_dz[:, None],
            new_headings[:, None],
        ]
    )

    return new_bboxes


def incident_angles_to_colors(incident_angles):
    """
    Args:
        incident_angles: (N, ) array from 0 to pi.

    Return:
        colors: (N, 3) array in floats.
    """
    # Normalize incident_angles to range [0, 1]
    normalized_angles = incident_angles / np.pi

    # Create a color map: we'll use a simple red-to-blue gradient.
    # Red (1, 0, 0) for angle = pi, and Blue (0, 0, 1) for angle = 0
    colors = np.zeros((len(incident_angles), 3))  # Initialize color array
    colors[:, 0] = normalized_angles  # Red channel
    colors[:, 2] = 1 - normalized_angles  # Blue channel

    return colors


def split_mesh_by_cc(mesh: o3d.geometry.TriangleMesh):
    """
    Split mesh by connected components (delegates to the lit_ext C++ op).
    """
    vertices = np.asarray(mesh.vertices)
    triangles = np.asarray(mesh.triangles)
    vertices = torch.tensor(vertices, dtype=torch.float32)
    triangles = torch.tensor(triangles, dtype=torch.int64)

    start_time = time.time()
    results = lit_ext.split_mesh_by_cc(vertices, triangles)
    print(f"split_mesh_by_cc: {time.time() - start_time:.2f} s")

    meshes = []
    for vertices, triangles in results:
        mesh = o3d.geometry.TriangleMesh()
        mesh.vertices = o3d.utility.Vector3dVector(vertices.numpy())
        mesh.triangles = o3d.utility.Vector3iVector(triangles.numpy())
        meshes.append(mesh)

    return meshes
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/utils.py
Python
import random

import numpy as np
import torch


def seed_everything(seed: int = 0):
    """
    Seed all relevant RNGs (Python `random`, NumPy, PyTorch CPU and CUDA)
    for reproducibility.

    Args:
        seed: Seed value. Defaults to 0, matching the previous hard-coded
            behavior, so existing `seed_everything()` calls are unaffected.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Per PyTorch docs, the CUDA seeding calls are silently ignored when
    # CUDA is unavailable, so this is safe on CPU-only machines.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit/vector_math.h
C/C++ Header
// // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #pragma once #if defined(__CUDACC__) || defined(__CUDABE__) #define SUTIL_HOSTDEVICE __host__ __device__ #define SUTIL_INLINE __forceinline__ #define CONST_STATIC_INIT(...) #else #define SUTIL_HOSTDEVICE #define SUTIL_INLINE inline #define CONST_STATIC_INIT(...) 
= __VA_ARGS__ #endif #include <vector_functions.h> #include <vector_types.h> #if !defined(__CUDACC_RTC__) #include <cmath> #include <cstdlib> #endif /* scalar functions used in vector functions */ #ifndef M_PIf #define M_PIf 3.14159265358979323846f #endif #ifndef M_PI_2f #define M_PI_2f 1.57079632679489661923f #endif #ifndef M_1_PIf #define M_1_PIf 0.318309886183790671538f #endif #if !defined(__CUDACC__) SUTIL_INLINE SUTIL_HOSTDEVICE int max(int a, int b) { return a > b ? a : b; } SUTIL_INLINE SUTIL_HOSTDEVICE int min(int a, int b) { return a < b ? a : b; } SUTIL_INLINE SUTIL_HOSTDEVICE long long max(long long a, long long b) { return a > b ? a : b; } SUTIL_INLINE SUTIL_HOSTDEVICE long long min(long long a, long long b) { return a < b ? a : b; } SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int max(unsigned int a, unsigned int b) { return a > b ? a : b; } SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int min(unsigned int a, unsigned int b) { return a < b ? a : b; } SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long max(unsigned long long a, unsigned long long b) { return a > b ? a : b; } SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long min(unsigned long long a, unsigned long long b) { return a < b ? 
a : b; } /** lerp */ SUTIL_INLINE SUTIL_HOSTDEVICE float lerp(const float a, const float b, const float t) { return a + t * (b - a); } /** bilerp */ SUTIL_INLINE SUTIL_HOSTDEVICE float bilerp(const float x00, const float x10, const float x01, const float x11, const float u, const float v) { return lerp(lerp(x00, x10, u), lerp(x01, x11, u), v); } template <typename IntegerType> SUTIL_INLINE SUTIL_HOSTDEVICE IntegerType roundUp(IntegerType x, IntegerType y) { return ((x + y - 1) / y) * y; } #endif /** clamp */ SUTIL_INLINE SUTIL_HOSTDEVICE float clamp(const float f, const float a, const float b) { return fmaxf(a, fminf(f, b)); } /* float2 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const float s) { return make_float2(s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const int2& a) { return make_float2(float(a.x), float(a.y)); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const uint2& a) { return make_float2(float(a.x), float(a.y)); } /** @} */ /** negate */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float2& a) { return make_float2(-a.x, -a.y); } /** min * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 fminf(const float2& a, const float2& b) { return make_float2(fminf(a.x, b.x), fminf(a.y, b.y)); } SUTIL_INLINE SUTIL_HOSTDEVICE float fminf(const float2& a) { return fminf(a.x, a.y); } /** @} */ /** max * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 fmaxf(const float2& a, const float2& b) { return make_float2(fmaxf(a.x, b.x), fmaxf(a.y, b.y)); } SUTIL_INLINE SUTIL_HOSTDEVICE float fmaxf(const float2& a) { return fmaxf(a.x, a.y); } /** @} */ /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator+(const float2& a, const float2& b) { return make_float2(a.x + b.x, a.y + b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator+(const float2& a, const float b) { return make_float2(a.x + b, a.y + b); } SUTIL_INLINE 
SUTIL_HOSTDEVICE float2 operator+(const float a, const float2& b) { return make_float2(a + b.x, a + b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(float2& a, const float2& b) { a.x += b.x; a.y += b.y; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float2& a, const float2& b) { return make_float2(a.x - b.x, a.y - b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float2& a, const float b) { return make_float2(a.x - b, a.y - b); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float a, const float2& b) { return make_float2(a - b.x, a - b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(float2& a, const float2& b) { a.x -= b.x; a.y -= b.y; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator*(const float2& a, const float2& b) { return make_float2(a.x * b.x, a.y * b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator*(const float2& a, const float s) { return make_float2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator*(const float s, const float2& a) { return make_float2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float2& a, const float2& s) { a.x *= s.x; a.y *= s.y; } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float2& a, const float s) { a.x *= s; a.y *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator/(const float2& a, const float2& b) { return make_float2(a.x / b.x, a.y / b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator/(const float2& a, const float s) { float inv = 1.0f / s; return a * inv; } SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator/(const float s, const float2& a) { return make_float2(s / a.x, s / a.y); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(float2& a, const float s) { float inv = 1.0f / s; a *= inv; } /** @} */ /** lerp */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 lerp(const float2& a, const float2& b, const float t) { return a + t * (b - a); } /** bilerp */ SUTIL_INLINE 
SUTIL_HOSTDEVICE float2 bilerp(const float2& x00, const float2& x10, const float2& x01, const float2& x11, const float u, const float v) { return lerp(lerp(x00, x10, u), lerp(x01, x11, u), v); } /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 clamp(const float2& v, const float a, const float b) { return make_float2(clamp(v.x, a, b), clamp(v.y, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE float2 clamp(const float2& v, const float2& a, const float2& b) { return make_float2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } /** @} */ /** dot product */ SUTIL_INLINE SUTIL_HOSTDEVICE float dot(const float2& a, const float2& b) { return a.x * b.x + a.y * b.y; } /** length */ SUTIL_INLINE SUTIL_HOSTDEVICE float length(const float2& v) { return sqrtf(dot(v, v)); } /** normalize */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 normalize(const float2& v) { float invLen = 1.0f / sqrtf(dot(v, v)); return v * invLen; } /** floor */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 floor(const float2& v) { return make_float2(::floorf(v.x), ::floorf(v.y)); } /** reflect */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 reflect(const float2& i, const float2& n) { return i - 2.0f * n * dot(n, i); } /** Faceforward * Returns N if dot(i, nref) > 0; else -N; * Typical usage is N = faceforward(N, -ray.dir, N); * Note that this is opposite of what faceforward does in Cg and GLSL */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 faceforward(const float2& n, const float2& i, const float2& nref) { return n * copysignf(1.0f, dot(i, nref)); } /** exp */ SUTIL_INLINE SUTIL_HOSTDEVICE float2 expf(const float2& v) { return make_float2(::expf(v.x), ::expf(v.y)); } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE float getByIndex(const float2& v, int i) { return ((float*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(float2& v, int i, float x) { ((float*)(&v))[i] = x; } /* float3 functions */ 
/******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float s) { return make_float3(s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float2& a) { return make_float3(a.x, a.y, 0.0f); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const int3& a) { return make_float3(float(a.x), float(a.y), float(a.z)); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const uint3& a) { return make_float3(float(a.x), float(a.y), float(a.z)); } /** @} */ /** negate */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator-(const float3& a) { return make_float3(-a.x, -a.y, -a.z); } /** min * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 fminf(const float3& a, const float3& b) { return make_float3(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z)); } SUTIL_INLINE SUTIL_HOSTDEVICE float fminf(const float3& a) { return fminf(fminf(a.x, a.y), a.z); } /** @} */ /** max * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 fmaxf(const float3& a, const float3& b) { return make_float3(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z)); } SUTIL_INLINE SUTIL_HOSTDEVICE float fmaxf(const float3& a) { return fmaxf(fmaxf(a.x, a.y), a.z); } /** @} */ /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator+(const float3& a, const float3& b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator+(const float3& a, const float b) { return make_float3(a.x + b, a.y + b, a.z + b); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator+(const float a, const float3& b) { return make_float3(a + b.x, a + b.y, a + b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(float3& a, const float3& b) { a.x += b.x; a.y += b.y; a.z += b.z; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator-(const float3& a, const float3& b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 
operator-(const float3& a, const float b) { return make_float3(a.x - b, a.y - b, a.z - b); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator-(const float a, const float3& b) { return make_float3(a - b.x, a - b.y, a - b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(float3& a, const float3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator*(const float3& a, const float3& b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator*(const float3& a, const float s) { return make_float3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator*(const float s, const float3& a) { return make_float3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float3& a, const float3& s) { a.x *= s.x; a.y *= s.y; a.z *= s.z; } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float3& a, const float s) { a.x *= s; a.y *= s; a.z *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator/(const float3& a, const float3& b) { return make_float3(a.x / b.x, a.y / b.y, a.z / b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator/(const float3& a, const float s) { float inv = 1.0f / s; return a * inv; } SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator/(const float s, const float3& a) { return make_float3(s / a.x, s / a.y, s / a.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(float3& a, const float s) { float inv = 1.0f / s; a *= inv; } /** @} */ /** lerp */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 lerp(const float3& a, const float3& b, const float t) { return a + t * (b - a); } /** bilerp */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 bilerp(const float3& x00, const float3& x10, const float3& x01, const float3& x11, const float u, const float v) { return lerp(lerp(x00, x10, u), lerp(x01, x11, u), v); } /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 clamp(const float3& v, const float a, const float b) { return 
make_float3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE float3 clamp(const float3& v, const float3& a, const float3& b) { return make_float3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } /** @} */ /** dot product */ SUTIL_INLINE SUTIL_HOSTDEVICE float dot(const float3& a, const float3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; } /** cross product */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 cross(const float3& a, const float3& b) { return make_float3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); } /** length */ SUTIL_INLINE SUTIL_HOSTDEVICE float length(const float3& v) { return sqrtf(dot(v, v)); } /** normalize */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 normalize(const float3& v) { float invLen = 1.0f / sqrtf(dot(v, v)); return v * invLen; } /** floor */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 floor(const float3& v) { return make_float3(::floorf(v.x), ::floorf(v.y), ::floorf(v.z)); } /** reflect */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 reflect(const float3& i, const float3& n) { return i - 2.0f * n * dot(n, i); } /** Faceforward * Returns N if dot(i, nref) > 0; else -N; * Typical usage is N = faceforward(N, -ray.dir, N); * Note that this is opposite of what faceforward does in Cg and GLSL */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 faceforward(const float3& n, const float3& i, const float3& nref) { return n * copysignf(1.0f, dot(i, nref)); } /** exp */ SUTIL_INLINE SUTIL_HOSTDEVICE float3 expf(const float3& v) { return make_float3(::expf(v.x), ::expf(v.y), ::expf(v.z)); } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE float getByIndex(const float3& v, int i) { return ((float*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(float3& v, int i, float x) { ((float*)(&v))[i] = x; } /* float4 functions */ 
/******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float s) { return make_float4(s, s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float3& a) { return make_float4(a.x, a.y, a.z, 0.0f); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const int4& a) { return make_float4(float(a.x), float(a.y), float(a.z), float(a.w)); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const uint4& a) { return make_float4(float(a.x), float(a.y), float(a.z), float(a.w)); } /** @} */ /** negate */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator-(const float4& a) { return make_float4(-a.x, -a.y, -a.z, -a.w); } /** min * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 fminf(const float4& a, const float4& b) { return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w)); } SUTIL_INLINE SUTIL_HOSTDEVICE float fminf(const float4& a) { return fminf(fminf(a.x, a.y), fminf(a.z, a.w)); } /** @} */ /** max * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 fmaxf(const float4& a, const float4& b) { return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w)); } SUTIL_INLINE SUTIL_HOSTDEVICE float fmaxf(const float4& a) { return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w)); } /** @} */ /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator+(const float4& a, const float4& b) { return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator+(const float4& a, const float b) { return make_float4(a.x + b, a.y + b, a.z + b, a.w + b); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator+(const float a, const float4& b) { return make_float4(a + b.x, a + b.y, a + b.z, a + b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(float4& a, const float4& b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 
operator-(const float4& a, const float4& b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator-(const float4& a, const float b) { return make_float4(a.x - b, a.y - b, a.z - b, a.w - b); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator-(const float a, const float4& b) { return make_float4(a - b.x, a - b.y, a - b.z, a - b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(float4& a, const float4& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator*(const float4& a, const float4& s) { return make_float4(a.x * s.x, a.y * s.y, a.z * s.z, a.w * s.w); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator*(const float4& a, const float s) { return make_float4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator*(const float s, const float4& a) { return make_float4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float4& a, const float4& s) { a.x *= s.x; a.y *= s.y; a.z *= s.z; a.w *= s.w; } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float4& a, const float s) { a.x *= s; a.y *= s; a.z *= s; a.w *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator/(const float4& a, const float4& b) { return make_float4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator/(const float4& a, const float s) { float inv = 1.0f / s; return a * inv; } SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator/(const float s, const float4& a) { return make_float4(s / a.x, s / a.y, s / a.z, s / a.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(float4& a, const float s) { float inv = 1.0f / s; a *= inv; } /** @} */ /** lerp */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 lerp(const float4& a, const float4& b, const float t) { return a + t * (b - a); } /** bilerp */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 bilerp(const float4& x00, const float4& x10, 
const float4& x01, const float4& x11, const float u, const float v) { return lerp(lerp(x00, x10, u), lerp(x01, x11, u), v); } /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 clamp(const float4& v, const float a, const float b) { return make_float4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE float4 clamp(const float4& v, const float4& a, const float4& b) { return make_float4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } /** @} */ /** dot product */ SUTIL_INLINE SUTIL_HOSTDEVICE float dot(const float4& a, const float4& b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; } /** length */ SUTIL_INLINE SUTIL_HOSTDEVICE float length(const float4& r) { return sqrtf(dot(r, r)); } /** normalize */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 normalize(const float4& v) { float invLen = 1.0f / sqrtf(dot(v, v)); return v * invLen; } /** floor */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 floor(const float4& v) { return make_float4(::floorf(v.x), ::floorf(v.y), ::floorf(v.z), ::floorf(v.w)); } /** reflect */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 reflect(const float4& i, const float4& n) { return i - 2.0f * n * dot(n, i); } /** * Faceforward * Returns N if dot(i, nref) > 0; else -N; * Typical usage is N = faceforward(N, -ray.dir, N); * Note that this is opposite of what faceforward does in Cg and GLSL */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 faceforward(const float4& n, const float4& i, const float4& nref) { return n * copysignf(1.0f, dot(i, nref)); } /** exp */ SUTIL_INLINE SUTIL_HOSTDEVICE float4 expf(const float4& v) { return make_float4(::expf(v.x), ::expf(v.y), ::expf(v.z), ::expf(v.w)); } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE float getByIndex(const float4& v, int i) { return ((float*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void 
setByIndex(float4& v, int i, float x) { ((float*)(&v))[i] = x; } /* int functions */ /******************************************************************************/ /** clamp */ SUTIL_INLINE SUTIL_HOSTDEVICE int clamp(const int f, const int a, const int b) { return max(a, min(f, b)); } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int1& v, int i) { return ((int*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int1& v, int i, int x) { ((int*)(&v))[i] = x; } /* int2 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const int s) { return make_int2(s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const float2& a) { return make_int2(int(a.x), int(a.y)); } /** @} */ /** negate */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator-(const int2& a) { return make_int2(-a.x, -a.y); } /** min */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 min(const int2& a, const int2& b) { return make_int2(min(a.x, b.x), min(a.y, b.y)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 max(const int2& a, const int2& b) { return make_int2(max(a.x, b.x), max(a.y, b.y)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator+(const int2& a, const int2& b) { return make_int2(a.x + b.x, a.y + b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(int2& a, const int2& b) { a.x += b.x; a.y += b.y; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator-(const int2& a, const int2& b) { return make_int2(a.x - b.x, a.y - b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator-(const int2& a, const int b) { return make_int2(a.x - b, a.y - b); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(int2& a, const int2& b) { a.x -= b.x; a.y -= b.y; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 
operator*(const int2& a, const int2& b) { return make_int2(a.x * b.x, a.y * b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator*(const int2& a, const int s) { return make_int2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator*(const int s, const int2& a) { return make_int2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(int2& a, const int s) { a.x *= s; a.y *= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 clamp(const int2& v, const int a, const int b) { return make_int2(clamp(v.x, a, b), clamp(v.y, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE int2 clamp(const int2& v, const int2& a, const int2& b) { return make_int2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const int2& a, const int2& b) { return a.x == b.x && a.y == b.y; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const int2& a, const int2& b) { return a.x != b.x || a.y != b.y; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int2& v, int i) { return ((int*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int2& v, int i, int x) { ((int*)(&v))[i] = x; } /* int3 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int s) { return make_int3(s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const float3& a) { return make_int3(int(a.x), int(a.y), int(a.z)); } /** @} */ /** negate */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator-(const int3& a) { return make_int3(-a.x, -a.y, -a.z); } /** min */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 min(const int3& a, const int3& b) { return make_int3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 max(const 
int3& a, const int3& b) { return make_int3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator+(const int3& a, const int3& b) { return make_int3(a.x + b.x, a.y + b.y, a.z + b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(int3& a, const int3& b) { a.x += b.x; a.y += b.y; a.z += b.z; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator-(const int3& a, const int3& b) { return make_int3(a.x - b.x, a.y - b.y, a.z - b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(int3& a, const int3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator*(const int3& a, const int3& b) { return make_int3(a.x * b.x, a.y * b.y, a.z * b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator*(const int3& a, const int s) { return make_int3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator*(const int s, const int3& a) { return make_int3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(int3& a, const int s) { a.x *= s; a.y *= s; a.z *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator/(const int3& a, const int3& b) { return make_int3(a.x / b.x, a.y / b.y, a.z / b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator/(const int3& a, const int s) { return make_int3(a.x / s, a.y / s, a.z / s); } SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator/(const int s, const int3& a) { return make_int3(s / a.x, s / a.y, s / a.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(int3& a, const int s) { a.x /= s; a.y /= s; a.z /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int3 clamp(const int3& v, const int a, const int b) { return make_int3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE int3 clamp(const int3& v, const int3& a, const int3& b) { return make_int3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } /** @} */ 
/** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const int3& a, const int3& b) { return a.x == b.x && a.y == b.y && a.z == b.z; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const int3& a, const int3& b) { return a.x != b.x || a.y != b.y || a.z != b.z; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int3& v, int i) { return ((int*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int3& v, int i, int x) { ((int*)(&v))[i] = x; } /* int4 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int s) { return make_int4(s, s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const float4& a) { return make_int4((int)a.x, (int)a.y, (int)a.z, (int)a.w); } /** @} */ /** negate */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator-(const int4& a) { return make_int4(-a.x, -a.y, -a.z, -a.w); } /** min */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 min(const int4& a, const int4& b) { return make_int4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z), min(a.w, b.w)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 max(const int4& a, const int4& b) { return make_int4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z), max(a.w, b.w)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator+(const int4& a, const int4& b) { return make_int4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(int4& a, const int4& b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator-(const int4& a, const int4& b) { return make_int4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(int4& a, const int4& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= 
b.w; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator*(const int4& a, const int4& b) { return make_int4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator*(const int4& a, const int s) { return make_int4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator*(const int s, const int4& a) { return make_int4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(int4& a, const int s) { a.x *= s; a.y *= s; a.z *= s; a.w *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator/(const int4& a, const int4& b) { return make_int4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator/(const int4& a, const int s) { return make_int4(a.x / s, a.y / s, a.z / s, a.w / s); } SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator/(const int s, const int4& a) { return make_int4(s / a.x, s / a.y, s / a.z, s / a.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(int4& a, const int s) { a.x /= s; a.y /= s; a.z /= s; a.w /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int4 clamp(const int4& v, const int a, const int b) { return make_int4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE int4 clamp(const int4& v, const int4& a, const int4& b) { return make_int4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const int4& a, const int4& b) { return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const int4& a, const int4& b) { return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int4& v, int i) { return ((int*)(&v))[i]; } /** If 
used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int4& v, int i, int x) { ((int*)(&v))[i] = x; } /* uint functions */ /******************************************************************************/ /** clamp */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int clamp(const unsigned int f, const unsigned int a, const unsigned int b) { return max(a, min(f, b)); } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint1& v, unsigned int i) { return ((unsigned int*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint1& v, int i, unsigned int x) { ((unsigned int*)(&v))[i] = x; } /* uint2 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const unsigned int s) { return make_uint2(s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const float2& a) { return make_uint2((unsigned int)a.x, (unsigned int)a.y); } /** @} */ /** min */ SUTIL_INLINE SUTIL_HOSTDEVICE uint2 min(const uint2& a, const uint2& b) { return make_uint2(min(a.x, b.x), min(a.y, b.y)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE uint2 max(const uint2& a, const uint2& b) { return make_uint2(max(a.x, b.x), max(a.y, b.y)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator+(const uint2& a, const uint2& b) { return make_uint2(a.x + b.x, a.y + b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(uint2& a, const uint2& b) { a.x += b.x; a.y += b.y; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator-(const uint2& a, const uint2& b) { return make_uint2(a.x - b.x, a.y - b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator-(const uint2& a, const unsigned int b) { return make_uint2(a.x - b, a.y - b); } SUTIL_INLINE SUTIL_HOSTDEVICE 
void operator-=(uint2& a, const uint2& b) { a.x -= b.x; a.y -= b.y; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator*(const uint2& a, const uint2& b) { return make_uint2(a.x * b.x, a.y * b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator*(const uint2& a, const unsigned int s) { return make_uint2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator*(const unsigned int s, const uint2& a) { return make_uint2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(uint2& a, const unsigned int s) { a.x *= s; a.y *= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint2 clamp(const uint2& v, const unsigned int a, const unsigned int b) { return make_uint2(clamp(v.x, a, b), clamp(v.y, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE uint2 clamp(const uint2& v, const uint2& a, const uint2& b) { return make_uint2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const uint2& a, const uint2& b) { return a.x == b.x && a.y == b.y; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const uint2& a, const uint2& b) { return a.x != b.x || a.y != b.y; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint2& v, unsigned int i) { return ((unsigned int*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint2& v, int i, unsigned int x) { ((unsigned int*)(&v))[i] = x; } /* uint3 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const unsigned int s) { return make_uint3(s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const float3& a) { return make_uint3((unsigned int)a.x, (unsigned int)a.y, (unsigned int)a.z); } /** @} */ /** min */ SUTIL_INLINE 
SUTIL_HOSTDEVICE uint3 min(const uint3& a, const uint3& b) { return make_uint3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE uint3 max(const uint3& a, const uint3& b) { return make_uint3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator+(const uint3& a, const uint3& b) { return make_uint3(a.x + b.x, a.y + b.y, a.z + b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(uint3& a, const uint3& b) { a.x += b.x; a.y += b.y; a.z += b.z; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator-(const uint3& a, const uint3& b) { return make_uint3(a.x - b.x, a.y - b.y, a.z - b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(uint3& a, const uint3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator*(const uint3& a, const uint3& b) { return make_uint3(a.x * b.x, a.y * b.y, a.z * b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator*(const uint3& a, const unsigned int s) { return make_uint3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator*(const unsigned int s, const uint3& a) { return make_uint3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(uint3& a, const unsigned int s) { a.x *= s; a.y *= s; a.z *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator/(const uint3& a, const uint3& b) { return make_uint3(a.x / b.x, a.y / b.y, a.z / b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator/(const uint3& a, const unsigned int s) { return make_uint3(a.x / s, a.y / s, a.z / s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator/(const unsigned int s, const uint3& a) { return make_uint3(s / a.x, s / a.y, s / a.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(uint3& a, const unsigned int s) { a.x /= s; a.y /= s; a.z /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint3 clamp(const uint3& v, const 
unsigned int a, const unsigned int b) { return make_uint3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE uint3 clamp(const uint3& v, const uint3& a, const uint3& b) { return make_uint3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const uint3& a, const uint3& b) { return a.x == b.x && a.y == b.y && a.z == b.z; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const uint3& a, const uint3& b) { return a.x != b.x || a.y != b.y || a.z != b.z; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint3& v, unsigned int i) { return ((unsigned int*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint3& v, int i, unsigned int x) { ((unsigned int*)(&v))[i] = x; } /* uint4 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int s) { return make_uint4(s, s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const float4& a) { return make_uint4((unsigned int)a.x, (unsigned int)a.y, (unsigned int)a.z, (unsigned int)a.w); } /** @} */ /** min * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 min(const uint4& a, const uint4& b) { return make_uint4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z), min(a.w, b.w)); } /** @} */ /** max * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 max(const uint4& a, const uint4& b) { return make_uint4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z), max(a.w, b.w)); } /** @} */ /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator+(const uint4& a, const uint4& b) { return make_uint4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(uint4& a, const uint4& b) { a.x += 
b.x; a.y += b.y; a.z += b.z; a.w += b.w; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator-(const uint4& a, const uint4& b) { return make_uint4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(uint4& a, const uint4& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator*(const uint4& a, const uint4& b) { return make_uint4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator*(const uint4& a, const unsigned int s) { return make_uint4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator*(const unsigned int s, const uint4& a) { return make_uint4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(uint4& a, const unsigned int s) { a.x *= s; a.y *= s; a.z *= s; a.w *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator/(const uint4& a, const uint4& b) { return make_uint4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator/(const uint4& a, const unsigned int s) { return make_uint4(a.x / s, a.y / s, a.z / s, a.w / s); } SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator/(const unsigned int s, const uint4& a) { return make_uint4(s / a.x, s / a.y, s / a.z, s / a.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(uint4& a, const unsigned int s) { a.x /= s; a.y /= s; a.z /= s; a.w /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE uint4 clamp(const uint4& v, const unsigned int a, const unsigned int b) { return make_uint4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE uint4 clamp(const uint4& v, const uint4& a, const uint4& b) { return make_uint4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE 
bool operator==(const uint4& a, const uint4& b)
{
    return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const uint4& a, const uint4& b)
{
    return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w;
}
/** @} */

/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint4& v, unsigned int i)
{
    return ((unsigned int*)(&v))[i];
}

/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint4& v, int i, unsigned int x)
{
    ((unsigned int*)(&v))[i] = x;
}

/* long long functions */
/******************************************************************************/

/** clamp */
SUTIL_INLINE SUTIL_HOSTDEVICE long long clamp(const long long f, const long long a, const long long b)
{
    return max(a, min(f, b));
}

/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong1& v, int i)
{
    return ((long long*)(&v))[i];
}

/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(longlong1& v, int i, long long x)
{
    ((long long*)(&v))[i] = x;
}

/* longlong2 functions */
/******************************************************************************/

/** additional constructors
* @{ */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const long long s)
{
    return make_longlong2(s, s);
}
/* BUGFIX: previously cast through int(), which truncated any component outside
   the 32-bit range before widening back to long long. Cast directly to
   long long, consistent with make_longlong3() and make_longlong4(). */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const float2& a)
{
    return make_longlong2((long long)a.x, (long long)a.y);
}
/** @} */

/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator-(const longlong2& a)
{
    return make_longlong2(-a.x, -a.y);
}

/** min */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 min(const longlong2& a, const longlong2& b)
{
    return make_longlong2(min(a.x, b.x), min(a.y, b.y));
}

/** max */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 max(const
longlong2& a, const longlong2& b) { return make_longlong2(max(a.x, b.x), max(a.y, b.y)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator+(const longlong2& a, const longlong2& b) { return make_longlong2(a.x + b.x, a.y + b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(longlong2& a, const longlong2& b) { a.x += b.x; a.y += b.y; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator-(const longlong2& a, const longlong2& b) { return make_longlong2(a.x - b.x, a.y - b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator-(const longlong2& a, const long long b) { return make_longlong2(a.x - b, a.y - b); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(longlong2& a, const longlong2& b) { a.x -= b.x; a.y -= b.y; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator*(const longlong2& a, const longlong2& b) { return make_longlong2(a.x * b.x, a.y * b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator*(const longlong2& a, const long long s) { return make_longlong2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator*(const long long s, const longlong2& a) { return make_longlong2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(longlong2& a, const long long s) { a.x *= s; a.y *= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 clamp(const longlong2& v, const long long a, const long long b) { return make_longlong2(clamp(v.x, a, b), clamp(v.y, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 clamp(const longlong2& v, const longlong2& a, const longlong2& b) { return make_longlong2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const longlong2& a, const longlong2& b) { return a.x == b.x && a.y == b.y; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const longlong2& a, const longlong2& b) { return a.x != b.x || a.y != b.y; } /** @} */ /** If used on the device, this 
could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong2& v, int i) { return ((long long*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(longlong2& v, int i, long long x) { ((long long*)(&v))[i] = x; } /* longlong3 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const long long s) { return make_longlong3(s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const float3& a) { return make_longlong3((long long)a.x, (long long)a.y, (long long)a.z); } /** @} */ /** negate */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator-(const longlong3& a) { return make_longlong3(-a.x, -a.y, -a.z); } /** min */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 min(const longlong3& a, const longlong3& b) { return make_longlong3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 max(const longlong3& a, const longlong3& b) { return make_longlong3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator+(const longlong3& a, const longlong3& b) { return make_longlong3(a.x + b.x, a.y + b.y, a.z + b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(longlong3& a, const longlong3& b) { a.x += b.x; a.y += b.y; a.z += b.z; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator-(const longlong3& a, const longlong3& b) { return make_longlong3(a.x - b.x, a.y - b.y, a.z - b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(longlong3& a, const longlong3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator*(const longlong3& a, const longlong3& b) { return make_longlong3(a.x * b.x, a.y * b.y, a.z * b.z); } SUTIL_INLINE 
SUTIL_HOSTDEVICE longlong3 operator*(const longlong3& a, const long long s) { return make_longlong3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator*(const long long s, const longlong3& a) { return make_longlong3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(longlong3& a, const long long s) { a.x *= s; a.y *= s; a.z *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator/(const longlong3& a, const longlong3& b) { return make_longlong3(a.x / b.x, a.y / b.y, a.z / b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator/(const longlong3& a, const long long s) { return make_longlong3(a.x / s, a.y / s, a.z / s); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator/(const long long s, const longlong3& a) { return make_longlong3(s / a.x, s / a.y, s / a.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(longlong3& a, const long long s) { a.x /= s; a.y /= s; a.z /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 clamp(const longlong3& v, const long long a, const long long b) { return make_longlong3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 clamp(const longlong3& v, const longlong3& a, const longlong3& b) { return make_longlong3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const longlong3& a, const longlong3& b) { return a.x == b.x && a.y == b.y && a.z == b.z; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const longlong3& a, const longlong3& b) { return a.x != b.x || a.y != b.y || a.z != b.z; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong3& v, int i) { return ((long long*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void 
setByIndex(longlong3& v, int i, long long x)
{
    /* BUGFIX: the value parameter was declared 'int', silently truncating any
       64-bit value written into a longlong3 lane. Widened to long long to
       match every other setByIndex overload (longlong1/2/4); widening the
       parameter type is backward compatible for existing callers. */
    ((long long*)(&v))[i] = x;
}

/* longlong4 functions */
/******************************************************************************/

/** additional constructors
* @{ */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long s)
{
    return make_longlong4(s, s, s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const float4& a)
{
    return make_longlong4((long long)a.x, (long long)a.y, (long long)a.z, (long long)a.w);
}
/** @} */

/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator-(const longlong4& a)
{
    return make_longlong4(-a.x, -a.y, -a.z, -a.w);
}

/** min */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 min(const longlong4& a, const longlong4& b)
{
    return make_longlong4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z), min(a.w, b.w));
}

/** max */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 max(const longlong4& a, const longlong4& b)
{
    return make_longlong4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z), max(a.w, b.w));
}

/** add
* @{ */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator+(const longlong4& a, const longlong4& b)
{
    return make_longlong4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(longlong4& a, const longlong4& b)
{
    a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w;
}
/** @} */

/** subtract
* @{ */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator-(const longlong4& a, const longlong4& b)
{
    return make_longlong4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(longlong4& a, const longlong4& b)
{
    a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w;
}
/** @} */

/** multiply
* @{ */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator*(const longlong4& a, const longlong4& b)
{
    return make_longlong4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator*(const longlong4& a, const long long s)
{
    return make_longlong4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE
SUTIL_HOSTDEVICE longlong4 operator*(const long long s, const longlong4& a) { return make_longlong4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(longlong4& a, const long long s) { a.x *= s; a.y *= s; a.z *= s; a.w *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator/(const longlong4& a, const longlong4& b) { return make_longlong4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator/(const longlong4& a, const long long s) { return make_longlong4(a.x / s, a.y / s, a.z / s, a.w / s); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator/(const long long s, const longlong4& a) { return make_longlong4(s / a.x, s / a.y, s / a.z, s / a.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(longlong4& a, const long long s) { a.x /= s; a.y /= s; a.z /= s; a.w /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 clamp(const longlong4& v, const long long a, const long long b) { return make_longlong4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 clamp(const longlong4& v, const longlong4& a, const longlong4& b) { return make_longlong4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const longlong4& a, const longlong4& b) { return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const longlong4& a, const longlong4& b) { return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong4& v, int i) { return ((long long*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(longlong4& v, int 
i, long long x) { ((long long*)(&v))[i] = x; } /* ulonglong functions */ /******************************************************************************/ /** clamp */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long clamp( const unsigned long long f, const unsigned long long a, const unsigned long long b) { return max(a, min(f, b)); } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong1& v, unsigned int i) { return ((unsigned long long*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong1& v, int i, unsigned long long x) { ((unsigned long long*)(&v))[i] = x; } /* ulonglong2 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 make_ulonglong2(const unsigned long long s) { return make_ulonglong2(s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 make_ulonglong2(const float2& a) { return make_ulonglong2((unsigned long long)a.x, (unsigned long long)a.y); } /** @} */ /** min */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 min(const ulonglong2& a, const ulonglong2& b) { return make_ulonglong2(min(a.x, b.x), min(a.y, b.y)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 max(const ulonglong2& a, const ulonglong2& b) { return make_ulonglong2(max(a.x, b.x), max(a.y, b.y)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator+(const ulonglong2& a, const ulonglong2& b) { return make_ulonglong2(a.x + b.x, a.y + b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(ulonglong2& a, const ulonglong2& b) { a.x += b.x; a.y += b.y; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator-(const ulonglong2& a, const ulonglong2& b) { return make_ulonglong2(a.x - b.x, a.y - b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator-(const 
ulonglong2& a, const unsigned long long b) { return make_ulonglong2(a.x - b, a.y - b); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(ulonglong2& a, const ulonglong2& b) { a.x -= b.x; a.y -= b.y; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator*(const ulonglong2& a, const ulonglong2& b) { return make_ulonglong2(a.x * b.x, a.y * b.y); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator*(const ulonglong2& a, const unsigned long long s) { return make_ulonglong2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator*(const unsigned long long s, const ulonglong2& a) { return make_ulonglong2(a.x * s, a.y * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(ulonglong2& a, const unsigned long long s) { a.x *= s; a.y *= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 clamp(const ulonglong2& v, const unsigned long long a, const unsigned long long b) { return make_ulonglong2(clamp(v.x, a, b), clamp(v.y, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 clamp(const ulonglong2& v, const ulonglong2& a, const ulonglong2& b) { return make_ulonglong2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const ulonglong2& a, const ulonglong2& b) { return a.x == b.x && a.y == b.y; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const ulonglong2& a, const ulonglong2& b) { return a.x != b.x || a.y != b.y; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong2& v, unsigned int i) { return ((unsigned long long*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong2& v, int i, unsigned long long x) { ((unsigned long long*)(&v))[i] = x; } /* ulonglong3 functions */ /******************************************************************************/ /** 
additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 make_ulonglong3(const unsigned long long s) { return make_ulonglong3(s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 make_ulonglong3(const float3& a) { return make_ulonglong3((unsigned long long)a.x, (unsigned long long)a.y, (unsigned long long)a.z); } /** @} */ /** min */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 min(const ulonglong3& a, const ulonglong3& b) { return make_ulonglong3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z)); } /** max */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 max(const ulonglong3& a, const ulonglong3& b) { return make_ulonglong3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z)); } /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator+(const ulonglong3& a, const ulonglong3& b) { return make_ulonglong3(a.x + b.x, a.y + b.y, a.z + b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(ulonglong3& a, const ulonglong3& b) { a.x += b.x; a.y += b.y; a.z += b.z; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator-(const ulonglong3& a, const ulonglong3& b) { return make_ulonglong3(a.x - b.x, a.y - b.y, a.z - b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(ulonglong3& a, const ulonglong3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator*(const ulonglong3& a, const ulonglong3& b) { return make_ulonglong3(a.x * b.x, a.y * b.y, a.z * b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator*(const ulonglong3& a, const unsigned long long s) { return make_ulonglong3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator*(const unsigned long long s, const ulonglong3& a) { return make_ulonglong3(a.x * s, a.y * s, a.z * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(ulonglong3& a, const unsigned long long s) { a.x *= s; a.y *= s; a.z *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator/(const ulonglong3& 
a, const ulonglong3& b) { return make_ulonglong3(a.x / b.x, a.y / b.y, a.z / b.z); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator/(const ulonglong3& a, const unsigned long long s) { return make_ulonglong3(a.x / s, a.y / s, a.z / s); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator/(const unsigned long long s, const ulonglong3& a) { return make_ulonglong3(s / a.x, s / a.y, s / a.z); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(ulonglong3& a, const unsigned long long s) { a.x /= s; a.y /= s; a.z /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 clamp(const ulonglong3& v, const unsigned long long a, const unsigned long long b) { return make_ulonglong3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 clamp(const ulonglong3& v, const ulonglong3& a, const ulonglong3& b) { return make_ulonglong3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const ulonglong3& a, const ulonglong3& b) { return a.x == b.x && a.y == b.y && a.z == b.z; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const ulonglong3& a, const ulonglong3& b) { return a.x != b.x || a.y != b.y || a.z != b.z; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong3& v, unsigned int i) { return ((unsigned long long*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong3& v, int i, unsigned long long x) { ((unsigned long long*)(&v))[i] = x; } /* ulonglong4 functions */ /******************************************************************************/ /** additional constructors * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const unsigned long long s) { return make_ulonglong4(s, s, s, s); } SUTIL_INLINE SUTIL_HOSTDEVICE 
ulonglong4 make_ulonglong4(const float4& a) { return make_ulonglong4((unsigned long long)a.x, (unsigned long long)a.y, (unsigned long long)a.z, (unsigned long long)a.w); } /** @} */ /** min * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 min(const ulonglong4& a, const ulonglong4& b) { return make_ulonglong4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z), min(a.w, b.w)); } /** @} */ /** max * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 max(const ulonglong4& a, const ulonglong4& b) { return make_ulonglong4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z), max(a.w, b.w)); } /** @} */ /** add * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator+(const ulonglong4& a, const ulonglong4& b) { return make_ulonglong4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(ulonglong4& a, const ulonglong4& b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } /** @} */ /** subtract * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator-(const ulonglong4& a, const ulonglong4& b) { return make_ulonglong4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(ulonglong4& a, const ulonglong4& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; } /** @} */ /** multiply * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator*(const ulonglong4& a, const ulonglong4& b) { return make_ulonglong4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator*(const ulonglong4& a, const unsigned long long s) { return make_ulonglong4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator*(const unsigned long long s, const ulonglong4& a) { return make_ulonglong4(a.x * s, a.y * s, a.z * s, a.w * s); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(ulonglong4& a, const unsigned long long s) { a.x *= s; a.y *= s; a.z *= s; a.w *= s; } /** @} */ /** divide * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator/(const ulonglong4& a, const 
ulonglong4& b) { return make_ulonglong4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator/(const ulonglong4& a, const unsigned long long s) { return make_ulonglong4(a.x / s, a.y / s, a.z / s, a.w / s); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator/(const unsigned long long s, const ulonglong4& a) { return make_ulonglong4(s / a.x, s / a.y, s / a.z, s / a.w); } SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(ulonglong4& a, const unsigned long long s) { a.x /= s; a.y /= s; a.z /= s; a.w /= s; } /** @} */ /** clamp * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 clamp(const ulonglong4& v, const unsigned long long a, const unsigned long long b) { return make_ulonglong4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 clamp(const ulonglong4& v, const ulonglong4& a, const ulonglong4& b) { return make_ulonglong4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } /** @} */ /** equality * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const ulonglong4& a, const ulonglong4& b) { return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w; } SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const ulonglong4& a, const ulonglong4& b) { return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w; } /** @} */ /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong4& v, unsigned int i) { return ((unsigned long long*)(&v))[i]; } /** If used on the device, this could place the the 'v' in local memory */ SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong4& v, int i, unsigned long long x) { ((unsigned long long*)(&v))[i] = x; } /******************************************************************************/ /** Narrowing functions * @{ */ SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const int3& v0) { return make_int2(v0.x, v0.y); 
} // closes a make_* helper whose beginning is above this chunk

/* --------------------------------------------------------------------------
 * Truncate to lower-dimensional vectors (keep leading components).
 * ------------------------------------------------------------------------ */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const int4& v0) { return make_int2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int4& v0) { return make_int3(v0.x, v0.y, v0.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const uint3& v0) { return make_uint2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const uint4& v0) { return make_uint2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const uint4& v0) { return make_uint3(v0.x, v0.y, v0.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const longlong3& v0) { return make_longlong2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const longlong4& v0) { return make_longlong2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const longlong4& v0) { return make_longlong3(v0.x, v0.y, v0.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 make_ulonglong2(const ulonglong3& v0) { return make_ulonglong2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 make_ulonglong2(const ulonglong4& v0) { return make_ulonglong2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 make_ulonglong3(const ulonglong4& v0) { return make_ulonglong3(v0.x, v0.y, v0.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const float3& v0) { return make_float2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const float4& v0) { return make_float2(v0.x, v0.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float4& v0) { return make_float3(v0.x, v0.y, v0.z); }
/** @} */

/** Assemble functions from smaller vectors
 * @{
 */
/* int --------------------------------------------------------------------- */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int v0, const int2& v1) { return make_int3(v0, v1.x, v1.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int2& v0, const int v1) { return make_int3(v0.x, v0.y, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int v0, const int v1, const int2& v2) { return make_int4(v0, v1, v2.x, v2.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int v0, const int2& v1, const int v2) { return make_int4(v0, v1.x, v1.y, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int2& v0, const int v1, const int v2) { return make_int4(v0.x, v0.y, v1, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int v0, const int3& v1) { return make_int4(v0, v1.x, v1.y, v1.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int3& v0, const int v1) { return make_int4(v0.x, v0.y, v0.z, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int2& v0, const int2& v1) { return make_int4(v0.x, v0.y, v1.x, v1.y); }
/* unsigned int ------------------------------------------------------------ */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const unsigned int v0, const uint2& v1) { return make_uint3(v0, v1.x, v1.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const uint2& v0, const unsigned int v1) { return make_uint3(v0.x, v0.y, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int v0, const unsigned int v1, const uint2& v2) { return make_uint4(v0, v1, v2.x, v2.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int v0, const uint2& v1, const unsigned int v2) { return make_uint4(v0, v1.x, v1.y, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const uint2& v0, const unsigned int v1, const unsigned int v2) { return make_uint4(v0.x, v0.y, v1, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int v0, const uint3& v1) { return make_uint4(v0, v1.x, v1.y, v1.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const uint3& v0, const unsigned int v1) { return make_uint4(v0.x, v0.y, v0.z, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const uint2& v0, const uint2& v1) { return make_uint4(v0.x, v0.y, v1.x, v1.y); }
/* long long --------------------------------------------------------------- */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const long long v0, const longlong2& v1) { return make_longlong3(v0, v1.x, v1.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const longlong2& v0, const long long v1) { return make_longlong3(v0.x, v0.y, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long v0, const long long v1, const longlong2& v2) { return make_longlong4(v0, v1, v2.x, v2.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long v0, const longlong2& v1, const long long v2) { return make_longlong4(v0, v1.x, v1.y, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const longlong2& v0, const long long v1, const long long v2) { return make_longlong4(v0.x, v0.y, v1, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long v0, const longlong3& v1) { return make_longlong4(v0, v1.x, v1.y, v1.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const longlong3& v0, const long long v1) { return make_longlong4(v0.x, v0.y, v0.z, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const longlong2& v0, const longlong2& v1) { return make_longlong4(v0.x, v0.y, v1.x, v1.y); }
/* unsigned long long ------------------------------------------------------ */
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 make_ulonglong3(const unsigned long long v0, const ulonglong2& v1) { return make_ulonglong3(v0, v1.x, v1.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 make_ulonglong3(const ulonglong2& v0, const unsigned long long v1) { return make_ulonglong3(v0.x, v0.y, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const unsigned long long v0, const unsigned long long v1, const ulonglong2& v2) { return make_ulonglong4(v0, v1, v2.x, v2.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const unsigned long long v0, const ulonglong2& v1, const unsigned long long v2) { return make_ulonglong4(v0, v1.x, v1.y, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const ulonglong2& v0, const unsigned long long v1, const unsigned long long v2) { return make_ulonglong4(v0.x, v0.y, v1, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const unsigned long long v0, const ulonglong3& v1) { return make_ulonglong4(v0, v1.x, v1.y, v1.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const ulonglong3& v0, const unsigned long long v1) { return make_ulonglong4(v0.x, v0.y, v0.z, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const ulonglong2& v0, const ulonglong2& v1) { return make_ulonglong4(v0.x, v0.y, v1.x, v1.y); }
/* float ------------------------------------------------------------------- */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float2& v0, const float v1) { return make_float3(v0.x, v0.y, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float v0, const float2& v1) { return make_float3(v0, v1.x, v1.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float v0, const float v1, const float2& v2) { return make_float4(v0, v1, v2.x, v2.y); }
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float v0, const float2& v1, const float v2) { return make_float4(v0, v1.x, v1.y, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float2& v0, const float v1, const float v2) { return make_float4(v0.x, v0.y, v1, v2); }
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float v0, const float3& v1) { return make_float4(v0, v1.x, v1.y, v1.z); }
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float3& v0, const float v1) { return make_float4(v0.x, v0.y, v0.z, v1); }
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float2& v0, const float2& v1) { return make_float4(v0.x, v0.y, v1.x, v1.y); }
/** @} */
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/global_configs.py
Python
from types import SimpleNamespace

# Central configuration namespace shared by the lit_tools scripts.
global_configs = SimpleNamespace()

# We only care about the "car" category.
#
# Also see nuscenes_utils.py::map_name_from_general_to_detection
# Type A: treat as foreground, and put back
# Type B: treat as foreground, and remove
# Type C: treat as background, don't care
global_configs.nuscenes_extract_class_names = [
    "car",  # treat as foreground, and put back
    "truck",
    "construction_vehicle",
    "bus",
    "trailer",
    # "barrier",  # treat as background, don't care
    "motorcycle",
    "bicycle",
    "pedestrian",
    # "traffic_cone",  # treat as background, don't care
    "ignore",
]

# Labels are 1-based: the first extracted class maps to label 1.
global_configs.nuscenes_extract_class_name_to_label = {
    name: label
    for label, name in enumerate(global_configs.nuscenes_extract_class_names, start=1)
}

# Inverse mapping: label -> class name.
global_configs.nuscenes_extract_label_to_class_name = {
    label: name
    for name, label in global_configs.nuscenes_extract_class_name_to_label.items()
}

# Subset of classes whose geometry is reconstructed.
global_configs.nuscenes_class_names_to_recon = [
    "car",
    # "truck",
    # "construction_vehicle",
    # "bus",
    # "trailer",
]

# Same subset expressed as numeric labels.
global_configs.nuscenes_class_labels_to_recon = [
    global_configs.nuscenes_extract_class_name_to_label[name]
    for name in global_configs.nuscenes_class_names_to_recon
]
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/lit_01_extract_scene.py
Python
import argparse
from collections import OrderedDict
from pathlib import Path

import numpy as np
import open3d as o3d
from tqdm import tqdm

from lit.containers.frame import Frame
from lit.containers.scene import Scene
from lit.path_utils import get_lit_paths
from lit.recon_utils import bbox_to_lineset
from lit_tools.global_configs import global_configs
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import NuScenesDataset, WaymoDataset
from pcdet.utils import common_utils


def extract_scene_waymo(args, cfg, logger, scene_index=None):
    """Extract Waymo scenes into per-scene ``Scene`` pickle files.

    Args:
        args: Parsed CLI args (split, data_version, skip_existing, ...).
        cfg: pcdet dataset config.
        logger: Logger forwarded to the dataset.
        scene_index: When given, only extract the scene at this index.
    """
    dataset = WaymoDataset(
        dataset_cfg=cfg,
        class_names=["Vehicle", "Pedestrian", "Cyclist"],
        training=args.split == "train",
        logger=logger,
        allow_empty_gt_boxes=True,
        include_extras=True,
    )
    lit_paths = get_lit_paths(
        data_version=args.data_version,
        data_domain="waymo",
    )
    lit_paths.scene_dir.mkdir(parents=True, exist_ok=True)

    # Count number of frames per scene.
    map_scene_name_to_num_frames = OrderedDict()
    for info in dataset.infos:
        scene_name = info["point_cloud"]["lidar_sequence"]
        if scene_name not in map_scene_name_to_num_frames:
            map_scene_name_to_num_frames[scene_name] = 0
        map_scene_name_to_num_frames[scene_name] += 1

    # Compute scene starts and ends.
    print(f"Total num scenes: {len(map_scene_name_to_num_frames)}")
    print(f"Total num frames: {sum(map_scene_name_to_num_frames.values())}")
    scene_names = list(map_scene_name_to_num_frames.keys())
    scene_num_frames = list(map_scene_name_to_num_frames.values())
    scene_starts = np.cumsum([0] + scene_num_frames)[:-1]  # Inclusive.
    scene_ends = np.cumsum(scene_num_frames)  # Exclusive.

    # If scene_index is specified, only extract this scene.
    if scene_index is not None:
        assert isinstance(scene_index, int)
        assert (
            0 <= scene_index < len(scene_names)
        ), f"{scene_index} not in [0, {len(scene_names)})"
        print(f"Only extracting scene_index={scene_index}: {scene_names[scene_index]}")
        scene_names = [scene_names[scene_index]]
        scene_starts = [scene_starts[scene_index]]
        scene_ends = [scene_ends[scene_index]]

    # Extract scenes, save each scene as a pkl file.
    for scene_name, scene_start, scene_end in tqdm(
        zip(scene_names, scene_starts, scene_ends),
        desc="Extracting scenes",
        total=len(scene_names),
    ):
        # Init.
        scene = Scene(scene_name=scene_name)
        scene_path = lit_paths.scene_dir / f"{scene_name}.pkl"

        # Skip existing (re-extract from scratch if the pickle is corrupted).
        if args.skip_existing and scene_path.exists():
            try:
                scene = Scene.load(scene_path)
            except Exception:
                # `scene` still holds the fresh empty Scene created above.
                print(f"{scene_path} exists but failed to load, continue processing.")
            else:
                print(f"{scene_name} of {len(scene_names)} frames exists. Skipping.")
                continue

        # Append frames.
        for global_frame_index in range(scene_start, scene_end):
            frame_dict = dataset[global_frame_index]

            # Unpack indices.
            frame_index = frame_dict["sample_idx"]  # e.g 15

            # Unpack data.
            points = frame_dict["points"][:]  # (x, y, z, i, e)
            gt_boxes = frame_dict["gt_boxes"].astype(np.float32)  # (N, 8)
            pose = frame_dict["pose"]  # (4, 4)
            num_points_of_each_lidar = frame_dict["num_points_of_each_lidar"]  # (5,)
            lidar_to_vehicle_poses = frame_dict["lidar_to_vehicle_poses"]  # (5, 4, 4)

            # Check data.
            if len(points) != np.sum(num_points_of_each_lidar):
                raise ValueError(
                    f"In {scene_name}, "
                    f"len(points) != np.sum(num_points_of_each_lidar): "
                    f"{len(points)} != {np.sum(num_points_of_each_lidar)}"
                )

            # Print warnings if the frame's gt_boxes are empty.
            if len(gt_boxes) == 0:
                print(f"Warning: {scene_name} frame {frame_index} has no gt_boxes.")

            # Object ids.
            object_ids = frame_dict["obj_ids"]
            if not len(object_ids) == len(gt_boxes):
                raise ValueError(
                    f"len(object_ids) != len(gt_boxes): "
                    f"{len(object_ids)} != {len(gt_boxes)}"
                )

            # Append.
            scene.append_frame(
                Frame(
                    scene_name=scene_name,
                    frame_index=frame_index,
                    frame_pose=pose,
                    lidar_to_vehicle_poses=lidar_to_vehicle_poses,
                    num_points_of_each_lidar=num_points_of_each_lidar,
                    local_points=points,
                    local_bboxes=gt_boxes,
                    object_ids=object_ids,
                ),
                check_valid=True,
            )

            # Visualize frame.
            visualize_frame = False
            if visualize_frame:
                # Visualize.
                pcd = o3d.geometry.PointCloud()
                pcd.points = o3d.utility.Vector3dVector(points[:, :3])
                ls = o3d.geometry.LineSet()
                for gt_box in gt_boxes:
                    ls += bbox_to_lineset(gt_box)
                coord = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
                o3d.visualization.draw_geometries([pcd, ls, coord])

        # Save.
        scene.save(path=scene_path, verbose=False)


def extract_scene_nuscenes(args, cfg, logger, scene_index=None):
    """Extract NuScenes scenes into per-scene ``Scene`` pickle files.

    Args:
        args: Parsed CLI args (split, data_version, skip_existing, ...).
        cfg: pcdet dataset config.
        logger: Logger forwarded to the dataset.
        scene_index: When given, only extract the scene at this index.
    """
    # # KITTI
    # all_kitti_class_names = [
    #     "Car",
    #     "Van",
    #     "Truck",
    #     "Pedestrian",
    #     "Person",
    #     "Cyclist",
    #     "Tram",
    #     "Misc",
    # ]
    dataset = NuScenesDataset(
        dataset_cfg=cfg,
        class_names=global_configs.nuscenes_extract_class_names,
        training=args.split == "train",
        logger=logger,
        include_extras=True,
    )

    # Make sure that there are no SHIFT_COOR applied. This way, the
    # reconstructed geometry has the same coordinate as the raw dataset.
    # See README_coordinates.md for more details.
    if "SHIFT_COOR" in dataset.dataset_cfg:
        assert np.allclose(dataset.dataset_cfg["SHIFT_COOR"], [0, 0, 0])

    lit_paths = get_lit_paths(
        data_version=args.data_version,
        data_domain="nuscenes",
    )
    lit_paths.scene_dir.mkdir(parents=True, exist_ok=True)

    # Count the number of frames per scene. This can also be read from
    # the scene.json file or the related APIs. However, we directly get this
    # info from the dataset.info as we need to get them in a specific order.
    map_scene_name_to_num_frames = OrderedDict()
    for info in dataset.infos:
        sample_dict = dataset.nusc.get("sample", info["token"])
        scene_dict = dataset.nusc.get("scene", sample_dict["scene_token"])
        scene_name = scene_dict["token"]
        if scene_name not in map_scene_name_to_num_frames:
            map_scene_name_to_num_frames[scene_name] = 0
        map_scene_name_to_num_frames[scene_name] += 1

    # Then, we cross validate this result wih the one retrieved from the API.
    for scene_name in map_scene_name_to_num_frames:
        scene_dict = dataset.nusc.get("scene", scene_name)
        assert map_scene_name_to_num_frames[scene_name] == scene_dict["nbr_samples"]

    # Compute scene starts and ends.
    print(f"Total num scenes: {len(map_scene_name_to_num_frames)}")
    print(f"Total num frames: {sum(map_scene_name_to_num_frames.values())}")
    scene_names = list(map_scene_name_to_num_frames.keys())
    scene_num_frames = list(map_scene_name_to_num_frames.values())
    scene_starts = np.cumsum([0] + scene_num_frames)[:-1]  # Inclusive.
    scene_ends = np.cumsum(scene_num_frames)  # Exclusive.

    # If scene_index is specified, only extract this scene.
    if scene_index is not None:
        assert isinstance(scene_index, int)
        assert (
            0 <= scene_index < len(scene_names)
        ), f"{scene_index} not in [0, {len(scene_names)})"
        print(f"Only extracting scene_index={scene_index}: {scene_names[scene_index]}")
        scene_names = [scene_names[scene_index]]
        scene_starts = [scene_starts[scene_index]]
        scene_ends = [scene_ends[scene_index]]

    # Extract scenes, save each scene as a pkl file.
    for scene_name, scene_start, scene_end in tqdm(
        zip(scene_names, scene_starts, scene_ends),
        desc="Extracting scenes",
        total=len(scene_names),
    ):
        # Init.
        scene_is_valid = True
        scene = Scene(scene_name=scene_name)
        scene_path = lit_paths.scene_dir / f"{scene_name}.pkl"

        # Skip existing.
        # BUGFIX: the original fell through after loading an existing scene
        # (missing `continue`), re-appending every frame onto the loaded
        # scene and saving a duplicated result. Now mirrors the Waymo path:
        # skip on successful load, re-extract when the pickle is corrupted.
        if args.skip_existing and scene_path.exists():
            try:
                scene = Scene.load(scene_path)
            except Exception:
                print(f"{scene_path} exists but failed to load, continue processing.")
            else:
                print(f"{scene_name} of {len(scene_names)} frames exists. Skipping.")
                continue

        # Append frames.
        for proposed_frame_index, global_frame_index in enumerate(
            range(scene_start, scene_end)
        ):
            frame_dict = dataset[global_frame_index]

            # Unpack indices.
            frame_index = frame_dict["sample_idx"]

            # Unpack data.
            points = frame_dict["points"][:]  # (x, y, z)
            gt_boxes = frame_dict["gt_boxes"].astype(
                np.float32
            )  # (x, y, z, dx, dy, dz, heading, label)
            pose = frame_dict["pose"]  # (4, 4)
            num_points_of_each_lidar = frame_dict["num_points_of_each_lidar"]  # (1,)
            lidar_to_vehicle_poses = frame_dict["lidar_to_vehicle_poses"]  # (1, 4, 4)

            ####################################################################
            # Special treatment for NuScenes pose
            ####################################################################
            # The points we get here are in the lidar coordinate. However, the
            # points we get for Waymo is in the vehicle (ego) coordinate.
            # We have two options:
            #
            # 1. Transform points.
            #    Transform points to vehicle coordinate and use this new points
            #    from now on. The problem with this option is that we also need
            #    to transform the gt_boxes, which is not trivial.
            # 2. Transform poses (selected).
            #    - frame_pose <- frame_pose @ lidar_to_vehicle_pose
            #    - lidar_to_vehicle_pose <- identity
            #    This way, we can use the original points from now on. We use
            #    this option.
            assert len(lidar_to_vehicle_poses) == 1
            pose = (pose @ lidar_to_vehicle_poses[0]).astype(np.float32)
            lidar_to_vehicle_poses[0] = np.eye(4, dtype=np.float32)
            ####################################################################

            # Check data.
            if len(points) != np.sum(num_points_of_each_lidar):
                raise ValueError(
                    f"In {scene_name}, "
                    f"len(points) != np.sum(num_points_of_each_lidar): "
                    f"{len(points)} != {np.sum(num_points_of_each_lidar)}"
                )

            # Print warnings if the frame's gt_boxes are empty.
            if len(gt_boxes) == 0:
                print(f"Warning: {scene_name} frame {frame_index} has no gt_boxes.")

            # Object ids.
            object_ids = frame_dict["obj_ids"]
            if not len(object_ids) == len(gt_boxes):
                raise ValueError(
                    f"len(object_ids) != len(gt_boxes): "
                    f"{len(object_ids)} != {len(gt_boxes)}"
                )

            # Frames must arrive densely and in order; otherwise the scene
            # is marked invalid and not saved.
            if proposed_frame_index != frame_index:
                print(
                    f"[Invalid Scene] {scene_name} frame frame_index={frame_index}, "
                    f"but proposed_frame_index={proposed_frame_index}. "
                )
                scene_is_valid = False
                break

            # Append.
            scene.append_frame(
                Frame(
                    scene_name=scene_name,
                    frame_index=frame_index,
                    frame_pose=pose,
                    lidar_to_vehicle_poses=lidar_to_vehicle_poses,
                    num_points_of_each_lidar=num_points_of_each_lidar,
                    local_points=points,
                    local_bboxes=gt_boxes,
                    object_ids=object_ids,
                ),
                check_valid=True,
            )

            # Visualize frame.
            visualize_frame = False
            if visualize_frame:
                # Print a summary of number of box per class.
                print(f"Total number of boxes: {len(gt_boxes)}")
                print("Number of boxes per class:")
                for class_name in global_configs.nuscenes_extract_class_names:
                    num_boxes = np.sum(
                        gt_boxes[:, -1]
                        == global_configs.nuscenes_extract_class_names.index(class_name)
                        + 1
                    )
                    print(f"  - {class_name}: {num_boxes}")

                # Visualize.
                pcd = o3d.geometry.PointCloud()
                pcd.points = o3d.utility.Vector3dVector(points[:, :3])
                ls = o3d.geometry.LineSet()
                for gt_box in gt_boxes:
                    ls += bbox_to_lineset(gt_box)
                coord = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
                o3d.visualization.draw_geometries([pcd, ls, coord])

        # Visualize scene.
        visualize = False
        if visualize:
            scene.visualize()

        # Save scene.
        if scene_is_valid:
            scene.save(path=scene_path, verbose=False)


def main():
    # python lit_01_extract_scene.py --cfg_file ../tools/cfgs/dataset_configs/waymo_dataset_extract.yaml --split train
    # python lit_01_extract_scene.py --cfg_file ../tools/cfgs/dataset_configs/waymo_dataset_extract.yaml --split valid
    # python lit_01_extract_scene.py --cfg_file ../tools/cfgs/dataset_configs/nuscenes_dataset_extract.yaml --split train
    # python lit_01_extract_scene.py --cfg_file ../tools/cfgs/dataset_configs/nuscenes_dataset_extract.yaml --split valid
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cfg_file",
        type=str,
        default=None,
        help="Config path",
    )
    parser.add_argument(
        "--split",
        type=str,
        default="train",
        help="Split of the dataset",
        choices=["train", "valid"],
    )
    parser.add_argument(
        "--data_version",
        type=str,
        default="v1",
        help="Data version",
    )
    parser.add_argument(
        "--skip_existing",
        action="store_true",
        help="Skip existing files",
    )
    parser.add_argument(
        "--scene_index",
        type=int,
        default=None,
        help="Scene index, when specified, only extract this scene",
    )
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    np.random.seed(1024)
    logger = common_utils.create_logger("/tmp/st3d_extract.log")

    # Make sure we don't use the wrong config file
    # - Pre-processing shall be disabled.
    # - Shuffle shall be disabled.
    # - Batch size shall be 1.
    # - Augmentation shall be disabled.
    assert Path(args.cfg_file).name in [
        "waymo_dataset_extract.yaml",
        "waymo_dataset_info.yaml",
        "nuscenes_dataset_extract.yaml",
    ]

    if cfg["DATASET"] == "WaymoDataset":
        extract_scene_waymo(args, cfg, logger, scene_index=args.scene_index)
    elif cfg["DATASET"] == "NuScenesDataset":
        extract_scene_nuscenes(args, cfg, logger, scene_index=args.scene_index)
    else:
        raise ValueError(f"Unknown dataset: {cfg['DATASET']}")


if __name__ == "__main__":
    main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/lit_02_recon_bg.py
Python
import argparse
import copy
from types import SimpleNamespace

import open3d as o3d
from tqdm import tqdm

from lit.bg_reconstructor import BGReconstructor
from lit.containers.scene import Scene
from lit.path_utils import LitPaths, get_lit_paths
from lit.recon_utils import remove_statistical_outlier

# Per-domain background-reconstruction settings.
_waymo_args = SimpleNamespace(
    skip_every_n_frames=2,
    enabled_lidars=(0,),
    expand_box_ratio=1.0,
    raise_bbox=0.4,
    nksr_voxel_size=0.25,
    nksr_chunked=False,
    per_frame_rso_nb_neighbors=0,
    per_frame_rso_std_ratio=0.0,
    rso_nb_neighbors=0,
    rso_std_ratio=0.0,
)

_nuscenes_args = SimpleNamespace(
    skip_every_n_frames=1,
    enabled_lidars=(0,),
    expand_box_ratio=1.05,
    raise_bbox=0.0,
    nksr_voxel_size=0.25,
    nksr_chunked=False,
    per_frame_rso_nb_neighbors=0,
    per_frame_rso_std_ratio=0.0,
    rso_nb_neighbors=0,
    rso_std_ratio=0.0,
)


def recon_bg(
    scene_name: str,
    lit_paths: LitPaths,
    skip_existing=False,
    dray_run=False,  # NOTE(review): "dray_run" (sic) kept for caller compatibility.
):
    """Reconstruct one scene's background mesh and write it as a .ply file."""
    if lit_paths.data_domain == "waymo":
        args = _waymo_args
    elif lit_paths.data_domain == "nuscenes":
        args = _nuscenes_args
    else:
        raise ValueError(f"Unknown dataset type: {lit_paths.data_domain}")

    # Paths of input and output.
    scene_path = lit_paths.scene_dir / f"{scene_name}.pkl"
    mesh_path = lit_paths.bg_dir / f"{scene_name}.ply"

    # Skip existing.
    if skip_existing and mesh_path.exists():
        print(f"Skipped {mesh_path}.")
        return

    # Load the scene, keeping every `skip_every_n_frames`-th frame, then
    # gather background points with foreground objects removed.
    scene = Scene.load(scene_path)
    scene.sample_by_indices(range(0, len(scene), args.skip_every_n_frames))
    bg_data = scene.extract_bg(
        enabled_lidars=args.enabled_lidars,
        remove_foreground=True,
        raise_bbox=args.raise_bbox,
        expand_box_ratio=args.expand_box_ratio,
        per_frame_rso_nb_neighbors=args.per_frame_rso_nb_neighbors,
        per_frame_rso_std_ratio=args.per_frame_rso_std_ratio,
    )

    # Remove statistical outlier.
    points, lidar_centers = remove_statistical_outlier(
        points=bg_data["points"],
        lidar_centers=bg_data["lidar_centers"],
        nb_neighbors=args.rso_nb_neighbors,
        std_ratio=args.rso_std_ratio,
    )

    visualize_points = False
    if visualize_points:
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        o3d.visualization.draw_geometries([pcd])

    # Run NKSR-based surface reconstruction.
    reconstructor = BGReconstructor(
        voxel_size=args.nksr_voxel_size,
        chunked=args.nksr_chunked,
    )
    mesh = reconstructor.recon(
        points=points,
        lidar_centers=lidar_centers,
    )

    visualize_mesh = False
    if visualize_mesh:
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        mesh.compute_vertex_normals()
        o3d.visualization.draw_geometries([mesh])

    # Save.
    if dray_run:
        print("Dry run. Not saving.")
    else:
        o3d.io.write_triangle_mesh(str(mesh_path), mesh)
        print(f"Saved to {mesh_path}.")


def main():
    # CUDA_VISIBLE_DEVICES=0 python lit_02_recon_bg.py --data_domain waymo --data_version v0 --skip_existing
    # CUDA_VISIBLE_DEVICES=1 python lit_02_recon_bg.py --data_domain waymo --data_version v0 --reverse --skip_existing
    # CUDA_VISIBLE_DEVICES=0 python lit_02_recon_bg.py --data_domain nuscenes --data_version v0 --skip_existing
    # CUDA_VISIBLE_DEVICES=1 python lit_02_recon_bg.py --data_domain nuscenes --data_version v0 --reverse --skip_existing
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_domain",
        type=str,
        required=True,
        choices=("waymo", "nuscenes"),
    )
    parser.add_argument(
        "--data_version",
        type=str,
        required=True,
        help="Version of the data, which determines the lit_paths.",
    )
    parser.add_argument(
        "--reverse",
        action="store_true",
        help="Reverse the order of scenes.",
    )
    parser.add_argument(
        "--skip_existing",
        action="store_true",
        help="Skip existing scenes that have been processed.",
    )
    parser.add_argument(
        "--dry_run",
        action="store_true",
        help="Dry run without saving.",
    )
    parser.add_argument(
        "--scene_index",
        type=int,
        default=None,
        help="Scene index, when specified, only process this scene",
    )
    args = parser.parse_args()

    # Get lit_paths.
    lit_paths = get_lit_paths(
        data_version=args.data_version,
        data_domain=args.data_domain,
    )

    # Get scenes to process. NOTE: scene_paths is intentionally built from
    # the full list before any --scene_index filtering, matching the
    # "all scenes extracted" check below.
    scene_names = copy.copy(lit_paths.scene_names)
    scene_paths = [
        lit_paths.scene_dir / f"{scene_name}.pkl" for scene_name in scene_names
    ]
    if args.scene_index is not None:
        assert isinstance(args.scene_index, int)
        assert (
            0 <= args.scene_index < len(scene_names)
        ), f"{args.scene_index} not in [0, {len(scene_names)})"
        print(
            f"Only extracting scene_index={args.scene_index}: "
            f"{scene_names[args.scene_index]}"
        )
        scene_names = [scene_names[args.scene_index]]

    # Check scene_names have all been extracted.
    scene_paths_extracted = sorted(list(lit_paths.scene_dir.glob("*.pkl")))
    is_all_extracted = True
    for scene_path in scene_paths:
        if scene_path not in scene_paths_extracted:
            is_all_extracted = False
            print(f"{scene_path} has not been extracted.")
    if not is_all_extracted:
        raise ValueError("Not all scenes have been extracted. Aborting.")

    # Create output dir for reconstructed bg meshes.
    lit_paths.bg_dir.mkdir(exist_ok=True, parents=True)

    # Reverse the order of scenes.
    if args.reverse:
        scene_names = scene_names[::-1]

    # Process sequentially.
    for scene_name in tqdm(scene_names, desc="Reconstructing bg meshes"):
        recon_bg(
            scene_name=scene_name,
            lit_paths=lit_paths,
            skip_existing=args.skip_existing,
            dray_run=args.dry_run,
        )


if __name__ == "__main__":
    main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/lit_03_recon_fg.py
Python
import argparse import copy import pickle import sys import numpy as np import open3d as o3d from tqdm import tqdm from lit.containers.fg_scene import FGScene from lit.containers.scene import Scene from lit.fg_reconstructor import FGReconstructor from lit.path_utils import LitPaths, get_lit_paths from lit.recon_utils import bbox_to_lineset from lit_tools.global_configs import global_configs sys.excepthook = lambda et, ev, tb: ( None if issubclass(et, (KeyboardInterrupt, SystemExit)) else (print(f"Unhandled exception: {ev}"), __import__("ipdb").post_mortem(tb)) ) def recon_fg( scene_name: str, fgr: FGReconstructor, lit_paths: LitPaths, skip_existing: bool = False, verbose: bool = False, dry_run: bool = False, ): # Maybe skip existing. fg_path = lit_paths.fg_dir / f"{scene_name}.pkl" if skip_existing and fg_path.exists(): print(f"Skipping existing {fg_path}.") return # Load. scene_path = lit_paths.scene_dir / f"{scene_name}.pkl" scene = Scene.load(scene_path) # Extract foreground. if lit_paths.data_domain == "waymo": fg_boxes = scene.extract_fg(select_labels=[1], verbose=False) elif lit_paths.data_domain == "nuscenes": # These classes will be reconstructed and put back. fg_boxes = scene.extract_fg( select_labels=global_configs.nuscenes_class_labels_to_recon, verbose=False, ) else: raise ValueError(f"Unknown data_domain: {lit_paths.data_domain}") # Group fg_boxes. fg_scene = FGScene.from_fg_boxes(fg_boxes) print(f"Grouped {len(fg_boxes)} fg_boxes into {len(fg_scene)} groups.") # Visualize groups. # Each group is colored with a random color. 
visualize_groups = False if visualize_groups: all_fg_object_ls = o3d.geometry.LineSet() for fg_object in fg_scene: fg_object_color = np.random.rand(3) fg_object_ls = o3d.geometry.LineSet() for fg_box in fg_object.fg_boxes: ls_fg_box = bbox_to_lineset( fg_box.local_bbox, frame_pose=fg_box.frame_pose ) ls_fg_box.paint_uniform_color(fg_object_color) fg_object_ls += ls_fg_box all_fg_object_ls += fg_object_ls o3d.visualization.draw_geometries([all_fg_object_ls]) # Reconstruct foreground mesh for each group. for fg_object in tqdm( fg_scene, desc="Reconstructing fg_objects", total=len(fg_scene), disable=not verbose, ): # Reconstruct. mesh = fgr.recon_fg_object(fg_object) fg_object.mesh_vertices = np.asarray(mesh.vertices).astype(np.float32) fg_object.mesh_triangles = np.asarray(mesh.triangles) # Visualize. visualize_group_mesh = False if visualize_group_mesh: fg_box = fg_object[0] # They are all visualize the same. pseudo_pose = fg_box.compute_local_pseudo_pose() world_mesh = copy.deepcopy(mesh) world_mesh = world_mesh.transform(fg_box.frame_pose @ pseudo_pose) world_mesh.compute_vertex_normals() world_ls = bbox_to_lineset( fg_box.local_bbox, frame_pose=fg_box.frame_pose, ) bbox_label = fg_box.local_bbox[7] title = f"label: {bbox_label}" if lit_paths.data_domain == "nuscenes": class_name = global_configs.nuscenes_extract_label_to_class_name[ bbox_label ] title += f", class: {class_name}" o3d.visualization.draw_geometries([world_mesh, world_ls], window_name=title) # Visualize all groups in a combined mesh. visualize_groups_mesh = False if visualize_groups_mesh: frame_index = 0 groups_mesh = o3d.geometry.TriangleMesh() for fg_object in fg_scene: # Check if the group has a mesh in this frame. target_fg_box = None for fg_box in fg_object.fg_boxes: if fg_box.frame_index == frame_index: target_fg_box = fg_box break if target_fg_box is None: continue # Transform the mesh to the world frame. 
mesh = o3d.geometry.TriangleMesh() mesh.vertices = o3d.utility.Vector3dVector(fg_object.mesh_vertices) mesh.triangles = o3d.utility.Vector3iVector(fg_object.mesh_triangles) pseudo_pose = target_fg_box.compute_local_pseudo_pose() mesh = mesh.transform(target_fg_box.frame_pose @ pseudo_pose) groups_mesh += mesh groups_mesh.compute_vertex_normals() o3d.visualization.draw_geometries([groups_mesh]) # Save the fg_scene. if dry_run: print(f"Dry run: Not saving {fg_path}.") else: fg_scene.save(fg_path) def main(): """ CUDA_VISIBLE_DEVICES=0 python lit_03_recon_fg.py --data_domain waymo --data_version v0 --skip_existing CUDA_VISIBLE_DEVICES=1 python lit_03_recon_fg.py --data_domain waymo --data_version v0 --reverse --skip_existing CUDA_VISIBLE_DEVICES=0 python lit_03_recon_fg.py --data_domain waymo --data_version v0 --shuffle --skip_existing CUDA_VISIBLE_DEVICES=1 python lit_03_recon_fg.py --data_domain waymo --data_version v0 --shuffle --skip_existing CUDA_VISIBLE_DEVICES=0 python lit_03_recon_fg.py --data_domain nuscenes --data_version v0 --skip_existing CUDA_VISIBLE_DEVICES=1 python lit_03_recon_fg.py --data_domain nuscenes --data_version v0 --reverse --skip_existing CUDA_VISIBLE_DEVICES=0 python lit_03_recon_fg.py --data_domain nuscenes --data_version v0 --shuffle --skip_existing CUDA_VISIBLE_DEVICES=1 python lit_03_recon_fg.py --data_domain nuscenes --data_version v0 --shuffle --skip_existing """ parser = argparse.ArgumentParser() parser.add_argument( "--data_domain", type=str, required=True, choices=("waymo", "nuscenes"), ) parser.add_argument( "--data_version", type=str, required=True, help="Version of the data, which determines the lit_paths.", ) parser.add_argument( "--reverse", action="store_true", help="Reverse the order of scenes.", ) parser.add_argument( "--shuffle", action="store_true", help="Shuffle the order of scenes.", ) parser.add_argument( "--skip_existing", action="store_true", help="Skip existing scenes that have been processed.", ) 
parser.add_argument( "--dry_run", action="store_true", help="Dry run without saving.", ) parser.add_argument( "--verbose", action="store_true", help="Print more information.", ) parser.add_argument( "--scene_index", type=int, default=None, help="Scene index, when specified, only process this scene", ) args = parser.parse_args() # Init paths. lit_paths = get_lit_paths( data_version=args.data_version, data_domain=args.data_domain, ) lit_paths.fg_dir.mkdir(exist_ok=True, parents=True) # Get scene names. scene_names = copy.copy(lit_paths.scene_names) if args.scene_index is not None: assert isinstance(args.scene_index, int) assert ( 0 <= args.scene_index < len(scene_names) ), f"{args.scene_index} not in [0, {len(scene_names)})" print( f"Only extracting scene_index={args.scene_index}: " f"{scene_names[args.scene_index]}" ) scene_names = [scene_names[args.scene_index]] if args.reverse: scene_names = list(reversed(scene_names)) if args.shuffle: np.random.shuffle(scene_names) print(f"Found {len(scene_names)} scenes.") # Determine whether to use DeepSDF. if lit_paths.data_version in {"v0", "v1"}: use_deepsdf = True else: raise ValueError(f"Unknown data_version: {lit_paths.data_version}") # Init FGReconstructor. fgr = FGReconstructor( use_deepsdf=use_deepsdf, ) # Run foreground reconstruction. for scene_name in tqdm(scene_names, desc="Reconstructing fg_objects."): print(f"Processing {scene_name}...") recon_fg( scene_name, fgr=fgr, lit_paths=lit_paths, skip_existing=args.skip_existing, verbose=args.verbose, dry_run=args.dry_run, ) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/lit_04_sim.py
Python
""" This implements lidar simulation on NuScenes or Waymo. """ import argparse import copy import json import pickle import time import camtools as ct import numpy as np import open3d as o3d from tqdm import tqdm from lit.containers.fg_scene import FGScene from lit.containers.scene import Scene from lit.containers.sim_frame import SimFrame from lit.containers.sim_scene import SimScene from lit.fg_reconstructor import FGReconstructor from lit.lidar import ( KITTILidarIntrinsics, Lidar, NuScenesLidarIntrinsics, NuScenesVanillaLidarIntrinsics, WaymoLidarIntrinsics, ) from lit.path_utils import LitPaths, get_lit_paths from lit.raycast_engine import RaycastEngine from lit.raycast_engine_cpu import RaycastEngineCPU from lit.raycast_engine_gpu import RaycastEngineGPU from lit.recon_utils import ( bbox_to_corners, bboxes_to_lineset, incident_angles_to_colors, ) class CanonicalMeshCache: """ Shared mesh for ablation study. """ def __init__(self, max_capacity: int): self.max_capacity = max_capacity self.list_vertices = [] self.list_triangles = [] def is_full(self): return len(self.list_vertices) >= self.max_capacity def insert(self, vertices, triangles): self.list_vertices.append(vertices) self.list_triangles.append(triangles) def get_random_mesh(self): if not self.is_full(): raise ValueError( f"Cache is not full yet: {len(self.list_vertices)} " f"out of {self.max_capacity} filled." ) idx = np.random.randint(len(self.list_vertices)) vertices = self.list_vertices[idx] triangles = self.list_triangles[idx] return vertices, triangles def run_sim( data_version: str, src_style: str, dst_style: str, lit_paths: LitPaths, scene_name: str, enable_scaling: bool, raycast_engine: RaycastEngine, skip_existing: bool = False, dry_run: bool = False, mesh_cache: CanonicalMeshCache = None, ablation_foreground_only: bool = False, ): # Get destination paths. 
if dst_style == "waymo": sim_scene_dir = lit_paths.sim_waymo_dir / scene_name lidar_intrinsics = WaymoLidarIntrinsics() elif dst_style == "nuscenes": sim_scene_dir = lit_paths.sim_nuscenes_dir / scene_name lidar_intrinsics = NuScenesLidarIntrinsics() elif dst_style == "nuscenes_vanilla": # Special case for vanilla nuscenes lidar without statistical modeling. sim_scene_dir = lit_paths.lit_data_root / "sim_nuscenes_vanilla" / scene_name lidar_intrinsics = NuScenesVanillaLidarIntrinsics() elif dst_style == "kitti": sim_scene_dir = lit_paths.sim_kitti_dir / scene_name lidar_intrinsics = KITTILidarIntrinsics() else: raise NotImplementedError(f"Unknown dst_style: {dst_style}") # Get src->dst scaling factors. # data/stats/src_to_dst_bbox_scales.json if enable_scaling: scales_path = lit_paths.data_root / "stats" / "src_to_dst_bbox_scales.json" with open(scales_path, "r") as f: scales_dict = json.load(f) src_to_dst_scales = np.array( scales_dict[f"{src_style}_to_{dst_style}_bbox_scale"] ) print(f"Loaded src_to_dst_scales: {src_to_dst_scales}") else: src_to_dst_scales = None print("src_to_dst_scales is set to None.") # Skip if sim_dir exists (we won't check the content of sim_dir) if skip_existing and sim_scene_dir.exists(): print(f"Skipping {scene_name} as it already exists.") return # Load scene. scene_path = lit_paths.scene_dir / f"{scene_name}.pkl" scene = Scene.load(scene_path) num_frames = len(scene) # Load fg groups. fg_path = lit_paths.fg_dir / f"{scene_name}.pkl" fg_scene = FGScene.load(fg_path) if mesh_cache is not None: for fg_object in fg_scene: if not mesh_cache.is_full(): # Insert mesh into cache. vertices = fg_object.mesh_vertices triangles = fg_object.mesh_triangles mesh_cache.insert(vertices=vertices, triangles=triangles) else: # Retrieve random mesh from cache. vertices, triangles = mesh_cache.get_random_mesh() # Get avg_centered_corners for scaling. 
fused_centered_corners = [] for fg_box in fg_object.fg_boxes: pseudo_pose = fg_box.compute_local_pseudo_pose() pseudo_T = ct.convert.pose_to_T(pseudo_pose) local_corners = bbox_to_corners(fg_box.local_bbox) centered_corners = ct.transform.transform_points( local_corners, pseudo_T ) fused_centered_corners.append(centered_corners) fused_centered_corners = np.array(fused_centered_corners) avg_centered_corners = np.mean(fused_centered_corners, axis=0) # Scale vertices with FGReconstructor.resize_mesh_to_fit_bbox. mesh = o3d.geometry.TriangleMesh() mesh.vertices = o3d.utility.Vector3dVector(vertices) mesh.triangles = o3d.utility.Vector3iVector(triangles) mesh = FGReconstructor.resize_mesh_to_fit_bbox( mesh=mesh, axis_aligned_centered_corners=avg_centered_corners, ) new_vertices = np.asarray(mesh.vertices) new_triangles = np.asarray(mesh.triangles) # Apply replacement. fg_object.mesh_vertices = new_vertices fg_object.mesh_triangles = new_triangles # Load bg mesh. bg_mesh_path = lit_paths.bg_dir / f"{scene_name}.ply" bg_mesh = o3d.io.read_triangle_mesh(str(bg_mesh_path)) if len(bg_mesh.vertices) == 0: print(f"[Warning] Empty bg mesh: {bg_mesh_path}, skipping.") # Simulate frame by frame. raycast_times = [] sim_scene = SimScene() for frame_index in tqdm( range(num_frames), desc="Simulating frames", total=num_frames ): # Get poses. frame = scene[frame_index] frame_pose = frame.frame_pose lidar_to_vehicle_poses = frame.lidar_to_vehicle_poses # Get mesh. fg_mesh = fg_scene.get_frame_mesh( frame_index=frame_index, src_to_dst_scales=src_to_dst_scales, ) if ablation_foreground_only: fused_mesh = fg_mesh else: fused_mesh = fg_mesh + bg_mesh # Get lidar. lidar_pose = frame_pose @ lidar_to_vehicle_poses[0] lidar = Lidar(intrinsics=lidar_intrinsics, pose=lidar_pose) # Get points. start_time = time.time() # Create a dummy mesh if mesh is empty. 
if len(fused_mesh.vertices) == 0: fused_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.0001) cast_result = raycast_engine.lidar_intersect_mesh( lidar=lidar, mesh=fused_mesh, ) if isinstance(cast_result, tuple): points, incident_angles = cast_result else: points = cast_result incident_angles = np.zeros(len(points)) raycast_times.append(time.time() - start_time) # Convert points from global to local coordinates with frame_pose. local_points = ct.transform.transform_points( points, ct.convert.pose_to_T(frame_pose) ) # Get local bboxes. local_bboxes = fg_scene.get_frame_local_bboxes( frame_index=frame_index, src_to_dst_scales=src_to_dst_scales, ) # Append. sim_frame = SimFrame( frame_index=frame_index, frame_pose=frame_pose, local_points=local_points, local_bboxes=local_bboxes, incident_angles=incident_angles, ) sim_scene.append_frame(sim_frame) # Visualize. visualize = False if visualize: fg_frame_ls_unscaled = fg_scene.get_frame_ls( frame_index=frame_index, ) fg_frame_ls_unscaled.paint_uniform_color([0, 0, 1]) fg_frame_ls_scaled = bboxes_to_lineset( bboxes=local_bboxes, frame_pose=frame_pose, ) fg_frame_ls_scaled.paint_uniform_color([1, 0, 0]) fused_mesh.compute_vertex_normals() points_pcd = o3d.geometry.PointCloud() points_pcd.points = o3d.utility.Vector3dVector(points) colors = incident_angles_to_colors(incident_angles) points_pcd.colors = o3d.utility.Vector3dVector(colors) o3d.visualization.draw_geometries( [ fg_frame_ls_unscaled, fg_frame_ls_scaled, fused_mesh, points_pcd, ] ) if dry_run: total_raycast_time = sum(raycast_times) print(f"[Dry run] scene raycast time: {total_raycast_time}") return # Save as frames in local coordinates. 
sim_scene_dir.mkdir(parents=True, exist_ok=True) sim_scene.save_sim_frames(sim_scene_dir=sim_scene_dir) def main(): """ # Example commands: CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style waymo --dst_style kitti --skip_existing --data_version v0 CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style waymo --dst_style kitti --skip_existing --data_version v0 --reverse CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style waymo --dst_style kitti --skip_existing --data_version v0 --shuffle CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style waymo --dst_style kitti --skip_existing --data_version v0 --shuffle CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style waymo --dst_style nuscenes --skip_existing --data_version v0 CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style waymo --dst_style nuscenes --skip_existing --data_version v0 --reverse CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style waymo --dst_style nuscenes --skip_existing --data_version v0 --shuffle CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style waymo --dst_style nuscenes --skip_existing --data_version v0 --shuffle CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v0 CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v0 --reverse # Ablation CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v5 --ablation_num_shared_meshes 1 CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v5 --ablation_num_shared_meshes 1 --reverse CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v5 --ablation_num_shared_meshes 1 --shuffle CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v6 --ablation_num_shared_meshes 50 
CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v6 --ablation_num_shared_meshes 50 --reverse CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v6 --ablation_num_shared_meshes 50 --shuffle CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v6 --ablation_num_shared_meshes 50 --shuffle CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style nuscenes --dst_style kitti --skip_existing --data_version v7 --ablation_foreground_only # Simulation for visualization python lit_04_sim.py --src_style waymo --dst_style nuscenes --data_version v8 python lit_04_sim.py --src_style waymo --dst_style kitti --data_version v8 # Self-translation CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style waymo --dst_style waymo --skip_existing --data_version v9 CUDA_VISIBLE_DEVICES=0 python lit_04_sim.py --src_style waymo --dst_style waymo --skip_existing --data_version v9 --reverse CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style waymo --dst_style waymo --skip_existing --data_version v9 --shuffle CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style waymo --dst_style waymo --skip_existing --data_version v9 --shuffle CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style nuscenes --dst_style nuscenes --skip_existing --data_version v9 CUDA_VISIBLE_DEVICES=1 python lit_04_sim.py --src_style nuscenes --dst_style nuscenes --skip_existing --data_version v9 --reverse """ parser = argparse.ArgumentParser() parser.add_argument( "--src_style", type=str, required=True, choices=("waymo", "nuscenes"), ) parser.add_argument( "--dst_style", type=str, choices=["waymo", "nuscenes", "kitti", "nuscenes_vanilla"], required=True, help="lidar style, choose from nuscenes, kitti, waymo", ) parser.add_argument( "--data_version", type=str, required=True, help="Version of the data, which determines the lit_paths.", ) 
parser.add_argument( "--shuffle", action="store_true", help="Shuffle the order of scenes.", ) parser.add_argument( "--enable_scaling", action="store_true", help="Whether to enable src->dst shape size scaling.", ) parser.add_argument( "--reverse", action="store_true", help="Reverse the order of scenes.", ) parser.add_argument( "--dry_run", action="store_true", help="Dry run without saving.", ) parser.add_argument( "--skip_existing", action="store_true", help="Skip existing scenes that have been processed.", ) parser.add_argument( "--cpu", action="store_true", help="Use CPU instead of GPU.", ) parser.add_argument( "--ablation_num_shared_meshes", type=int, default=0, help="Number of shared meshes for ablation study.", ) parser.add_argument( "--ablation_foreground_only", action="store_true", help="Only simulate foreground.", ) parser.add_argument( "--scene_index", type=int, default=None, help="Scene index, when specified, only process this scene", ) args = parser.parse_args() # Get project paths. lit_paths = get_lit_paths( data_version=args.data_version, data_domain=args.src_style, ) # Select scenes if scene_index is specified. scene_names = copy.copy(lit_paths.scene_names) if args.scene_index is not None: assert isinstance(args.scene_index, int) assert ( 0 <= args.scene_index < len(scene_names) ), f"{args.scene_index} not in [0, {len(scene_names)})" print( f"Only extracting scene_index={args.scene_index}: " f"{scene_names[args.scene_index]}" ) scene_names = [scene_names[args.scene_index]] # Find valid scene names that have both fg and bg. 
valid_scene_names = [] for scene_name in scene_names: fg_path = lit_paths.fg_dir / f"{scene_name}.pkl" bg_path = lit_paths.bg_dir / f"{scene_name}.ply" if fg_path.exists() and bg_path.exists(): valid_scene_names.append(scene_name) print(f"- # scenes: {len(scene_names)}") print(f"- # valid scenes: {len(valid_scene_names)}") scene_names = valid_scene_names if args.reverse: scene_names = scene_names[::-1] print("Reversed valid_scene_names") if args.shuffle: shuffle_indices = np.random.permutation(len(scene_names)) scene_names = [scene_names[i] for i in shuffle_indices] print("Shuffled valid_scene_names") if args.ablation_num_shared_meshes > 0: mesh_cache = CanonicalMeshCache(max_capacity=args.ablation_num_shared_meshes) else: mesh_cache = None # Run simulation. if args.cpu: raycast_engine = RaycastEngineCPU() else: raycast_engine = RaycastEngineGPU() for scene_name in tqdm(scene_names, desc="Simulating scenes"): run_sim( data_version=args.data_version, src_style=args.src_style, dst_style=args.dst_style, lit_paths=lit_paths, scene_name=scene_name, raycast_engine=raycast_engine, enable_scaling=args.enable_scaling, skip_existing=args.skip_existing, dry_run=args.dry_run, mesh_cache=mesh_cache, ablation_foreground_only=args.ablation_foreground_only, ) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/lit_raykeep_dataset.py
Python
import argparse import time import camtools as ct import numpy as np import open3d as o3d from matplotlib import pyplot as plt from pykdtree.kdtree import KDTree from tqdm import tqdm from lit.containers.scene import Scene from lit.containers.sim_scene import SimScene from lit.lidar import ( Lidar, NuScenesLidarIntrinsics, WaymoLidarIntrinsics, ) from lit.ray_keeper import GBMRayKeeper def find_closest_directions(query_dirs, model_dirs): """ Find the index of the closest direction in model_dirs for each direction in query_dirs using pykdtree. By law of cosines, d = sqrt(a^2 + b^2 - 2 * a * b * cos(theta)). Computing the minimal d is equivalent to finding the minimal theta between two directions. Args: query_dirs: [N, 3] array, N unit vectors representing directions. model_dirs: [M, 3] array, M unit vectors representing standard directions. Returns: indices: [N] array, indices of the closest direction in model_dirs for each direction in query_dirs. """ # Check shape. assert query_dirs.ndim == 2 and query_dirs.shape[1] == 3 assert model_dirs.ndim == 2 and model_dirs.shape[1] == 3 # Normalize. query_dirs = query_dirs / np.linalg.norm(query_dirs, axis=1, keepdims=True) model_dirs = model_dirs / np.linalg.norm(model_dirs, axis=1, keepdims=True) # Create KDTree from model directions start = time.time() kdtree = KDTree(model_dirs) print(f"KDTree creation time: {time.time() - start:.3f}s") # Query the KDTree for the nearest neighbor of each query direction start = time.time() _, indices = kdtree.query(query_dirs, k=1) print(f"KDTree query time: {time.time() - start:.3f}s") return indices def points_to_range_get_indices(points: np.ndarray, lidar: Lidar): """ Compute the row and column indices in the range image for multiple points. Args: points: (N, 3) array, points in the lidar frame. lidar: Lidar object. Returns: Two arrays (row_indices, col_indices) indicating the positions of the points in the range image. """ # Transform the points to the lidar's coordinate system. 
lidar_center = ct.convert.pose_to_C(lidar.pose) point_dirs = points - lidar_center point_dists = np.linalg.norm(point_dirs, axis=1, keepdims=True) point_dirs = point_dirs / point_dists # Get lidar directions. lidar_rays = lidar.get_rays() lidar_dirs = lidar_rays[:, 3:] # Find the closest directions for the points. ray_indices = find_closest_directions( query_dirs=point_dirs, model_dirs=lidar_dirs, ) # Compute the row and column indices. H, W = lidar.intrinsics.vertical_res, lidar.intrinsics.horizontal_res row_indices, col_indices = np.divmod(ray_indices, W) return row_indices, col_indices def points_to_range_with_nn(points: np.ndarray, lidar: Lidar): """ Convert points to a range image with nearest neighboring ray. Smallest distance will be used when multiple points map to the same pixel. Points outside the lidar's field of view are considered out-of-bound and ignored. Args: points: (N, 3) array, points in the lidar frame. lidar: Lidar object. """ # Constants for lidar's field of view (in radians). fov_up = np.radians(lidar.intrinsics.fov_up) fov_down = np.radians(lidar.intrinsics.fov_down) # Transform the points to the lidar's coordinate system. lidar_center = ct.convert.pose_to_C(lidar.pose) point_dirs = points - lidar_center point_dists = np.linalg.norm(point_dirs, axis=1) point_dirs /= point_dists[:, None] # Filter by "up-down" angles. xy_dists = np.linalg.norm(point_dirs[:, :2], axis=1) z_dists = point_dirs[:, 2] vertical_angles = np.arctan2(z_dists, xy_dists) valid_indices = np.where( (vertical_angles >= -fov_down) & (vertical_angles <= fov_up) )[0] print(f"Filter by angles: {len(points)} -> {len(valid_indices)}") point_dirs = point_dirs[valid_indices] point_dists = point_dists[valid_indices] # Filter by max_range. 
valid_indices = np.where(point_dists <= lidar.intrinsics.max_range)[0] print(f"Filter by max_range: {len(point_dirs)} -> {len(valid_indices)}") point_dirs = point_dirs[valid_indices] point_dists = point_dists[valid_indices] # Lidar directions are always ordered. lidar_rays = lidar.get_rays() lidar_dirs = lidar_rays[:, 3:] # For each valid point direction, find the closest direction in lidar_dirs. ray_indices = find_closest_directions( query_dirs=point_dirs, model_dirs=lidar_dirs, ) # Create a range image. H, W = lidar.intrinsics.vertical_res, lidar.intrinsics.horizontal_res im_range = np.full((H, W), np.inf, dtype=np.float32) # Map 1D indices to 2D (row, column) indices. row_indices, col_indices = np.divmod(ray_indices, W) # Update the range image, keeping the smallest distance for each pixel. for row, col, dist in zip(row_indices, col_indices, point_dists): im_range[row, col] = min(im_range[row, col], dist) # Replace np.inf with zeros (or any other appropriate value). im_range[im_range == np.inf] = 0 return im_range def points_to_range_with_fov(points: np.ndarray, lidar: Lidar): """ Convert lidar points to panoramic frame. Lidar points are in local coordinates. Args: points: (N, 3), float32, points in the lidar frame. lidar: Lidar object. Return: pano: (H, W), float32, panoramic image representing the depth. """ fov_up = lidar.intrinsics.fov_up fov_down = lidar.intrinsics.fov_down total_fov = fov_up + fov_down lidar_H = lidar.intrinsics.vertical_res lidar_W = lidar.intrinsics.horizontal_res max_depth = lidar.intrinsics.max_range # Compute distances to lidar center. lidar_center = ct.convert.pose_to_C(lidar.pose) local_points = points - lidar_center dists = np.linalg.norm(local_points, axis=1) # Fill pano. pano = np.zeros((lidar_H, lidar_W)) for local_point, dist in zip(local_points, dists): # Check max depth. 
if dist >= max_depth: continue x, y, z = local_point beta = np.pi - np.arctan2(y, x) alpha = np.arctan2(z, np.sqrt(x**2 + y**2)) + np.radians(fov_down) c = int(round(beta / (2 * np.pi / lidar_W))) r = int(round(lidar_H - alpha / (np.radians(total_fov) / lidar_H))) # Check out-of-bounds. if r >= lidar_H or r < 0 or c >= lidar_W or c < 0: continue # Set to min dist if not set or if current dist is smaller. if pano[r, c] == 0.0 or pano[r, c] > dist: pano[r, c] = dist return pano def points_to_range_with_angles(points: np.ndarray, lidar: Lidar): """ Convert lidar points to panoramic frame using specified vertical angles. Lidar points are in local coordinates. Args: points: (N, 3), float32, points in the lidar frame. lidar: Lidar object. Return: pano: (H, W), float32, panoramic image representing the depth. """ lidar_H = lidar.intrinsics.vertical_res lidar_W = lidar.intrinsics.horizontal_res max_depth = lidar.intrinsics.max_range vertical_degrees = np.radians(lidar.intrinsics.vertical_degrees) # Compute distances to lidar center. lidar_center = ct.convert.pose_to_C(lidar.pose) local_points = points - lidar_center dists = np.linalg.norm(local_points, axis=1) # Fill pano. pano = np.zeros((lidar_H, lidar_W)) for local_point, dist in zip(local_points, dists): # Check max depth. if dist >= max_depth: continue x, y, z = local_point beta = np.pi - np.arctan2(y, x) alpha = np.arctan2(z, np.sqrt(x**2 + y**2)) c = int(round(beta / (2 * np.pi / lidar_W))) r = np.argmin(np.abs(vertical_degrees - alpha)) # Check out-of-bounds. if r >= lidar_H or r < 0 or c >= lidar_W or c < 0: continue # Set to min dist if not set or if current dist is smaller. if pano[r, c] == 0.0 or pano[r, c] > dist: pano[r, c] = dist return pano def points_to_range_indices(points: np.ndarray, lidar: Lidar): """ Using the "angles" method. Convert lidar points to indices in the panoramic frame using specified vertical angles. Lidar points are in local coordinates. 
Args: points: (N, 3), float32, points in the lidar frame. lidar: Lidar object. Return: row_indices: (N,), int, row indices in the panoramic frame. col_indices: (N,), int, column indices in the panoramic frame. """ lidar_H = lidar.intrinsics.vertical_res lidar_W = lidar.intrinsics.horizontal_res max_depth = lidar.intrinsics.max_range vertical_degrees = np.radians(lidar.intrinsics.vertical_degrees) # Transform the points to the lidar's coordinate system. lidar_center = ct.convert.pose_to_C(lidar.pose) local_points = points - lidar_center # Preallocate arrays for indices. row_indices = np.zeros(len(points), dtype=int) col_indices = np.zeros(len(points), dtype=int) for i, local_point in enumerate(local_points): x, y, z = local_point dist = np.linalg.norm(local_point) # Check max depth. if dist >= max_depth: continue beta = np.pi - np.arctan2(y, x) alpha = np.arctan2(z, np.sqrt(x**2 + y**2)) col = int(round(beta / (2 * np.pi / lidar_W))) row = np.argmin(np.abs(vertical_degrees - alpha)) # Check out-of-bounds. if row >= lidar_H or row < 0 or col >= lidar_W or col < 0: continue row_indices[i] = row col_indices[i] = col return row_indices, col_indices def main(): parser = argparse.ArgumentParser() parser.add_argument( "--style", type=str, required=True, choices=("waymo", "nuscenes"), help="To create raykeep dataset, the src and dst style are the same.", ) args = parser.parse_args() if args.style == "waymo": scene_dir = lit_paths.waymo.scene_dir sim_dir = lit_paths.waymo.to_waymo_sim_dir raykeep_dir = lit_paths.waymo.to_nuscenes_raykeep lidar_intrinsics = WaymoLidarIntrinsics() elif args.style == "nuscenes": scene_dir = lit_paths.nuscenes.scene_dir sim_dir = lit_paths.nuscenes.to_nuscenes_sim_dir raykeep_dir = lit_paths.nuscenes.to_nuscenes_raykeep lidar_intrinsics = NuScenesLidarIntrinsics() else: raise ValueError(f"Unknown style: {args.style}") # Check number of simulation scenes. 
sim_scene_paths = list(sorted(sim_dir.glob("*.pkl"))) scene_names = [p.stem for p in sim_scene_paths] print(f"Found {len(scene_names)} simulation scenes.") # Randomly pick num_scenes scenes. np.random.seed(0) num_scenes = 50 scene_names = np.random.choice( scene_names, size=num_scenes, replace=False, ) # Inputs: dir_x, dir_y, dir_z, dist, incident_angle. # Output: 1 for keep, 0 for drop. network_inputs = [] # (N, 5) network_outputs = [] # (N,) for scene_name in tqdm( scene_names, desc="Collecting scenes", ): scene_path = scene_dir / f"{scene_name}.pkl" sim_scene_path = sim_dir / f"{scene_name}.pkl" scene = Scene.load(path=scene_path) sim_scene = SimScene.load(path=sim_scene_path) assert len(scene) == len(sim_scene) for rel_frame, sim_frame in tqdm( zip(scene, sim_scene), desc="Collecting frames", total=len(scene), ): assert sim_frame.frame_index == rel_frame.frame_index assert np.allclose(sim_frame.frame_pose, rel_frame.frame_pose) # Assumeing vehicle coordinates (vehicle is at (0, 0, 0)). lidar_pose = rel_frame.lidar_to_vehicle_poses[0] lidar_center = ct.convert.pose_to_C(lidar_pose) lidar = Lidar( intrinsics=lidar_intrinsics, pose=lidar_pose, ) # Unpack data. H, W = lidar.intrinsics.vertical_res, lidar.intrinsics.horizontal_res sim_points = sim_frame.local_points rel_points = rel_frame.points sim_im_range = points_to_range_with_angles( points=sim_points, lidar=lidar, ) rel_im_range = points_to_range_with_angles( points=rel_points, lidar=lidar, ) # Find indices of sim rays in range image. row_indices, col_indices = points_to_range_indices( points=sim_points, lidar=lidar, ) im = np.zeros((H, W), dtype=np.float32) im[row_indices, col_indices] = 1 # Inputs (M, 5) # Each line is: dir_x, dir_y, dir_z, dist, incident_angle. 
dirs = sim_points - lidar_center dirs = dirs / np.linalg.norm(dirs, axis=1, keepdims=True) dists = sim_im_range[row_indices, col_indices] incident_angles = sim_frame.incident_angles inputs = np.concatenate( (dirs, dists[:, None], incident_angles[:, None]), axis=1 ) network_inputs.append(inputs) # Output: (M,) # 1 for keep, 0 for drop. ray_keeps = rel_im_range[row_indices, col_indices] ray_keeps[ray_keeps > 0] = 1 network_outputs.append(ray_keeps) # Replace points with raykeep points for debugging. overwrite_with_dropped_points = False if overwrite_with_dropped_points: ray_keeper = GBMRayKeeper.load() keep_probs = ray_keeper.predict(inputs) keep_masks = keep_probs > 0.5 print(f"Keep ratio: {np.mean(keep_masks)}") sim_points = sim_points[keep_masks] visualize = False if visualize: # Create range images. sim_im_range_with_fov = points_to_range_with_fov( points=sim_points, lidar=lidar, ) sim_im_range_with_angles = points_to_range_with_angles( points=sim_points, lidar=lidar, ) sim_im_range_with_nn = points_to_range_with_nn( points=sim_points, lidar=lidar, ) rel_im_range_with_fov = points_to_range_with_fov( points=rel_points, lidar=lidar, ) rel_im_range_with_angles = points_to_range_with_angles( points=rel_points, lidar=lidar, ) rel_im_range_with_nn = points_to_range_with_nn( points=rel_points, lidar=lidar, ) num_sim_with_fov = np.count_nonzero(sim_im_range_with_fov) num_sim_with_angles = np.count_nonzero(sim_im_range_with_angles) num_sim_with_nn = np.count_nonzero(sim_im_range_with_nn) num_rel_with_fov = np.count_nonzero(rel_im_range_with_fov) num_rel_with_angles = np.count_nonzero(rel_im_range_with_angles) num_rel_with_nn = np.count_nonzero(rel_im_range_with_nn) # Plot sim to top, rel to down. Add title for each. 
fig, axes = plt.subplots(6, 1) axes[0].imshow(sim_im_range_with_fov) axes[0].set_title(f"Sim with fov ({num_sim_with_fov} rays)") axes[1].imshow(sim_im_range_with_angles) axes[1].set_title( f"Sim with explicit angles ({num_sim_with_angles} rays)" ) axes[2].imshow(sim_im_range_with_nn) axes[2].set_title( f"Sim with nearest neighbors ({num_sim_with_nn} rays)" ) axes[3].imshow(rel_im_range_with_fov) axes[3].set_title(f"Rel with fov ({num_rel_with_fov} rays)") axes[4].imshow(rel_im_range_with_angles) axes[4].set_title( f"Rel with explicit angles ({num_rel_with_angles} rays)" ) axes[5].imshow(rel_im_range_with_nn) axes[5].set_title( f"Rel with nearest neighbors ({num_rel_with_nn} rays)" ) for ax in axes: ax.axis("off") plt.tight_layout() plt.show() visualize = False if visualize: rel_pcd = o3d.geometry.PointCloud() rel_pcd.points = o3d.utility.Vector3dVector(rel_points) rel_pcd.paint_uniform_color([0, 0, 1]) sim_pcd = o3d.geometry.PointCloud() sim_pcd.points = o3d.utility.Vector3dVector(sim_points) sim_pcd.paint_uniform_color([1, 0, 0]) axes = o3d.geometry.TriangleMesh.create_coordinate_frame() o3d.visualization.draw_geometries([rel_pcd, sim_pcd, axes]) # Save to disk. network_inputs = np.concatenate(network_inputs, axis=0).astype(np.float32) network_outputs = np.concatenate(network_outputs, axis=0).astype(np.float32) raykeep_dir.mkdir(parents=True, exist_ok=True) raykeep_path = raykeep_dir / "raykeep_data.npz" np.savez_compressed( raykeep_path, network_inputs=network_inputs, network_outputs=network_outputs, ) if __name__ == "__main__": # test_range_image() main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/lit_train_mlp.py
Python
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import DataLoader, TensorDataset from pathlib import Path # Define the MLP class SimpleMLP(nn.Module): def __init__(self, input_size): super(SimpleMLP, self).__init__() self.fc1 = nn.Linear(input_size, 64) # First hidden layer self.fc2 = nn.Linear(64, 32) # Second hidden layer self.fc3 = nn.Linear(32, 1) # Output layer def forward(self, x): x = F.relu(self.fc1(x)) # Activation function for first layer x = F.relu(self.fc2(x)) # Activation function for second layer x = torch.sigmoid(self.fc3(x)) # Sigmoid activation for output layer return x def load_and_split_dataset(file_path, split_ratio=0.8): data = np.load(file_path) network_inputs = data["network_inputs"] network_outputs = data["network_outputs"] split_index = int(len(network_inputs) * split_ratio) train_inputs = network_inputs[:split_index] train_outputs = network_outputs[:split_index] val_inputs = network_inputs[split_index:] val_outputs = network_outputs[split_index:] return train_inputs, train_outputs, val_inputs, val_outputs def prepare_dataloader(inputs, outputs, batch_size=512): tensor_x = torch.Tensor(inputs) tensor_y = torch.Tensor(outputs) dataset = TensorDataset(tensor_x, tensor_y) return DataLoader(dataset, batch_size=batch_size, shuffle=True) # Training function def train(model, train_loader, criterion, optimizer, num_epochs=50): model.train() for epoch in range(num_epochs): step = 0 for inputs, targets in train_loader: inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, targets.unsqueeze(1).float()) loss.backward() optimizer.step() step += 1 if step % 1000 == 0: print(f"Step {step}, Loss: {loss.item()}") print(f"Epoch {epoch+1}/{num_epochs}, Loss: {loss.item()}") # Evaluation function def evaluate(model, val_loader): model.eval() total = 0 correct = 0 with torch.no_grad(): for inputs, 
targets in val_loader: inputs, targets = inputs.to(device), targets.to(device) outputs = model(inputs) predicted = outputs.round() # Threshold at 0.5 total += targets.size(0) correct += (predicted == targets.unsqueeze(1)).sum().item() accuracy = 100 * correct / total print(f"Accuracy: {accuracy}%") return accuracy # Main function def main(): # Load and prepare data train_inputs, train_outputs, val_inputs, val_outputs = load_and_split_dataset( Path.home() / "research/lit/data/nuscenes/09_raykeep/raykeep_data.npz" ) train_loader = prepare_dataloader(train_inputs, train_outputs, batch_size=512) val_loader = prepare_dataloader(val_inputs, val_outputs, batch_size=512) # Device configuration global device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Model, Loss, and Optimizer input_size = train_inputs.shape[1] model = SimpleMLP(input_size).to(device) criterion = nn.BCELoss() optimizer = optim.Adam(model.parameters(), lr=0.001) # Train and Evaluate train(model, train_loader, criterion, optimizer, num_epochs=10) evaluate(model, val_loader) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
lit_tools/lit_train_raykeep.py
Python
import random import shutil from pathlib import Path import configargparse import matplotlib.pyplot as plt import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from matplotlib import pyplot as plt from lit.network_utils import eval_raykeep_metrics, get_embed_fn, load_raykeep_dataset def setup_seed(seed): np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) random.seed(seed) torch.backends.cudnn.deterministic = True setup_seed(0) class RayKeepNet(nn.Module): def __init__( self, net_depth=4, net_width=128, input_ch=3, output_ch=1, ): """ """ super(RayKeepNet, self).__init__() self.net_depth = net_depth self.net_width = net_width self.input_ch = input_ch self.linear_layers = nn.ModuleList( [nn.Linear(input_ch, net_width)] + [nn.Linear(net_width, net_width) for i in range(net_depth - 1)] ) self.output_layer = nn.Linear(net_width, output_ch) self.linear_layers.apply(RayKeepNet.init_weights) self.output_layer.apply(RayKeepNet.init_weights) def forward(self, x): for linear_layer in self.linear_layers: x = linear_layer(x) x = F.relu(x) x = self.output_layer(x) x = torch.sigmoid(x) return x @staticmethod def init_weights(m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight.data) if m.bias is not None: nn.init.zeros_(m.bias.data) def config_parser(): parser = configargparse.ArgumentParser() # Directory options. 
parser.add_argument( "--config", is_config_file=True, default="raykeep.cfg", help="config file path", ) parser.add_argument( "--exp_name", type=str, default="raykeep", ) parser.add_argument( "--base_dir", type=str, default="./log", help="where to store ckpts and logs", ) parser.add_argument( "--no_reload", action="store_true", help="do not reload weights from saved ckpt", ) # Training options parser.add_argument( "--eval_only", action="store_true", help="Evaluate mode only.", ) parser.add_argument( "--net_depth", type=int, default=8, help="Number of ayers in network", ) parser.add_argument( "--net_width", type=int, default=256, help="Channels per layer", ) parser.add_argument( "--batch_size", type=int, default=2048, help="Batch size (number of random rays per gradient step)", ) parser.add_argument( "--lrate", type=float, default=5e-4, help="Learning rate", ) parser.add_argument( "--lrate_decay", type=int, default=500, help="Exponential learning rate decay (in 1000 steps)", ) parser.add_argument( "--total_iters", type=int, default=500000, ) # Rendering options parser.add_argument( "--multires", type=int, default=10, help="log2 of max freq for positional encoding (3D location)", ) parser.add_argument( "--multires_views", type=int, default=4, help="log2 of max freq for positional encoding (2D direction)", ) # Iteration options. parser.add_argument( "--i_print", type=int, default=100, help="Frequency of console printout and metric logging", ) parser.add_argument( "--i_weights", type=int, default=10000, help="Frequency of weight ckpt saving", ) # Loss type parser.add_argument( "--loss_type", type=str, default="img2mse", help="Options: img2mse / mseloss / l1loss", ) return parser def run_network(inputs, model, embed_fn, embed_dir_fn): """ Prepares inputs and applies network. 
""" ray_dirs, dist, incident = inputs[:, :3], inputs[:, 3], inputs[:, 4] embedded_inputs = torch.cat( ( embed_dir_fn(ray_dirs), embed_fn(incident.unsqueeze(1)), embed_fn(dist.unsqueeze(1)), ), dim=1, ) outputs = model(embedded_inputs) return outputs def evaluate_in_batches( inputs, model, embed_fn, embed_dir_fn, device, batch_size=1024, ): def batch_generator(data, batch_size): for i in range(0, len(data), batch_size): yield data[i : i + batch_size] all_preds = [] model.eval() with torch.no_grad(): for batch in batch_generator(inputs, batch_size): batch = torch.tensor(batch).to(device) preds = run_network(batch, model, embed_fn, embed_dir_fn) all_preds.append(preds.cpu().numpy()) return np.concatenate(all_preds) def main(): parser = config_parser() args = parser.parse_args() # Prepare states. device = torch.device("cuda" if torch.cuda.is_available() else "cpu") torch.set_default_tensor_type("torch.cuda.FloatTensor") # Select loss function loss_dict = { "img2mse": lambda x, y: torch.mean((x - y) ** 2), "mseloss": nn.MSELoss(), "bceloss": nn.BCELoss(), "l1loss": nn.L1Loss(reduction="mean"), } print(f"Selected loss type: {args.loss_type}") rgb_loss = loss_dict[args.loss_type] # Prepare loggings. 
base_dir = args.base_dir exp_name = args.exp_name log_dir = Path(base_dir) / exp_name log_args_path = log_dir / "args.txt" log_config_path = log_dir / "config.txt" log_dir.mkdir(parents=True, exist_ok=True) with open(log_args_path, "w", encoding="utf-8") as f: for arg in sorted(vars(args)): attr = getattr(args, arg) f.write("{} = {}\n".format(arg, attr)) if args.config is not None: shutil.copy(args.config, log_config_path) # Embedder # Input: [dir_x, dir_y, dir_z, dist, incident_angle] # Embedder for dist, incident_angle embed_fn, input_ch = get_embed_fn( args.multires, input_dims=1, ) # Embedder for (dir_x, dir_y, dir_z) embed_dir_fn, input_ch_views = get_embed_fn( args.multires, # args.multires_views input_dims=3, ) total_input_ch = input_ch * 2 + input_ch_views # Model model = RayKeepNet( net_depth=args.net_depth, net_width=args.net_width, input_ch=total_input_ch, ).to(device) # Optimizer optimizer = torch.optim.Adam( params=list(model.parameters()), lr=args.lrate, betas=(0.9, 0.999), ) # Find all .tar files in log_dir. is_ckpt_loaded = False ckpt_paths = sorted(log_dir.glob("*.tar")) if len(ckpt_paths) > 0: print(f"Found checkpoints: {ckpt_paths}") if args.no_reload: print("Ignoring checkpoints, not reloading.") global_iter = 0 else: ckpt_path = ckpt_paths[-1] print(f"Reloading from {ckpt_path}") ckpt = torch.load(ckpt_path) global_iter = ckpt["global_iter"] optimizer.load_state_dict(ckpt["optimizer_state_dict"]) model.load_state_dict(ckpt["network_fn_state_dict"]) is_ckpt_loaded = True else: global_iter = 0 # Load data. 
train_inputs, train_outputs, val_inputs, val_outputs = load_raykeep_dataset( lit_paths.nuscenes.to_nuscenes_raykeep / "raykeep_data.npz", balance=True, ) debug_with_few_samples = False if debug_with_few_samples: num_samples = 500000 train_inputs = train_inputs[:num_samples] train_outputs = train_outputs[:num_samples] if args.eval_only: if not is_ckpt_loaded: raise ValueError("No checkpoint loaded, cannot evaluate.") model.eval() print("[Validation set eval]") with torch.no_grad(): pred_outputs = run_network( torch.tensor(val_inputs).to(device), model, embed_fn, embed_dir_fn, ) pd_raykeeps = pred_outputs.cpu().numpy().round().astype(np.int_).ravel() gt_raykeeps = val_outputs.astype(np.int_) eval_raykeep_metrics( pd_raykeeps=pd_raykeeps, gt_raykeeps=gt_raykeeps, ) print("[Training set eval]") pd_raykeeps = evaluate_in_batches( train_inputs, model, embed_fn, embed_dir_fn, device ) pd_raykeeps = pd_raykeeps.round().astype(np.int_).ravel() gt_raykeeps = train_outputs.astype(np.int_) eval_raykeep_metrics( pd_raykeeps=pd_raykeeps, gt_raykeeps=gt_raykeeps, ) exit(0) # (N, 5) cat (N,) -> (N, 6) train_data = np.concatenate([train_inputs, train_outputs[:, None]], axis=1) # Prepare ray batch tensor if batching random rays batch_size = args.batch_size train_data = torch.tensor(train_data).to(device) total_iters = args.total_iters + 1 print("Begin") loss_log = [] i_batch = 0 for i in range(global_iter + 1, total_iters): # Load batch data. batch = train_data[i_batch : i_batch + batch_size] inputs, gt_keeps = batch[:, :5], batch[:, 5] i_batch += batch_size # Reset and shuffle data. if i_batch >= len(train_data): rand_idx = torch.randperm(len(train_data)) train_data = train_data[rand_idx] i_batch = 0 # Core optimization loop. pd_keeps = run_network(inputs, model, embed_fn, embed_dir_fn) optimizer.zero_grad() loss = rgb_loss(pd_keeps, gt_keeps.unsqueeze(1)) loss.backward() optimizer.step() # Update learning rate. 
decay_rate = 0.1 decay_steps = args.lrate_decay * 1000 new_lrate = args.lrate * (decay_rate ** (global_iter / decay_steps)) for param_group in optimizer.param_groups: param_group["lr"] = new_lrate # Save checkpoint. if i % args.i_weights == 0: ckpt_path = log_dir / f"{i:06d}.tar" ckpt = { "global_iter": global_iter, "network_fn_state_dict": model.state_dict(), "optimizer_state_dict": optimizer.state_dict(), } torch.save(ckpt, ckpt_path) print(f"Saved checkpoints at {ckpt_path}") # Log training. loss_log.append(loss.item()) if i % args.i_print == 0: loss_save = np.array(loss_log) plt.plot(loss_save) plt.savefig(log_dir / "loss_curve.png") plt.close() print(f"[TRAIN] Iter: {i}, Loss: {loss.item():.4f}") global_iter += 1 # Save loss log. loss_log = np.array(loss_log) np.save(log_dir / "loss_log.npy", loss_log) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/__init__.py
Python
import pkg_resources __version__ = pkg_resources.get_distribution("lit").version
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/config.py
Python
from pathlib import Path import yaml from easydict import EasyDict def log_config_to_file(cfg, pre="cfg", logger=None): for key, val in cfg.items(): if isinstance(cfg[key], EasyDict): logger.info("\n%s.%s = edict()" % (pre, key)) log_config_to_file(cfg[key], pre=pre + "." + key, logger=logger) continue logger.info("%s.%s: %s" % (pre, key, val)) def cfg_from_list(cfg_list, config): """Set config keys via list (e.g., from command line).""" from ast import literal_eval assert len(cfg_list) % 2 == 0 for k, v in zip(cfg_list[0::2], cfg_list[1::2]): key_list = k.split(".") d = config for subkey in key_list[:-1]: assert subkey in d, "NotFoundKey: %s" % subkey d = d[subkey] subkey = key_list[-1] assert subkey in d, "NotFoundKey: %s" % subkey try: value = literal_eval(v) except: value = v if isinstance(value, tuple): value = list(value) if type(value) != type(d[subkey]) and isinstance(d[subkey], EasyDict): key_val_list = value.split(",") for src in key_val_list: cur_key, cur_val = src.split(":") val_type = type(d[subkey][cur_key]) cur_val = val_type(cur_val) d[subkey][cur_key] = cur_val elif type(value) != type(d[subkey]) and isinstance(d[subkey], list): val_list = value.split(",") for k, x in enumerate(val_list): val_list[k] = type(d[subkey][0])(x) d[subkey] = val_list else: assert type(value) == type( d[subkey] ), "type {} does not match original type {}".format( type(value), type(d[subkey]) ) d[subkey] = value def merge_new_config(config, new_config): if "_BASE_CONFIG_" in new_config: with open(new_config["_BASE_CONFIG_"], "r") as f: try: yaml_config = yaml.safe_load(f, Loader=yaml.FullLoader) except: yaml_config = yaml.safe_load(f) config.update(EasyDict(yaml_config)) for key, val in new_config.items(): if not isinstance(val, dict): config[key] = val continue if key not in config: config[key] = EasyDict() merge_new_config(config[key], val) return config def cfg_from_yaml_file(cfg_file, config): with open(cfg_file, "r") as f: try: new_config = yaml.safe_load(f, 
Loader=yaml.FullLoader) except: new_config = yaml.safe_load(f) merge_new_config(config=config, new_config=new_config) return config cfg = EasyDict() cfg.ROOT_DIR = (Path(__file__).resolve().parent / "../").resolve() cfg.LOCAL_RANK = 0
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/__init__.py
Python
import torch from torch.utils.data import DataLoader from torch.utils.data import DistributedSampler as _DistributedSampler from pcdet.datasets.dataset import DatasetTemplate from pcdet.datasets.kitti.kitti_dataset import KittiDataset from pcdet.datasets.lyft.lyft_dataset import LyftDataset from pcdet.datasets.nuscenes.nuscenes_dataset import NuScenesDataset from pcdet.datasets.waymo.waymo_dataset import ( MixedWaymoDataset, WaymoDataset, WaymoDatasetInfo, ) from pcdet.datasets.wayscenes.wayscenes_dataset import WayScenesDataset from pcdet.utils import common_utils __all__ = { "DatasetTemplate": DatasetTemplate, "KittiDataset": KittiDataset, "WaymoDataset": WaymoDataset, "MixedWaymoDataset": MixedWaymoDataset, "WayScenesDataset": WayScenesDataset, "WaymoDatasetInfo": WaymoDatasetInfo, "NuScenesDataset": NuScenesDataset, "LyftDataset": LyftDataset, } class DistributedSampler(_DistributedSampler): def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): super().__init__(dataset, num_replicas=num_replicas, rank=rank) self.shuffle = shuffle def __iter__(self): if self.shuffle: g = torch.Generator() g.manual_seed(self.epoch) indices = torch.randperm(len(self.dataset), generator=g).tolist() else: indices = torch.arange(len(self.dataset)).tolist() indices += indices[: (self.total_size - len(indices))] assert len(indices) == self.total_size indices = indices[self.rank : self.total_size : self.num_replicas] assert len(indices) == self.num_samples return iter(indices) def build_dataloader( dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4, logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0, force_no_shuffle=False, ): dataset = __all__[dataset_cfg.DATASET]( dataset_cfg=dataset_cfg, class_names=class_names, root_path=root_path, training=training, logger=logger, ) if merge_all_iters_to_one_epoch: assert hasattr(dataset, "merge_all_iters_to_one_epoch") dataset.merge_all_iters_to_one_epoch(merge=True, 
epochs=total_epochs) if dist: if training: sampler = torch.utils.data.distributed.DistributedSampler(dataset) else: rank, world_size = common_utils.get_dist_info() sampler = DistributedSampler(dataset, world_size, rank, shuffle=False) else: sampler = None dataloader = DataLoader( dataset, batch_size=batch_size, pin_memory=True, num_workers=workers, shuffle=(sampler is None) and training and (not force_no_shuffle), collate_fn=dataset.collate_batch, drop_last=False, sampler=sampler, timeout=0, ) return dataset, dataloader, sampler def build_wayscenes_dataloader( waymo_cfg, nuscenes_cfg, batch_size, dist, root_path=None, workers=4, logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0, force_no_shuffle=False, ): """ Builds a dataloader for the WayScenes dataset, which contains instances of both WaymoDataset and NuScenesDataset. Args: waymo_cfg: Configuration for Waymo dataset. nuscenes_cfg: Configuration for NuScenes dataset. batch_size: Batch size for the dataloader. dist: Boolean indicating if distributed training is used. root_path: Root directory path where datasets are stored. workers: Number of workers for the dataloader. logger: Logger for logging purposes. training: Boolean indicating if the dataset is used for training. merge_all_iters_to_one_epoch: If True, all iterations are merged into one epoch. total_epochs: Total number of epochs for training. force_no_shuffle: If True, disables shuffling of the dataset. Returns: dataset: The WayScenes dataset object. dataloader: The dataloader for the WayScenes dataset. sampler: The sampler used for the dataset; None if not in distributed mode. 
""" from pcdet.datasets import WayScenesDataset dataset = WayScenesDataset( waymo_cfg=waymo_cfg, nuscenes_cfg=nuscenes_cfg, training=training, root_path=root_path, logger=logger, ) if merge_all_iters_to_one_epoch: assert hasattr( dataset, "merge_all_iters_to_one_epoch" ), "merge_all_iters_to_one_epoch not implemented for WayScenesDataset" dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs) if dist: if training: sampler = torch.utils.data.distributed.DistributedSampler(dataset) else: rank, world_size = common_utils.get_dist_info() sampler = torch.utils.data.distributed.DistributedSampler( dataset, num_replicas=world_size, rank=rank, shuffle=False ) else: sampler = None dataloader = DataLoader( dataset, batch_size=batch_size, pin_memory=True, num_workers=workers, shuffle=(sampler is None) and training and not force_no_shuffle, collate_fn=dataset.collate_batch, drop_last=False, sampler=sampler, timeout=0, ) return dataset, dataloader, sampler
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/augmentor/augmentor_utils.py
Python
import copy import warnings import numba import numpy as np import torch from pcdet.ops.iou3d_nms import iou3d_nms_utils from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils from pcdet.utils import common_utils try: from numba.errors import NumbaPerformanceWarning warnings.filterwarnings("ignore", category=NumbaPerformanceWarning) except: pass def random_flip_along_x(gt_boxes, points): """ Args: gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]] points: (M, 3 + C) Returns: """ enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5]) if enable: gt_boxes[:, 1] = -gt_boxes[:, 1] gt_boxes[:, 6] = -gt_boxes[:, 6] points[:, 1] = -points[:, 1] if gt_boxes.shape[1] > 7: gt_boxes[:, 8] = -gt_boxes[:, 8] return gt_boxes, points def random_flip_along_y(gt_boxes, points): """ Args: gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]] points: (M, 3 + C) Returns: """ enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5]) if enable: gt_boxes[:, 0] = -gt_boxes[:, 0] gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi) points[:, 0] = -points[:, 0] if gt_boxes.shape[1] > 7: gt_boxes[:, 7] = -gt_boxes[:, 7] return gt_boxes, points def global_rotation(gt_boxes, points, rot_range): """ Args: gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]] points: (M, 3 + C), rot_range: [min, max] Returns: """ noise_rotation = np.random.uniform(rot_range[0], rot_range[1]) points = common_utils.rotate_points_along_z( points[np.newaxis, :, :], np.array([noise_rotation]) )[0] gt_boxes[:, 0:3] = common_utils.rotate_points_along_z( gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]) )[0] gt_boxes[:, 6] += noise_rotation if gt_boxes.shape[1] > 7: gt_boxes[:, 7:9] = common_utils.rotate_points_along_z( np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[ np.newaxis, :, : ], np.array([noise_rotation]), )[0][:, 0:2] return gt_boxes, points def global_scaling(gt_boxes, points, scale_range): """ Args: gt_boxes: (N, 7), [x, y, z, 
dx, dy, dz, heading] points: (M, 3 + C), scale_range: [min, max] Returns: """ if scale_range[1] - scale_range[0] < 1e-3: return gt_boxes, points noise_scale = np.random.uniform(scale_range[0], scale_range[1]) points[:, :3] *= noise_scale gt_boxes[:, :6] *= noise_scale return gt_boxes, points def global_sampling(gt_boxes, points, gt_boxes_mask, sample_ratio_range, prob): """ Args: gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading] points: (M, 3 + C) gt_boxes_mask: (N), boolen mask for gt_boxes sample_ratio_range: [min, max]. ratio to keep points remain. prob: prob to dentermine whether sampling this frame Returns: """ if np.random.uniform(0, 1) > prob: return gt_boxes, points, gt_boxes_mask num_points = points.shape[0] sample_ratio = np.random.uniform(sample_ratio_range[0], sample_ratio_range[1]) remain_points_num = int(num_points * sample_ratio) # shuffle points shuffle_idx = np.random.permutation(points.shape[0]) points = points[shuffle_idx] # sample points points = points[:remain_points_num] # mask empty gt_boxes num_points_in_gt = ( roiaware_pool3d_utils.points_in_boxes_cpu( torch.from_numpy(points[:, :3]), torch.from_numpy(gt_boxes[:, :7]) ) .numpy() .sum(axis=1) ) mask = num_points_in_gt >= 1 gt_boxes_mask = gt_boxes_mask & mask return gt_boxes, points, gt_boxes_mask def scale_pre_object(gt_boxes, points, gt_boxes_mask, scale_perturb, num_try=50): """ uniform sacle object with given range Args: gt_boxes: (N, 7) under unified coordinates points: (M, 3 + C) points in lidar gt_boxes_mask: (N), boolen mask for scale_perturb: num_try: Returns: """ num_boxes = gt_boxes.shape[0] if not isinstance(scale_perturb, (list, tuple, np.ndarray)): scale_perturb = [-scale_perturb, scale_perturb] # boxes wise scale ratio scale_noises = np.random.uniform( scale_perturb[0], scale_perturb[1], size=[num_boxes, num_try] ) for k in range(num_boxes): if gt_boxes_mask[k] == 0: continue scl_box = copy.deepcopy(gt_boxes[k]) scl_box = scl_box.reshape(1, -1).repeat([num_try], axis=0) 
scl_box[:, 3:6] = scl_box[:, 3:6] * scale_noises[k].reshape(-1, 1).repeat( [3], axis=1 ) # detect conflict # [num_try, N-1] if num_boxes > 1: self_mask = np.ones(num_boxes, dtype=np.bool_) self_mask[k] = False iou_matrix = iou3d_nms_utils.boxes_bev_iou_cpu(scl_box, gt_boxes[self_mask]) ious = np.max(iou_matrix, axis=1) no_conflict_mask = ious == 0 # all trys have conflict with other gts if no_conflict_mask.sum() == 0: continue # scale points and assign new box try_idx = no_conflict_mask.nonzero()[0][0] else: try_idx = 0 point_masks = roiaware_pool3d_utils.points_in_boxes_cpu( points[:, 0:3], np.expand_dims(gt_boxes[k], axis=0) ).squeeze(0) obj_points = points[point_masks > 0] obj_center, lwh, ry = gt_boxes[k, 0:3], gt_boxes[k, 3:6], gt_boxes[k, 6] # relative coordinates obj_points[:, 0:3] -= obj_center obj_points = common_utils.rotate_points_along_z( np.expand_dims(obj_points, axis=0), -ry ).squeeze(0) new_lwh = lwh * scale_noises[k][try_idx] obj_points[:, 0:3] = obj_points[:, 0:3] * scale_noises[k][try_idx] obj_points = common_utils.rotate_points_along_z( np.expand_dims(obj_points, axis=0), ry ).squeeze(0) # calculate new object center to avoid object float over the road obj_center[2] += (new_lwh[2] - lwh[2]) / 2 obj_points[:, 0:3] += obj_center points[point_masks > 0] = obj_points gt_boxes[k, 3:6] = new_lwh # if enlarge boxes, remove bg points if scale_noises[k][try_idx] > 1: points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu( points[:, 0:3], np.expand_dims(gt_boxes[k], axis=0) ).squeeze(0) keep_mask = ~np.logical_xor(point_masks, points_dst_mask) points = points[keep_mask] return points, gt_boxes def normalize_object_size(boxes, points, boxes_mask, size_res): """ :param boxes: (N, 7) under unified boxes :param points: (N, 3 + C) :param boxes_mask :param size_res: (3) [l, w, h] :return: """ points = copy.deepcopy(points) boxes = copy.deepcopy(boxes) for k in range(boxes.shape[0]): # skip boxes that not need to normalize if boxes_mask[k] == 0: continue 
masks = roiaware_pool3d_utils.points_in_boxes_cpu( points[:, 0:3], boxes[k : k + 1] ).squeeze(0) obj_points = points[masks > 0] obj_center, lwh, ry = boxes[k, 0:3], boxes[k, 3:6], boxes[k, 6] obj_points[:, 0:3] -= obj_center obj_points = common_utils.rotate_points_along_z( np.expand_dims(obj_points, axis=0), -ry ).squeeze(0) new_lwh = lwh + np.array(size_res) # skip boxes that shift to have negative if (new_lwh < 0).any(): boxes_mask[k] = False continue scale_lwh = new_lwh / lwh obj_points[:, 0:3] = obj_points[:, 0:3] * scale_lwh obj_points = common_utils.rotate_points_along_z( np.expand_dims(obj_points, axis=0), ry ).squeeze(0) # calculate new object center to avoid object float over the road obj_center[2] += size_res[2] / 2 obj_points[:, 0:3] += obj_center points[masks > 0] = obj_points boxes[k, 3:6] = new_lwh # if enlarge boxes, remove bg points if (np.array(size_res) > 0).any(): points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu( points[:, 0:3], np.expand_dims(boxes[k], axis=0) ).squeeze(0) keep_mask = ~np.logical_xor(masks, points_dst_mask) points = points[keep_mask] return points, boxes def rotate_objects(gt_boxes, points, gt_boxes_mask, rotation_perturb, prob, num_try=50): """ Args: gt_boxes: [N, 7] (x, y, z, dx, dy, dz, heading) on unified coordinate points: [M] gt_boxes_mask: [N] bool rotation_perturb: ratation noise parameter prob: prob to random rotate object num_try: times to try rotate one object Returns: """ num_boxes = gt_boxes.shape[0] if not isinstance(rotation_perturb, (list, tuple, np.ndarray)): rotation_perturb = [-rotation_perturb, rotation_perturb] # with prob to rotate each object rot_mask = np.random.uniform(0, 1, size=[num_boxes]) < prob # generate random ratate noise for each boxes rot_noise = np.random.uniform( rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try] ) for idx in range(num_boxes): # don't need to rotate this object if (not rot_mask[idx]) or (not gt_boxes_mask[idx]): continue # generate rotated boxes 
num_try times rot_box = copy.deepcopy(gt_boxes[idx]) # [num_try, 7] rot_box = rot_box.reshape(1, -1).repeat([num_try], axis=0) rot_box[:, 6] += rot_noise[idx] # detect conflict # [num_try, N-1] if num_boxes > 1: self_mask = np.ones(num_boxes, dtype=np.bool_) self_mask[idx] = False iou_matrix = iou3d_nms_utils.boxes_bev_iou_cpu(rot_box, gt_boxes[self_mask]) ious = np.max(iou_matrix, axis=1) no_conflict_mask = ious == 0 # all trys have conflict with other gts if no_conflict_mask.sum() == 0: continue # rotate points and assign new box try_idx = no_conflict_mask.nonzero()[0][0] else: try_idx = 0 point_masks = roiaware_pool3d_utils.points_in_boxes_cpu( points[:, 0:3], np.expand_dims(gt_boxes[idx], axis=0) ).squeeze(0) object_points = points[point_masks > 0] object_center = gt_boxes[idx][0:3] object_points[:, 0:3] -= object_center object_points = common_utils.rotate_points_along_z( object_points[np.newaxis, :, :], np.array([rot_noise[idx][try_idx]]) )[0] object_points[:, 0:3] += object_center points[point_masks > 0] = object_points # remove bg points that lie the position we want to place object points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu( points[:, 0:3], np.expand_dims(rot_box[try_idx], axis=0) ).squeeze(0) keep_mask = ~np.logical_xor(point_masks, points_dst_mask) points = points[keep_mask] gt_boxes[idx] = rot_box[try_idx] return gt_boxes, points
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/augmentor/data_augmentor.py
Python
import copy from functools import partial import numpy as np import open3d as o3d from lit.recon_utils import bboxes_to_lineset from pcdet.datasets.augmentor import augmentor_utils, database_sampler from pcdet.utils import common_utils class DataAugmentor(object): def __init__(self, root_path, augmentor_configs, class_names, logger=None): self.root_path = root_path self.class_names = class_names self.logger = logger self.augmentor_configs = augmentor_configs self.data_augmentor_queue = [] aug_config_list = ( augmentor_configs if isinstance(augmentor_configs, list) else augmentor_configs.AUG_CONFIG_LIST ) for cur_cfg in aug_config_list: if not isinstance(augmentor_configs, list): if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST: continue cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg) self.data_augmentor_queue.append(cur_augmentor) def gt_sampling(self, config=None): db_sampler = database_sampler.DataBaseSampler( root_path=self.root_path, sampler_cfg=config, class_names=self.class_names, logger=self.logger, ) return db_sampler def __getstate__(self): d = dict(self.__dict__) del d["logger"] return d def __setstate__(self, d): self.__dict__.update(d) def random_object_rotation(self, data_dict=None, config=None): if data_dict is None: return partial(self.random_object_rotation, config=config) gt_boxes, points = augmentor_utils.rotate_objects( data_dict["gt_boxes"], data_dict["points"], data_dict["gt_boxes_mask"], rotation_perturb=config["ROT_UNIFORM_NOISE"], prob=config["ROT_PROB"], num_try=50, ) data_dict["gt_boxes"] = gt_boxes data_dict["points"] = points return data_dict def random_object_scaling(self, data_dict=None, config=None): if data_dict is None: return partial(self.random_object_scaling, config=config) points, gt_boxes = augmentor_utils.scale_pre_object( data_dict["gt_boxes"], data_dict["points"], gt_boxes_mask=data_dict["gt_boxes_mask"], scale_perturb=config["SCALE_UNIFORM_NOISE"], ) data_dict["gt_boxes"] = gt_boxes data_dict["points"] = 
points return data_dict def random_world_sampling(self, data_dict=None, config=None): if data_dict is None: return partial(self.random_world_sampling, config=config) gt_boxes, points, gt_boxes_mask = augmentor_utils.global_sampling( data_dict["gt_boxes"], data_dict["points"], gt_boxes_mask=data_dict["gt_boxes_mask"], sample_ratio_range=config["WORLD_SAMPLE_RATIO"], prob=config["PROB"], ) data_dict["gt_boxes"] = gt_boxes data_dict["gt_boxes_mask"] = gt_boxes_mask data_dict["points"] = points return data_dict def random_world_flip(self, data_dict=None, config=None): if data_dict is None: return partial(self.random_world_flip, config=config) gt_boxes, points = data_dict["gt_boxes"], data_dict["points"] for cur_axis in config["ALONG_AXIS_LIST"]: assert cur_axis in ["x", "y"] gt_boxes, points = getattr( augmentor_utils, "random_flip_along_%s" % cur_axis )( gt_boxes, points, ) data_dict["gt_boxes"] = gt_boxes data_dict["points"] = points return data_dict def random_world_rotation(self, data_dict=None, config=None): if data_dict is None: return partial(self.random_world_rotation, config=config) rot_range = config["WORLD_ROT_ANGLE"] if not isinstance(rot_range, list): rot_range = [-rot_range, rot_range] gt_boxes, points = augmentor_utils.global_rotation( data_dict["gt_boxes"], data_dict["points"], rot_range=rot_range ) data_dict["gt_boxes"] = gt_boxes data_dict["points"] = points return data_dict def random_world_scaling(self, data_dict=None, config=None): if data_dict is None: return partial(self.random_world_scaling, config=config) gt_boxes, points = augmentor_utils.global_scaling( data_dict["gt_boxes"], data_dict["points"], config["WORLD_SCALE_RANGE"] ) data_dict["gt_boxes"] = gt_boxes data_dict["points"] = points return data_dict def normalize_object_size(self, data_dict=None, config=None): if data_dict is None: return partial(self.normalize_object_size, config=config) # Backup unscaled points and gt_boxes src_points = copy.deepcopy(data_dict["points"]) src_gt_boxes = 
copy.deepcopy(data_dict["gt_boxes"]) points, gt_boxes = augmentor_utils.normalize_object_size( data_dict["gt_boxes"], data_dict["points"], data_dict["gt_boxes_mask"], config["SIZE_RES"], ) data_dict["gt_boxes"] = gt_boxes data_dict["points"] = points visualize = False if visualize: boxes_mask = data_dict["gt_boxes_mask"] src_pcd = o3d.geometry.PointCloud() src_pcd.points = o3d.utility.Vector3dVector(src_points[:, :3]) src_pcd.paint_uniform_color([0.0, 0.0, 1.0]) src_bboxes_ls = bboxes_to_lineset( src_gt_boxes[boxes_mask], frame_pose=np.eye(4) ) src_bboxes_ls.paint_uniform_color([0.0, 0.0, 1.0]) dst_pcd = o3d.geometry.PointCloud() dst_pcd.points = o3d.utility.Vector3dVector(points[:, :3]) dst_pcd.paint_uniform_color([1.0, 0.0, 0.0]) dst_bboxes_ls = bboxes_to_lineset( gt_boxes[boxes_mask], frame_pose=np.eye(4) ) dst_bboxes_ls.paint_uniform_color([1.0, 0.0, 0.0]) o3d.visualization.draw_geometries( [ src_pcd, dst_pcd, src_bboxes_ls, dst_bboxes_ls, ] ) return data_dict def forward(self, data_dict): """ Args: data_dict: points: (N, 3 + C_in) gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading] gt_names: optional, (N), string ... 
Returns: """ for cur_augmentor in self.data_augmentor_queue: data_dict = cur_augmentor(data_dict=data_dict) if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) data_dict["gt_boxes"][:, 6] = common_utils.limit_period( data_dict["gt_boxes"][:, 6], offset=0.5, period=2 * np.pi ) if "calib" in data_dict: data_dict.pop("calib") if "road_plane" in data_dict: data_dict.pop("road_plane") if "gt_boxes_mask" in data_dict: gt_boxes_mask = data_dict["gt_boxes_mask"] data_dict["gt_boxes"] = data_dict["gt_boxes"][gt_boxes_mask] data_dict["gt_names"] = data_dict["gt_names"][gt_boxes_mask] if "obj_ids" in data_dict: data_dict["obj_ids"] = data_dict["obj_ids"][gt_boxes_mask] data_dict.pop("gt_boxes_mask") if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) return data_dict def re_prepare(self, augmentor_configs=None, intensity=None): self.data_augmentor_queue = [] if augmentor_configs is None: augmentor_configs = self.augmentor_configs aug_config_list = ( augmentor_configs if isinstance(augmentor_configs, list) else augmentor_configs.AUG_CONFIG_LIST ) for cur_cfg in aug_config_list: if not isinstance(augmentor_configs, list): if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST: continue # scale data augmentation intensity if intensity is not None: cur_cfg = self.adjust_augment_intensity(cur_cfg, intensity) cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg) self.data_augmentor_queue.append(cur_augmentor) def adjust_augment_intensity(self, config, intensity): adjust_map = { "random_object_scaling": "SCALE_UNIFORM_NOISE", "random_object_rotation": "ROT_UNIFORM_NOISE", "random_world_rotation": "WORLD_ROT_ANGLE", "random_world_scaling": "WORLD_SCALE_RANGE", } def cal_new_intensity(config, flag): origin_intensity_list = config.get(adjust_map[config.NAME]) assert len(origin_intensity_list) == 2 assert np.isclose( flag - origin_intensity_list[0], origin_intensity_list[1] - flag ) noise = 
origin_intensity_list[1] - flag new_noise = noise * intensity new_intensity_list = [flag - new_noise, new_noise + flag] return new_intensity_list if config.NAME not in adjust_map: return config # for data augmentations that init with 1 if config.NAME in ["random_object_scaling", "random_world_scaling"]: new_intensity_list = cal_new_intensity(config, flag=1) setattr(config, adjust_map[config.NAME], new_intensity_list) return config elif config.NAME in ["random_object_rotation", "random_world_rotation"]: new_intensity_list = cal_new_intensity(config, flag=0) setattr(config, adjust_map[config.NAME], new_intensity_list) return config else: raise NotImplementedError
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/augmentor/database_sampler.py
Python
import pickle import numpy as np from pcdet.ops.iou3d_nms import iou3d_nms_utils from pcdet.utils import box_utils class DataBaseSampler(object): def __init__(self, root_path, sampler_cfg, class_names, logger=None): self.root_path = root_path self.class_names = class_names self.sampler_cfg = sampler_cfg self.logger = logger self.db_infos = {} for class_name in class_names: self.db_infos[class_name] = [] for db_info_path in sampler_cfg.DB_INFO_PATH: db_info_path = self.root_path.resolve() / db_info_path with open(str(db_info_path), "rb") as f: infos = pickle.load(f) [ self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names ] for func_name, val in sampler_cfg.PREPARE.items(): self.db_infos = getattr(self, func_name)(self.db_infos, val) self.sample_groups = {} self.sample_class_num = {} self.limit_whole_scene = sampler_cfg.get("LIMIT_WHOLE_SCENE", False) for x in sampler_cfg.SAMPLE_GROUPS: class_name, sample_num = x.split(":") if class_name not in class_names: continue self.sample_class_num[class_name] = sample_num self.sample_groups[class_name] = { "sample_num": sample_num, "pointer": len(self.db_infos[class_name]), "indices": np.arange(len(self.db_infos[class_name])), } def __getstate__(self): d = dict(self.__dict__) del d["logger"] return d def __setstate__(self, d): self.__dict__.update(d) def filter_by_difficulty(self, db_infos, removed_difficulty): new_db_infos = {} for key, dinfos in db_infos.items(): pre_len = len(dinfos) new_db_infos[key] = [ info for info in dinfos if info["difficulty"] not in removed_difficulty ] if self.logger is not None: self.logger.info( "Database filter by difficulty %s: %d => %d" % (key, pre_len, len(new_db_infos[key])) ) return new_db_infos def filter_by_min_points(self, db_infos, min_gt_points_list): for name_num in min_gt_points_list: name, min_num = name_num.split(":") min_num = int(min_num) if min_num > 0 and name in db_infos.keys(): filtered_infos = [] for info in db_infos[name]: if 
info["num_points_in_gt"] >= min_num: filtered_infos.append(info) if self.logger is not None: self.logger.info( "Database filter by min points %s: %d => %d" % (name, len(db_infos[name]), len(filtered_infos)) ) db_infos[name] = filtered_infos return db_infos def sample_with_fixed_number(self, class_name, sample_group): """ Args: class_name: sample_group: Returns: """ sample_num, pointer, indices = ( int(sample_group["sample_num"]), sample_group["pointer"], sample_group["indices"], ) if pointer >= len(self.db_infos[class_name]): indices = np.random.permutation(len(self.db_infos[class_name])) pointer = 0 sampled_dict = [ self.db_infos[class_name][idx] for idx in indices[pointer : pointer + sample_num] ] pointer += sample_num sample_group["pointer"] = pointer sample_group["indices"] = indices return sampled_dict @staticmethod def put_boxes_on_road_planes(gt_boxes, road_planes, calib): """ Only validate in KITTIDataset Args: gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] road_planes: [a, b, c, d] calib: Returns: """ a, b, c, d = road_planes center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3]) cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b center_cam[:, 1] = cur_height_cam cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2] mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height gt_boxes[:, 2] -= mv_height # lidar view return gt_boxes, mv_height def add_sampled_boxes_to_scene( self, data_dict, sampled_gt_boxes, total_valid_sampled_dict ): gt_boxes_mask = data_dict["gt_boxes_mask"] gt_boxes = data_dict["gt_boxes"][gt_boxes_mask] gt_names = data_dict["gt_names"][gt_boxes_mask] points = data_dict["points"] if self.sampler_cfg.get("USE_ROAD_PLANE", False): sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes( sampled_gt_boxes, data_dict["road_plane"], data_dict["calib"] ) data_dict.pop("calib") data_dict.pop("road_plane") obj_points_list = [] for idx, info in enumerate(total_valid_sampled_dict): file_path = 
self.root_path / info["path"] obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape( [-1, self.sampler_cfg.NUM_POINT_FEATURES] ) obj_points[:, :3] += info["box3d_lidar"][:3] if self.sampler_cfg.get("USE_ROAD_PLANE", False): # mv height obj_points[:, 2] -= mv_height[idx] obj_points_list.append(obj_points) obj_points = np.concatenate(obj_points_list, axis=0) sampled_gt_names = np.array([x["name"] for x in total_valid_sampled_dict]) large_sampled_gt_boxes = box_utils.enlarge_box3d( sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH ) points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes) points = np.concatenate([obj_points, points], axis=0) gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0) gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0) data_dict["gt_boxes"] = gt_boxes data_dict["gt_names"] = gt_names data_dict["points"] = points return data_dict def __call__(self, data_dict): """ Args: data_dict: gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] 
Returns: """ gt_boxes = data_dict["gt_boxes"] gt_names = data_dict["gt_names"].astype(str) existed_boxes = gt_boxes total_valid_sampled_dict = [] for class_name, sample_group in self.sample_groups.items(): if self.limit_whole_scene: num_gt = np.sum(class_name == gt_names) sample_group["sample_num"] = str( int(self.sample_class_num[class_name]) - num_gt ) if int(sample_group["sample_num"]) > 0: sampled_dict = self.sample_with_fixed_number(class_name, sample_group) sampled_boxes = np.stack( [x["box3d_lidar"] for x in sampled_dict], axis=0 ).astype(np.float32) if self.sampler_cfg.get("DATABASE_WITH_FAKELIDAR", False): sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar( sampled_boxes ) iou1 = iou3d_nms_utils.boxes_bev_iou_cpu( sampled_boxes[:, 0:7], existed_boxes[:, 0:7] ) iou2 = iou3d_nms_utils.boxes_bev_iou_cpu( sampled_boxes[:, 0:7], sampled_boxes[:, 0:7] ) iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0 iou1 = iou1 if iou1.shape[1] > 0 else iou2 valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0] valid_sampled_dict = [sampled_dict[x] for x in valid_mask] valid_sampled_boxes = sampled_boxes[valid_mask] existed_boxes = np.concatenate( (existed_boxes, valid_sampled_boxes), axis=0 ) total_valid_sampled_dict.extend(valid_sampled_dict) sampled_gt_boxes = existed_boxes[gt_boxes.shape[0] :, :] if total_valid_sampled_dict.__len__() > 0: data_dict = self.add_sampled_boxes_to_scene( data_dict, sampled_gt_boxes, total_valid_sampled_dict ) # data_dict.pop('gt_boxes_mask') data_dict["gt_boxes_mask"] = np.ones( data_dict["gt_boxes"].shape[0], dtype=np.bool_ ) return data_dict
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/dataset.py
Python
import copy from collections import defaultdict from pathlib import Path import numpy as np import torch import torch.utils.data as torch_data from pcdet.datasets.augmentor.data_augmentor import DataAugmentor from pcdet.datasets.processor.data_processor import DataProcessor from pcdet.datasets.processor.point_feature_encoder import PointFeatureEncoder from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils from pcdet.utils import box_utils, common_utils, self_training_utils class DatasetTemplate(torch_data.Dataset): def __init__( self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None, allow_empty_gt_boxes=False, ): """ Args: allow_empty_gt_boxes: If True, the frame without any gt_boxes will still be returned. If False, and when training, the frame without any gt_boxes will be replaced by a completely random frame. """ super().__init__() self.dataset_cfg = dataset_cfg self.training = training self.class_names = class_names self.logger = logger self.root_path = ( root_path if root_path is not None else Path(self.dataset_cfg.DATA_PATH) ) self.logger = logger self.allow_empty_gt_boxes = allow_empty_gt_boxes if self.dataset_cfg is None or class_names is None: return self.point_cloud_range = np.array( self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32 ) self.point_feature_encoder = PointFeatureEncoder( self.dataset_cfg.POINT_FEATURE_ENCODING, point_cloud_range=self.point_cloud_range, ) self.data_augmentor = ( DataAugmentor( self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger, ) if self.training else None ) self.data_processor = DataProcessor( self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range, training=self.training, ) self.grid_size = self.data_processor.grid_size self.voxel_size = self.data_processor.voxel_size self.total_epochs = 0 self._merge_all_iters_to_one_epoch = False @property def mode(self): return "train" if self.training else "test" def __getstate__(self): d = 
dict(self.__dict__) del d["logger"] return d def __setstate__(self, d): self.__dict__.update(d) @staticmethod def generate_prediction_dicts( batch_dict, pred_dicts, class_names, output_path=None ): """ To support a custom dataset, implement this function to receive the predicted results from the model, and then transform the unified normative coordinate to your required coordinate, and optionally save them to disk. Args: batch_dict: dict of original data from the dataloader pred_dicts: dict of predicted results from the model pred_boxes: (N, 7), Tensor pred_scores: (N), Tensor pred_labels: (N), Tensor class_names: output_path: if it is not None, save the results to this path Returns: """ raise NotImplementedError @staticmethod def __vis__(points, gt_boxes, ref_boxes=None, scores=None, use_fakelidar=False): import mayavi.mlab as mlab import visual_utils.visualize_utils as vis gt_boxes = copy.deepcopy(gt_boxes) if use_fakelidar: gt_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(gt_boxes) if ref_boxes is not None: ref_boxes = copy.deepcopy(ref_boxes) if use_fakelidar: ref_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(ref_boxes) vis.draw_scenes(points, gt_boxes, ref_boxes=ref_boxes, ref_scores=scores) mlab.show(stop=True) @staticmethod def __vis_fake__(points, gt_boxes, ref_boxes=None, scores=None, use_fakelidar=True): import mayavi.mlab as mlab import visual_utils.visualize_utils as vis gt_boxes = copy.deepcopy(gt_boxes) if use_fakelidar: gt_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(gt_boxes) if ref_boxes is not None: ref_boxes = copy.deepcopy(ref_boxes) if use_fakelidar: ref_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(ref_boxes) vis.draw_scenes(points, gt_boxes, ref_boxes=ref_boxes, ref_scores=scores) mlab.show(stop=True) @staticmethod def extract_fov_data(points, fov_degree, heading_angle): """ Args: points: (N, 3 + C) fov_degree: [0~180] heading_angle: [0~360] in lidar coords, 0 is the x-axis, increase clockwise Returns: """ half_fov_degree 
= fov_degree / 180 * np.pi / 2 heading_angle = -heading_angle / 180 * np.pi points_new = common_utils.rotate_points_along_z( points.copy()[np.newaxis, :, :], np.array([heading_angle]) )[0] angle = np.arctan2(points_new[:, 1], points_new[:, 0]) fov_mask = (np.abs(angle) < half_fov_degree) & (points_new[:, 0] > 0) points = points_new[fov_mask] return points @staticmethod def extract_fov_gt(gt_boxes, fov_degree, heading_angle): """ Args: anno_dict: fov_degree: [0~180] heading_angle: [0~360] in lidar coords, 0 is the x-axis, increase clockwise Returns: """ half_fov_degree = fov_degree / 180 * np.pi / 2 heading_angle = -heading_angle / 180 * np.pi gt_boxes_lidar = copy.deepcopy(gt_boxes) gt_boxes_lidar = common_utils.rotate_points_along_z( gt_boxes_lidar[np.newaxis, :, :], np.array([heading_angle]) )[0] gt_boxes_lidar[:, 6] += heading_angle gt_angle = np.arctan2(gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) fov_gt_mask = (np.abs(gt_angle) < half_fov_degree) & (gt_boxes_lidar[:, 0] > 0) return fov_gt_mask def fill_pseudo_labels(self, input_dict): gt_boxes = self_training_utils.load_ps_label(input_dict["frame_id"]) gt_scores = gt_boxes[:, 8] gt_classes = gt_boxes[:, 7] gt_boxes = gt_boxes[:, :7] # only suitable for only one classes, generating gt_names for prepare data gt_names = np.array(self.class_names)[np.abs(gt_classes.astype(np.int32)) - 1] input_dict["gt_boxes"] = gt_boxes input_dict["gt_names"] = gt_names input_dict["gt_classes"] = gt_classes input_dict["gt_scores"] = gt_scores input_dict["pos_ps_bbox"] = np.zeros((len(self.class_names)), dtype=np.float32) input_dict["ign_ps_bbox"] = np.zeros((len(self.class_names)), dtype=np.float32) for i in range(len(self.class_names)): num_total_boxes = (np.abs(gt_classes) == (i + 1)).sum() num_ps_bbox = (gt_classes == (i + 1)).sum() input_dict["pos_ps_bbox"][i] = num_ps_bbox input_dict["ign_ps_bbox"][i] = num_total_boxes - num_ps_bbox input_dict.pop("num_points_in_gt", None) def merge_all_iters_to_one_epoch(self, merge=True, 
epochs=None): if merge: self._merge_all_iters_to_one_epoch = True self.total_epochs = epochs else: self._merge_all_iters_to_one_epoch = False def __len__(self): raise NotImplementedError def __getitem__(self, index): """ To support a custom dataset, implement this function to load the raw data (and labels), then transform them to the unified normative coordinate and call the function self.prepare_data() to process the data and send them to the model. Args: index: Returns: """ raise NotImplementedError def prepare_data(self, data_dict): """ Args: data_dict: points: (N, 3 + C_in) gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] gt_names: optional, (N), string ... Returns: data_dict: frame_id: string points: (N, 3 + C_in) gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] gt_names: optional, (N), string use_lead_xyz: bool voxels: optional (num_voxels, max_points_per_voxel, 3 + C) voxel_coords: optional (num_voxels, 3) voxel_num_points: optional (num_voxels) ... """ # Handle unified car class. if "unicar" in self.class_names: # Waymo. 
data_dict["gt_names"] = np.where( data_dict["gt_names"] == "Vehicle", "unicar", data_dict["gt_names"], ) # nuScenes data_dict["gt_names"] = np.where( data_dict["gt_names"] == "car", "unicar", data_dict["gt_names"], ) if self.training: # filter gt_boxes without points num_points_in_gt = data_dict.get("num_points_in_gt", None) if num_points_in_gt is None: num_points_in_gt = ( roiaware_pool3d_utils.points_in_boxes_cpu( torch.from_numpy(data_dict["points"][:, :3]), torch.from_numpy(data_dict["gt_boxes"][:, :7]), ) .numpy() .sum(axis=1) ) mask = num_points_in_gt >= self.dataset_cfg.get("MIN_POINTS_OF_GT", 1) if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) data_dict["gt_boxes"] = data_dict["gt_boxes"][mask] data_dict["gt_names"] = data_dict["gt_names"][mask] if "obj_ids" in data_dict: data_dict["obj_ids"] = data_dict["obj_ids"][mask] if "gt_classes" in data_dict: data_dict["gt_classes"] = data_dict["gt_classes"][mask] data_dict["gt_scores"] = data_dict["gt_scores"][mask] assert "gt_boxes" in data_dict, "gt_boxes should be provided for training" gt_boxes_mask = np.array( [n in self.class_names for n in data_dict["gt_names"]], dtype=np.bool_ ) # Run data augmentation. data_dict = self.data_augmentor.forward( data_dict={**data_dict, "gt_boxes_mask": gt_boxes_mask} ) if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) # Ignore unselected classes. For example, Waymo annotates vehicles, # pedestrians, cyclists, and signs, but typically we only train for # vehicles, pedestrians, and cyclists. # Note: this is already done in self.data_augmentor.forward. 
if data_dict.get("gt_boxes", None) is not None: selected = common_utils.keep_arrays_by_name( data_dict["gt_names"], self.class_names ) if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) data_dict["gt_boxes"] = data_dict["gt_boxes"][selected] data_dict["gt_names"] = data_dict["gt_names"][selected] if "obj_ids" in data_dict: data_dict["obj_ids"] = data_dict["obj_ids"][selected] # for pseudo label has ignore labels. if "gt_classes" not in data_dict: gt_classes = np.array( [self.class_names.index(n) + 1 for n in data_dict["gt_names"]], dtype=np.int32, ) else: gt_classes = data_dict["gt_classes"][selected] data_dict["gt_scores"] = data_dict["gt_scores"][selected] # Append gt_classes to the last column of gt_boxes. gt_boxes = np.concatenate( ( data_dict["gt_boxes"], gt_classes.reshape(-1, 1).astype(np.float32), ), axis=1, ) data_dict["gt_boxes"] = gt_boxes if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) data_dict = self.point_feature_encoder.forward(data_dict) if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) data_dict = self.data_processor.forward(data_dict=data_dict) if "obj_ids" in data_dict: assert len(data_dict["gt_boxes"]) == len(data_dict["obj_ids"]) if ( self.training and len(data_dict["gt_boxes"]) == 0 and not self.allow_empty_gt_boxes ): new_index = np.random.randint(self.__len__()) return self.__getitem__(new_index) data_dict.pop("gt_names", None) data_dict.pop("gt_classes", None) return data_dict @staticmethod def collate_batch(batch_list, _unused=False): ####################################################################### # Handle key/val differences among mixed datasets ####################################################################### # Check 1: keys is_consistent_batch = True all_keys = set.union(*[set(sample.keys()) for sample in 
batch_list]) for sample in batch_list: if set(sample.keys()) != all_keys: is_consistent_batch = False break # Check 2: length of lidar_to_vehicle_poses if is_consistent_batch and "lidar_to_vehicle_poses" in all_keys: all_lengths = set( [len(sample["lidar_to_vehicle_poses"]) for sample in batch_list] ) if len(all_lengths) != 1: is_consistent_batch = False # Check 3: length of num_points_of_each_lidar if is_consistent_batch and "num_points_of_each_lidar" in all_keys: all_lengths = set( [len(sample["num_points_of_each_lidar"]) for sample in batch_list] ) if len(all_lengths) != 1: is_consistent_batch = False # Remove unique_keys + num_points_of_each_lidar + num_points_of_each_lidar shared_keys = set.intersection(*[set(sample.keys()) for sample in batch_list]) unique_keys = all_keys - shared_keys to_remove_keys = unique_keys.union( { "lidar_to_vehicle_poses", "num_points_of_each_lidar", } ) for i in range(len(batch_list)): for key in to_remove_keys: batch_list[i].pop(key, None) ####################################################################### data_dict = defaultdict(list) for cur_sample in batch_list: for key, val in cur_sample.items(): data_dict[key].append(val) batch_size = len(batch_list) ret = {} for key, val in data_dict.items(): try: if key in ["voxels", "voxel_num_points"]: ret[key] = np.concatenate(val, axis=0) elif key in ["points", "voxel_coords"]: coors = [] for i, coor in enumerate(val): coor_pad = np.pad( coor, ((0, 0), (1, 0)), mode="constant", constant_values=i ) coors.append(coor_pad) ret[key] = np.concatenate(coors, axis=0) elif key in ["gt_boxes"]: max_gt = max([len(x) for x in val]) batch_gt_boxes3d = np.zeros( (batch_size, max_gt, val[0].shape[-1]), dtype=np.float32 ) for k in range(batch_size): batch_gt_boxes3d[k, : val[k].__len__(), :] = val[k] ret[key] = batch_gt_boxes3d elif key in ["gt_scores"]: max_gt = max([len(x) for x in val]) batch_scores = np.zeros((batch_size, max_gt), dtype=np.float32) for k in range(batch_size): batch_scores[k, : 
val[k].__len__()] = val[k] ret[key] = batch_scores else: ret[key] = np.stack(val, axis=0) except: print("Error in collate_batch: key=%s" % key) raise TypeError ret["batch_size"] = batch_size return ret def eval(self): self.training = False self.data_processor.eval() def train(self): self.training = True self.data_processor.train()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/kitti/kitti_dataset.py
Python
import copy import pickle import numpy as np from skimage import io from pcdet.datasets import DatasetTemplate from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils from pcdet.utils import ( box_utils, calibration_kitti, common_utils, object3d_kitti, self_training_utils, ) class KittiDataset(DatasetTemplate): def __init__( self, dataset_cfg, class_names, training=True, root_path=None, logger=None ): """ Args: root_path: dataset_cfg: class_names: training: logger: """ super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger, ) self.split = self.dataset_cfg.DATA_SPLIT[self.mode] self.root_split_path = self.root_path / ( "training" if self.split != "test" else "testing" ) split_dir = ( self.root_path.parent.parent / "data_split" / "kitti" / f"{self.split}.txt" ) self.sample_id_list = ( [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None ) self.kitti_infos = [] self.include_kitti_data(self.mode) def include_kitti_data(self, mode): if self.logger is not None: self.logger.info("Loading KITTI dataset") kitti_infos = [] for info_path in self.dataset_cfg.INFO_PATH[mode]: info_path = self.root_path / info_path if not info_path.exists(): continue with open(info_path, "rb") as f: infos = pickle.load(f) kitti_infos.extend(infos) self.kitti_infos.extend(kitti_infos) if self.logger is not None: self.logger.info( "Total samples for KITTI dataset: %d" % (len(self.kitti_infos)) ) def set_split(self, split): super().__init__( dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger, ) self.split = split self.root_split_path = self.root_path / ( "training" if self.split != "test" else "testing" ) split_dir = ( self.root_path.parent.parent / "data_split" / "kitti" / f"{self.split}.txt" ) self.sample_id_list = ( [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None ) def get_lidar(self, 
idx): lidar_file = self.root_split_path / "velodyne" / ("%s.bin" % idx) assert lidar_file.exists() return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4) def get_image_shape(self, idx): img_file = self.root_split_path / "image_2" / ("%s.png" % idx) assert img_file.exists() return np.array(io.imread(img_file).shape[:2], dtype=np.int32) def get_label(self, idx): label_file = self.root_split_path / "label_2" / ("%s.txt" % idx) assert label_file.exists() return object3d_kitti.get_objects_from_label(label_file) def get_calib(self, idx): calib_file = self.root_split_path / "calib" / ("%s.txt" % idx) assert calib_file.exists() return calibration_kitti.Calibration(calib_file) def get_road_plane(self, idx): plane_file = self.root_split_path / "planes" / ("%s.txt" % idx) if not plane_file.exists(): return None with open(plane_file, "r") as f: lines = f.readlines() lines = [float(i) for i in lines[3].split()] plane = np.asarray(lines) # Ensure normal is always facing up, this is in the rectified camera coordinate if plane[1] > 0: plane = -plane norm = np.linalg.norm(plane[0:3]) plane = plane / norm return plane @staticmethod def get_fov_flag(pts_rect, img_shape, calib, margin=0): """ Args: pts_rect: img_shape: calib: margin Returns: """ pts_img, pts_rect_depth = calib.rect_to_img(pts_rect) val_flag_1 = np.logical_and( pts_img[:, 0] >= 0 - margin, pts_img[:, 0] < img_shape[1] + margin ) val_flag_2 = np.logical_and( pts_img[:, 1] >= 0 - margin, pts_img[:, 1] < img_shape[0] + margin ) val_flag_merge = np.logical_and(val_flag_1, val_flag_2) pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0) return pts_valid_flag def get_infos( self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None ): import concurrent.futures as futures def process_single_sequence(sample_idx): print("%s sample_idx: %s" % (self.split, sample_idx)) info = {} pc_info = {"num_features": 4, "lidar_idx": sample_idx} info["point_cloud"] = pc_info image_info = { 
"image_idx": sample_idx, "image_shape": self.get_image_shape(sample_idx), } info["image"] = image_info calib = self.get_calib(sample_idx) P2 = np.concatenate([calib.P2, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype) R0_4x4[3, 3] = 1.0 R0_4x4[:3, :3] = calib.R0 V2C_4x4 = np.concatenate( [calib.V2C, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0 ) calib_info = {"P2": P2, "R0_rect": R0_4x4, "Tr_velo_to_cam": V2C_4x4} info["calib"] = calib_info if has_label: obj_list = self.get_label(sample_idx) annotations = {} annotations["name"] = np.array([obj.cls_type for obj in obj_list]) annotations["truncated"] = np.array( [obj.truncation for obj in obj_list] ) annotations["occluded"] = np.array([obj.occlusion for obj in obj_list]) annotations["alpha"] = np.array([obj.alpha for obj in obj_list]) annotations["bbox"] = np.concatenate( [obj.box2d.reshape(1, 4) for obj in obj_list], axis=0 ) annotations["dimensions"] = np.array( [[obj.l, obj.h, obj.w] for obj in obj_list] ) # lhw(camera) format annotations["location"] = np.concatenate( [obj.loc.reshape(1, 3) for obj in obj_list], axis=0 ) annotations["rotation_y"] = np.array([obj.ry for obj in obj_list]) annotations["score"] = np.array([obj.score for obj in obj_list]) annotations["difficulty"] = np.array( [obj.level for obj in obj_list], np.int32 ) num_objects = len( [obj.cls_type for obj in obj_list if obj.cls_type != "DontCare"] ) num_gt = len(annotations["name"]) index = list(range(num_objects)) + [-1] * (num_gt - num_objects) annotations["index"] = np.array(index, dtype=np.int32) loc = annotations["location"][:num_objects] dims = annotations["dimensions"][:num_objects] rots = annotations["rotation_y"][:num_objects] loc_lidar = calib.rect_to_lidar(loc) l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3] loc_lidar[:, 2] += h[:, 0] / 2 gt_boxes_lidar = np.concatenate( [loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1 ) annotations["gt_boxes_lidar"] = gt_boxes_lidar 
info["annos"] = annotations if count_inside_pts: points = self.get_lidar(sample_idx) calib = self.get_calib(sample_idx) pts_rect = calib.lidar_to_rect(points[:, 0:3]) fov_flag = self.get_fov_flag( pts_rect, info["image"]["image_shape"], calib ) pts_fov = points[fov_flag] corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar) num_points_in_gt = -np.ones(num_gt, dtype=np.int32) for k in range(num_objects): flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k]) num_points_in_gt[k] = flag.sum() annotations["num_points_in_gt"] = num_points_in_gt return info sample_id_list = ( sample_id_list if sample_id_list is not None else self.sample_id_list ) with futures.ThreadPoolExecutor(num_workers) as executor: infos = executor.map(process_single_sequence, sample_id_list) return list(infos) def create_groundtruth_database( self, info_path=None, used_classes=None, split="train" ): import torch database_save_path = Path(self.root_path) / ( "gt_database" if split == "train" else ("gt_database_%s" % split) ) db_info_save_path = Path(self.root_path) / ("kitti_dbinfos_%s.pkl" % split) database_save_path.mkdir(parents=True, exist_ok=True) all_db_infos = {} with open(info_path, "rb") as f: infos = pickle.load(f) for k in range(len(infos)): print("gt_database sample: %d/%d" % (k + 1, len(infos))) info = infos[k] sample_idx = info["point_cloud"]["lidar_idx"] points = self.get_lidar(sample_idx) annos = info["annos"] names = annos["name"] difficulty = annos["difficulty"] bbox = annos["bbox"] gt_boxes = annos["gt_boxes_lidar"] num_obj = gt_boxes.shape[0] point_indices = roiaware_pool3d_utils.points_in_boxes_cpu( torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes) ).numpy() # (nboxes, npoints) for i in range(num_obj): filename = "%s_%s_%d.bin" % (sample_idx, names[i], i) filepath = database_save_path / filename gt_points = points[point_indices[i] > 0] gt_points[:, :3] -= gt_boxes[i, :3] with open(filepath, "w") as f: gt_points.tofile(f) if (used_classes is None) or 
names[i] in used_classes: db_path = str( filepath.relative_to(self.root_path) ) # gt_database/xxxxx.bin db_info = { "name": names[i], "path": db_path, "image_idx": sample_idx, "gt_idx": i, "box3d_lidar": gt_boxes[i], "num_points_in_gt": gt_points.shape[0], "difficulty": difficulty[i], "bbox": bbox[i], "score": annos["score"][i], } if names[i] in all_db_infos: all_db_infos[names[i]].append(db_info) else: all_db_infos[names[i]] = [db_info] for k, v in all_db_infos.items(): print("Database %s: %d" % (k, len(v))) with open(db_info_save_path, "wb") as f: pickle.dump(all_db_infos, f) def generate_prediction_dicts( self, batch_dict, pred_dicts, class_names, output_path=None, force_no_filter=False, ): """ Args: batch_dict: frame_id: pred_dicts: list of pred_dicts pred_boxes: (N, 7), Tensor pred_scores: (N), Tensor pred_labels: (N), Tensor class_names: output_path: Returns: """ def get_template_prediction(num_samples): ret_dict = { "name": np.zeros(num_samples), "truncated": np.zeros(num_samples), "occluded": np.zeros(num_samples), "alpha": np.zeros(num_samples), "bbox": np.zeros([num_samples, 4]), "dimensions": np.zeros([num_samples, 3]), "location": np.zeros([num_samples, 3]), "rotation_y": np.zeros(num_samples), "score": np.zeros(num_samples), "boxes_lidar": np.zeros([num_samples, 7]), } return ret_dict def generate_single_sample_dict(batch_index, box_dict, force_no_filter=False): pred_scores = box_dict["pred_scores"].cpu().numpy() pred_boxes = box_dict["pred_boxes"].cpu().numpy() pred_labels = box_dict["pred_labels"].cpu().numpy() pred_dict = get_template_prediction(pred_scores.shape[0]) if pred_scores.shape[0] == 0: return pred_dict calib = batch_dict["calib"][batch_index] image_shape = batch_dict["image_shape"][batch_index] if self.dataset_cfg.get("SHIFT_COOR", None): pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR # BOX FILTER if ( self.dataset_cfg.get("TEST", None) and self.dataset_cfg.TEST.BOX_FILTER["FOV_FILTER"] and not force_no_filter ): box_preds_lidar_center 
= pred_boxes[:, 0:3] pts_rect = calib.lidar_to_rect(box_preds_lidar_center) fov_flag = self.get_fov_flag(pts_rect, image_shape, calib, margin=5) pred_boxes = pred_boxes[fov_flag] pred_labels = pred_labels[fov_flag] pred_scores = pred_scores[fov_flag] pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera( pred_boxes, calib ) pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes( pred_boxes_camera, calib, image_shape=image_shape ) pred_dict["name"] = np.array(class_names)[pred_labels - 1] pred_dict["alpha"] = ( -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6] ) pred_dict["bbox"] = pred_boxes_img pred_dict["dimensions"] = pred_boxes_camera[:, 3:6] pred_dict["location"] = pred_boxes_camera[:, 0:3] pred_dict["rotation_y"] = pred_boxes_camera[:, 6] pred_dict["score"] = pred_scores pred_dict["boxes_lidar"] = pred_boxes return pred_dict annos = [] for index, box_dict in enumerate(pred_dicts): frame_id = batch_dict["frame_id"][index] single_pred_dict = generate_single_sample_dict( index, box_dict, force_no_filter=force_no_filter ) single_pred_dict["frame_id"] = frame_id annos.append(single_pred_dict) if output_path is not None: cur_det_file = output_path / ("%s.txt" % frame_id) with open(cur_det_file, "w") as f: bbox = single_pred_dict["bbox"] loc = single_pred_dict["location"] dims = single_pred_dict["dimensions"] # lhw -> hwl for idx in range(len(bbox)): print( "%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f" % ( single_pred_dict["name"][idx], single_pred_dict["alpha"][idx], bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3], dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0], loc[idx][1], loc[idx][2], single_pred_dict["rotation_y"][idx], single_pred_dict["score"][idx], ), file=f, ) return annos def evaluation(self, det_annos, class_names, **kwargs): if "annos" not in self.kitti_infos[0].keys(): return None, {} from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval 
eval_det_annos = copy.deepcopy(det_annos) eval_gt_annos = [copy.deepcopy(info["annos"]) for info in self.kitti_infos] save_all_eval_results = False if save_all_eval_results: with open("kitti_eval_info.pkl", "wb") as f: data = { "eval_det_annos": eval_det_annos, "eval_gt_annos": eval_gt_annos, } pickle.dump(data, f) ap_result_str, ap_dict = kitti_eval.get_official_eval_result( eval_gt_annos, eval_det_annos, class_names ) return ap_result_str, ap_dict def evaluation_with_custom_annos( self, det_annos, gt_annos, class_names, **kwargs, ): """ Only used for copy-paste experiments. gt_annos are hard-coded to have "Car" type. """ if "annos" not in self.kitti_infos[0].keys(): return None, {} from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval eval_det_annos = copy.deepcopy(det_annos) gt_annos = copy.deepcopy(gt_annos) eval_gt_annos = [copy.deepcopy(info["annos"]) for info in self.kitti_infos] if len(gt_annos) != len(eval_gt_annos): raise ValueError( "gt_annos and eval_gt_annos have different lengths: " f"{len(gt_annos)} vs {len(eval_gt_annos)}" ) # Replace specific values of "Car" in eval_gt_annos. # This needs to be very precisely. # - "name" : No change # - "truncated" : No change # - "occluded" : No change # - "alpha" : Masked update with gt_annos # slight diff (due to pixel precision?) # - "bbox" : Masked update with gt_annos # slight diff (due to pixel precision?) 
# - "dimensions" : Masked update with gt_annos # same # - "location" : Masked update with gt_annos # same # - "rotation_y" : Masked update with gt_annos # same # - "score" : No change # - "difficulty" : No change # - "index" : No change # - "gt_boxes_lidar" : Double masked ("Car" and "DontCare") # same # - "num_points_in_gt": No change for i in range(len(eval_gt_annos)): eval_mask = eval_gt_annos[i]["name"] == "Car" docare_names = eval_gt_annos[i]["name"][ eval_gt_annos[i]["name"] != "DontCare" ] docare_and_eval_mask = docare_names == "Car" if len(eval_gt_annos[i]["alpha"][eval_mask]) != len(gt_annos[i]["alpha"]): print( f"Index {i} has different number of gt boxes, where " f"len(eval_gt_annos[i]['alpha'][eval_mask]) = " f"{len(eval_gt_annos[i]['alpha'][eval_mask])} and " f"len(gt_annos[i]['alpha']) = {len(gt_annos[i]['alpha'])}" ) import ipdb ipdb.set_trace() pass eval_gt_annos[i]["alpha"][eval_mask] = gt_annos[i]["alpha"] eval_gt_annos[i]["bbox"][eval_mask] = gt_annos[i]["bbox"] eval_gt_annos[i]["dimensions"][eval_mask] = gt_annos[i]["dimensions"] eval_gt_annos[i]["location"][eval_mask] = gt_annos[i]["location"] eval_gt_annos[i]["rotation_y"][eval_mask] = gt_annos[i]["rotation_y"] eval_gt_annos[i]["gt_boxes_lidar"][docare_and_eval_mask] = gt_annos[i][ "boxes_lidar" ][:, :7] ap_result_str, ap_dict = kitti_eval.get_official_eval_result( gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=class_names, ) return ap_result_str, ap_dict def __len__(self): if self._merge_all_iters_to_one_epoch: return len(self.kitti_infos) * self.total_epochs return len(self.kitti_infos) def __getitem__(self, index): # index = 4 if self._merge_all_iters_to_one_epoch: index = index % len(self.kitti_infos) info = copy.deepcopy(self.kitti_infos[index]) sample_idx = info["point_cloud"]["lidar_idx"] points = self.get_lidar(sample_idx) calib = self.get_calib(sample_idx) img_shape = info["image"]["image_shape"] if self.dataset_cfg.FOV_POINTS_ONLY: pts_rect = 
calib.lidar_to_rect(points[:, 0:3]) fov_flag = self.get_fov_flag(pts_rect, img_shape, calib) points = points[fov_flag] if self.dataset_cfg.get("SHIFT_COOR", None): points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32) input_dict = { "points": points, "frame_id": sample_idx, "calib": calib, "image_shape": img_shape, } if "annos" in info: annos = info["annos"] annos = common_utils.drop_info_with_name(annos, name="DontCare") loc, dims, rots = ( annos["location"], annos["dimensions"], annos["rotation_y"], ) gt_names = annos["name"] gt_boxes_camera = np.concatenate( [loc, dims, rots[..., np.newaxis]], axis=1 ).astype(np.float32) gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar( gt_boxes_camera, calib ) if self.dataset_cfg.get("SHIFT_COOR", None): gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR input_dict.update({"gt_names": gt_names, "gt_boxes": gt_boxes_lidar}) if self.dataset_cfg.get("REMOVE_ORIGIN_GTS", None) and self.training: input_dict["points"] = box_utils.remove_points_in_boxes3d( input_dict["points"], input_dict["gt_boxes"] ) mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_) input_dict["gt_boxes"] = input_dict["gt_boxes"][mask] input_dict["gt_names"] = input_dict["gt_names"][mask] if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: input_dict["gt_boxes"] = None # for debug only # gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_) # debug_dict = {'gt_boxes': copy.deepcopy(gt_boxes_lidar[gt_boxes_mask])} road_plane = self.get_road_plane(sample_idx) if road_plane is not None: input_dict["road_plane"] = road_plane # load saved pseudo label for unlabel data if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: self.fill_pseudo_labels(input_dict) data_dict = self.prepare_data(data_dict=input_dict) return data_dict def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4): dataset = KittiDataset( dataset_cfg=dataset_cfg, 
class_names=class_names, root_path=data_path, training=False, ) train_split, val_split = "train", "val" train_filename = save_path / ("kitti_infos_%s.pkl" % train_split) val_filename = save_path / ("kitti_infos_%s.pkl" % val_split) trainval_filename = save_path / "kitti_infos_trainval.pkl" test_filename = save_path / "kitti_infos_test.pkl" print("---------------Start to generate data infos---------------") dataset.set_split(train_split) kitti_infos_train = dataset.get_infos( num_workers=workers, has_label=True, count_inside_pts=True ) with open(train_filename, "wb") as f: pickle.dump(kitti_infos_train, f) print("Kitti info train file is saved to %s" % train_filename) dataset.set_split(val_split) kitti_infos_val = dataset.get_infos( num_workers=workers, has_label=True, count_inside_pts=True ) with open(val_filename, "wb") as f: pickle.dump(kitti_infos_val, f) print("Kitti info val file is saved to %s" % val_filename) with open(trainval_filename, "wb") as f: pickle.dump(kitti_infos_train + kitti_infos_val, f) print("Kitti info trainval file is saved to %s" % trainval_filename) dataset.set_split("test") kitti_infos_test = dataset.get_infos( num_workers=workers, has_label=False, count_inside_pts=False ) with open(test_filename, "wb") as f: pickle.dump(kitti_infos_test, f) print("Kitti info test file is saved to %s" % test_filename) print( "---------------Start create groundtruth database for data augmentation---------------" ) dataset.set_split(train_split) dataset.create_groundtruth_database(train_filename, split=train_split) print("---------------Data preparation Done---------------") if __name__ == "__main__": import sys if sys.argv.__len__() > 1 and sys.argv[1] == "create_kitti_infos": from pathlib import Path import yaml from easydict import EasyDict dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2]))) ROOT_DIR = (Path(__file__).resolve().parent / "../../../").resolve() create_kitti_infos( dataset_cfg=dataset_cfg, class_names=["Car", "Pedestrian", "Cyclist"], 
data_path=ROOT_DIR / "data" / "kitti", save_path=ROOT_DIR / "data" / "kitti", )
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/kitti/kitti_eval.py
Python
import argparse import copy import pickle import numpy as np from pcdet.datasets.kitti import kitti_utils from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval def filter_by_range( infos, gt_key, range_min=0, range_max=80, is_pred=False, dataset="kitti" ): infos = copy.deepcopy(infos) total_objs = 0 for i, info in enumerate(infos): if is_pred: info.pop("truncated", None) info.pop("occluded", None) location = info["location"] range_distance = np.linalg.norm(location[:, [0, 2]], axis=-1) mask = (range_distance >= range_min) & (range_distance <= range_max) total_objs += mask.sum() for key, val in info.items(): if isinstance(val, np.ndarray): if key == gt_key: info[key] = val[mask[: val.shape[0]]] # ignore the Don't Care mask elif key in [ "car_from_global", "fov_gt_flag", "gt_boxes_velocity", "gt_boxes_token", "cam_intrinsic", "ref_from_car", "gt_boxes", "num_lidar_pts", "num_radar_pts", ]: continue else: try: info[key] = val[mask] except: import ipdb ipdb.set_trace(context=20) return infos, total_objs def transform_to_kitti_format(pred_infos, gt_annos, dataset, fakelidar): if dataset == "waymo": map_name_to_kitti = { "Vehicle": "Car", "Pedestrian": "Pedestrian", "Cyclist": "Cyclist", "Sign": "Sign", "Car": "Car", } elif dataset in ["lyft", "nuscenes"]: map_name_to_kitti = { "car": "Car", "pedestrian": "Pedestrian", "truck": "Truck", } else: raise NotImplementedError kwargs = { "is_gt": True, "GT_FILTER": True, "FOV_FILTER": True, "FOV_DEGREE": 90, "FOV_ANGLE": 0, "RANGE_FILTER": [0, -40, -10, 70.4, 40, 10], } kitti_utils.transform_annotations_to_kitti_format( pred_infos, map_name_to_kitti=map_name_to_kitti ) kitti_utils.transform_annotations_to_kitti_format( gt_annos, map_name_to_kitti=map_name_to_kitti, info_with_fakelidar=fakelidar, **kwargs, ) def main(): parser = argparse.ArgumentParser(description="arg parser") parser.add_argument("--pred_infos", type=str, default=None, help="pickle file") parser.add_argument("--gt_infos", type=str, 
default=None, help="pickle file") parser.add_argument("--class_names", type=str, nargs="+", default=["Car"], help="") parser.add_argument("--dataset", type=str, default="kitti", help="") parser.add_argument("--fakelidar", type=bool, default=False, help="") args = parser.parse_args() pred_infos = pickle.load(open(args.pred_infos, "rb")) gt_infos = pickle.load(open(args.gt_infos, "rb")) if args.dataset in ["kitti"]: gt_annos = [info["annos"] for info in gt_infos] else: gt_annos = gt_infos gt_keys = { "kitti": ["gt_boxes_lidar"], "lyft": "gt_boxes_lidar", "nuscenes": "gt_boxes_lidar", } # For other datasets if args.dataset != "kitti": transform_to_kitti_format(pred_infos, gt_annos, args.dataset, args.fakelidar) print("------------------Start to eval------------------------") range_list = [[0, 1000], [0, 30], [30, 50], [50, 80]] for cur_range in range_list: cur_pred_info, num_pred_objs = filter_by_range( pred_infos, gt_keys[args.dataset], range_min=cur_range[0], range_max=cur_range[1], is_pred=True, dataset=args.dataset, ) cur_gt_annos, num_gt_objs = filter_by_range( gt_annos, gt_keys[args.dataset], range_min=cur_range[0], range_max=cur_range[1], dataset=args.dataset, ) ap_result_str, ap_dict = kitti_eval.get_official_eval_result( cur_gt_annos, cur_pred_info, current_classes=["Car"] ) print( f"----------Range={cur_range}, avg_pred_objs={num_pred_objs / len(pred_infos)}, " f"avg_gt_objs={num_gt_objs / len(gt_infos)}-------------" ) print(ap_result_str) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/kitti/kitti_object_eval_python/eval.py
Python
import io as sysio import numba import numpy as np from pcdet.datasets.kitti.kitti_object_eval_python.rotate_iou import rotate_iou_gpu_eval @numba.jit def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41): scores.sort() scores = scores[::-1] current_recall = 0 thresholds = [] for i, score in enumerate(scores): l_recall = (i + 1) / num_gt if i < (len(scores) - 1): r_recall = (i + 2) / num_gt else: r_recall = l_recall if ((r_recall - current_recall) < (current_recall - l_recall)) and ( i < (len(scores) - 1) ): continue # recall = l_recall thresholds.append(score) current_recall += 1 / (num_sample_pts - 1.0) return thresholds def clean_data(gt_anno, dt_anno, current_class, difficulty): CLASS_NAMES = ["car", "pedestrian", "cyclist", "van", "person_sitting", "truck"] MIN_HEIGHT = [40, 25, 25] MAX_OCCLUSION = [0, 1, 2] MAX_TRUNCATION = [0.15, 0.3, 0.5] dc_bboxes, ignored_gt, ignored_dt = [], [], [] current_cls_name = CLASS_NAMES[current_class].lower() num_gt = len(gt_anno["name"]) num_dt = len(dt_anno["name"]) num_valid_gt = 0 for i in range(num_gt): bbox = gt_anno["bbox"][i] gt_name = gt_anno["name"][i].lower() height = bbox[3] - bbox[1] valid_class = -1 if gt_name == current_cls_name: valid_class = 1 elif ( current_cls_name == "Pedestrian".lower() and "Person_sitting".lower() == gt_name ): valid_class = 0 elif current_cls_name == "Car".lower() and "Van".lower() == gt_name: valid_class = 0 else: valid_class = -1 ignore = False if ( (gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty]) or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty]) or (height <= MIN_HEIGHT[difficulty]) ): # if gt_anno["difficulty"][i] > difficulty or gt_anno["difficulty"][i] == -1: ignore = True if valid_class == 1 and not ignore: ignored_gt.append(0) num_valid_gt += 1 elif valid_class == 0 or (ignore and (valid_class == 1)): ignored_gt.append(1) else: ignored_gt.append(-1) # for i in range(num_gt): if gt_anno["name"][i] == "DontCare": dc_bboxes.append(gt_anno["bbox"][i]) for i in 
range(num_dt): if dt_anno["name"][i].lower() == current_cls_name: valid_class = 1 else: valid_class = -1 height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1]) if height < MIN_HEIGHT[difficulty]: ignored_dt.append(1) elif valid_class == 1: ignored_dt.append(0) else: ignored_dt.append(-1) return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes @numba.jit(nopython=True) def image_box_overlap(boxes, query_boxes, criterion=-1): N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): qbox_area = (query_boxes[k, 2] - query_boxes[k, 0]) * ( query_boxes[k, 3] - query_boxes[k, 1] ) for n in range(N): iw = min(boxes[n, 2], query_boxes[k, 2]) - max( boxes[n, 0], query_boxes[k, 0] ) if iw > 0: ih = min(boxes[n, 3], query_boxes[k, 3]) - max( boxes[n, 1], query_boxes[k, 1] ) if ih > 0: if criterion == -1: ua = ( (boxes[n, 2] - boxes[n, 0]) * (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih ) elif criterion == 0: ua = (boxes[n, 2] - boxes[n, 0]) * (boxes[n, 3] - boxes[n, 1]) elif criterion == 1: ua = qbox_area else: ua = 1.0 overlaps[n, k] = iw * ih / ua return overlaps def bev_box_overlap(boxes, qboxes, criterion=-1): riou = rotate_iou_gpu_eval(boxes, qboxes, criterion) return riou @numba.jit(nopython=True, parallel=True) def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1): # ONLY support overlap in CAMERA, not lider. 
N, K = boxes.shape[0], qboxes.shape[0] for i in range(N): for j in range(K): if rinc[i, j] > 0: # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] + # qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1])) iw = min(boxes[i, 1], qboxes[j, 1]) - max( boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4] ) if iw > 0: area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5] area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5] inc = iw * rinc[i, j] if criterion == -1: ua = area1 + area2 - inc elif criterion == 0: ua = area1 elif criterion == 1: ua = area2 else: ua = inc rinc[i, j] = inc / ua else: rinc[i, j] = 0.0 def d3_box_overlap(boxes, qboxes, criterion=-1): rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]], qboxes[:, [0, 2, 3, 5, 6]], 2) d3_box_overlap_kernel(boxes, qboxes, rinc, criterion) return rinc @numba.jit(nopython=True) def compute_statistics_jit( overlaps, gt_datas, dt_datas, ignored_gt, ignored_det, dc_bboxes, metric, min_overlap, thresh=0, compute_fp=False, compute_aos=False, ): det_size = dt_datas.shape[0] gt_size = gt_datas.shape[0] dt_scores = dt_datas[:, -1] dt_alphas = dt_datas[:, 4] gt_alphas = gt_datas[:, 4] dt_bboxes = dt_datas[:, :4] gt_bboxes = gt_datas[:, :4] assigned_detection = [False] * det_size ignored_threshold = [False] * det_size if compute_fp: for i in range(det_size): if dt_scores[i] < thresh: ignored_threshold[i] = True NO_DETECTION = -10000000 tp, fp, fn, similarity = 0, 0, 0, 0 # thresholds = [0.0] # delta = [0.0] thresholds = np.zeros((gt_size,)) thresh_idx = 0 delta = np.zeros((gt_size,)) delta_idx = 0 for i in range(gt_size): if ignored_gt[i] == -1: continue det_idx = -1 valid_detection = NO_DETECTION max_overlap = 0 assigned_ignored_det = False for j in range(det_size): if ignored_det[j] == -1: continue if assigned_detection[j]: continue if ignored_threshold[j]: continue overlap = overlaps[j, i] dt_score = dt_scores[j] if ( not compute_fp and (overlap > min_overlap) and dt_score > valid_detection ): det_idx = j valid_detection = dt_score 
elif ( compute_fp and (overlap > min_overlap) and (overlap > max_overlap or assigned_ignored_det) and ignored_det[j] == 0 ): max_overlap = overlap det_idx = j valid_detection = 1 assigned_ignored_det = False elif ( compute_fp and (overlap > min_overlap) and (valid_detection == NO_DETECTION) and ignored_det[j] == 1 ): det_idx = j valid_detection = 1 assigned_ignored_det = True if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0: fn += 1 elif (valid_detection != NO_DETECTION) and ( ignored_gt[i] == 1 or ignored_det[det_idx] == 1 ): assigned_detection[det_idx] = True elif valid_detection != NO_DETECTION: tp += 1 # thresholds.append(dt_scores[det_idx]) thresholds[thresh_idx] = dt_scores[det_idx] thresh_idx += 1 if compute_aos: # delta.append(gt_alphas[i] - dt_alphas[det_idx]) delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx] delta_idx += 1 assigned_detection[det_idx] = True if compute_fp: for i in range(det_size): if not ( assigned_detection[i] or ignored_det[i] == -1 or ignored_det[i] == 1 or ignored_threshold[i] ): fp += 1 nstuff = 0 if metric == 0: overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0) for i in range(dc_bboxes.shape[0]): for j in range(det_size): if assigned_detection[j]: continue if ignored_det[j] == -1 or ignored_det[j] == 1: continue if ignored_threshold[j]: continue if overlaps_dt_dc[j, i] > min_overlap: assigned_detection[j] = True nstuff += 1 fp -= nstuff if compute_aos: tmp = np.zeros((fp + delta_idx,)) # tmp = [0] * fp for i in range(delta_idx): tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0 # tmp.append((1.0 + np.cos(delta[i])) / 2.0) # assert len(tmp) == fp + tp # assert len(delta) == tp if tp > 0 or fp > 0: similarity = np.sum(tmp) else: similarity = -1 return tp, fp, fn, similarity, thresholds[:thresh_idx] def get_split_parts(num, num_part): same_part = num // num_part remain_num = num % num_part if same_part == 0: return [num] if remain_num == 0: return [same_part] * num_part else: return [same_part] * num_part + 
[remain_num] @numba.jit(nopython=True) def fused_compute_statistics( overlaps, pr, gt_nums, dt_nums, dc_nums, gt_datas, dt_datas, dontcares, ignored_gts, ignored_dets, metric, min_overlap, thresholds, compute_aos=False, ): gt_num = 0 dt_num = 0 dc_num = 0 for i in range(gt_nums.shape[0]): for t, thresh in enumerate(thresholds): overlap = overlaps[ dt_num : dt_num + dt_nums[i], gt_num : gt_num + gt_nums[i] ] gt_data = gt_datas[gt_num : gt_num + gt_nums[i]] dt_data = dt_datas[dt_num : dt_num + dt_nums[i]] ignored_gt = ignored_gts[gt_num : gt_num + gt_nums[i]] ignored_det = ignored_dets[dt_num : dt_num + dt_nums[i]] dontcare = dontcares[dc_num : dc_num + dc_nums[i]] tp, fp, fn, similarity, _ = compute_statistics_jit( overlap, gt_data, dt_data, ignored_gt, ignored_det, dontcare, metric, min_overlap=min_overlap, thresh=thresh, compute_fp=True, compute_aos=compute_aos, ) pr[t, 0] += tp pr[t, 1] += fp pr[t, 2] += fn if similarity != -1: pr[t, 3] += similarity gt_num += gt_nums[i] dt_num += dt_nums[i] dc_num += dc_nums[i] def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50): """fast iou algorithm. this function can be used independently to do result analysis. Must be used in CAMERA coordinate system. Args: gt_annos: dict, must from get_label_annos() in kitti_common.py dt_annos: dict, must from get_label_annos() in kitti_common.py metric: eval type. 0: bbox, 1: bev, 2: 3d num_parts: int. 
a parameter for fast calculate algorithm """ assert len(gt_annos) == len(dt_annos) total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0) total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0) num_examples = len(gt_annos) split_parts = get_split_parts(num_examples, num_parts) parted_overlaps = [] example_idx = 0 for num_part in split_parts: gt_annos_part = gt_annos[example_idx : example_idx + num_part] dt_annos_part = dt_annos[example_idx : example_idx + num_part] if metric == 0: gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0) dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0) overlap_part = image_box_overlap(gt_boxes, dt_boxes) elif metric == 1: loc = np.concatenate([a["location"][:, [0, 2]] for a in gt_annos_part], 0) dims = np.concatenate( [a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0 ) rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0) gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) loc = np.concatenate([a["location"][:, [0, 2]] for a in dt_annos_part], 0) dims = np.concatenate( [a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0 ) rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0) dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(np.float64) elif metric == 2: loc = np.concatenate([a["location"] for a in gt_annos_part], 0) dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0) rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0) gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) loc = np.concatenate([a["location"] for a in dt_annos_part], 0) dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0) rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0) dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(np.float64) else: raise 
ValueError("unknown metric") parted_overlaps.append(overlap_part) example_idx += num_part overlaps = [] example_idx = 0 for j, num_part in enumerate(split_parts): gt_annos_part = gt_annos[example_idx : example_idx + num_part] dt_annos_part = dt_annos[example_idx : example_idx + num_part] gt_num_idx, dt_num_idx = 0, 0 for i in range(num_part): gt_box_num = total_gt_num[example_idx + i] dt_box_num = total_dt_num[example_idx + i] overlaps.append( parted_overlaps[j][ gt_num_idx : gt_num_idx + gt_box_num, dt_num_idx : dt_num_idx + dt_box_num, ] ) gt_num_idx += gt_box_num dt_num_idx += dt_box_num example_idx += num_part return overlaps, parted_overlaps, total_gt_num, total_dt_num def _prepare_data(gt_annos, dt_annos, current_class, difficulty): gt_datas_list = [] dt_datas_list = [] total_dc_num = [] ignored_gts, ignored_dets, dontcares = [], [], [] total_num_valid_gt = 0 for i in range(len(gt_annos)): rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty) num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets ignored_gts.append(np.array(ignored_gt, dtype=np.int64)) ignored_dets.append(np.array(ignored_det, dtype=np.int64)) if len(dc_bboxes) == 0: dc_bboxes = np.zeros((0, 4)).astype(np.float64) else: dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64) total_dc_num.append(dc_bboxes.shape[0]) dontcares.append(dc_bboxes) total_num_valid_gt += num_valid_gt gt_datas = np.concatenate( [gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1 ) dt_datas = np.concatenate( [ dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis], dt_annos[i]["score"][..., np.newaxis], ], 1, ) gt_datas_list.append(gt_datas) dt_datas_list.append(dt_datas) total_dc_num = np.stack(total_dc_num, axis=0) return ( gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt, ) def eval_class( gt_annos, dt_annos, current_classes, difficultys, metric, min_overlaps, compute_aos=False, num_parts=100, ): """Kitti eval. 
support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP. Args: gt_annos: dict, must from get_label_annos() in kitti_common.py dt_annos: dict, must from get_label_annos() in kitti_common.py current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard metric: eval type. 0: bbox, 1: bev, 2: 3d min_overlaps: float, min overlap. format: [num_overlap, metric, class]. num_parts: int. a parameter for fast calculate algorithm Returns: dict of recall, precision and aos """ assert len(gt_annos) == len(dt_annos) num_examples = len(gt_annos) split_parts = get_split_parts(num_examples, num_parts) rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts) overlaps, parted_overlaps, total_dt_num, total_gt_num = rets N_SAMPLE_PTS = 41 num_minoverlap = len(min_overlaps) num_class = len(current_classes) num_difficulty = len(difficultys) precision = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) for m, current_class in enumerate(current_classes): for l, difficulty in enumerate(difficultys): rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty) ( gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt, ) = rets for k, min_overlap in enumerate(min_overlaps[:, metric, m]): thresholdss = [] for i in range(len(gt_annos)): rets = compute_statistics_jit( overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], metric, min_overlap=min_overlap, thresh=0.0, compute_fp=False, ) tp, fp, fn, similarity, thresholds = rets thresholdss += thresholds.tolist() thresholdss = np.array(thresholdss) thresholds = get_thresholds(thresholdss, total_num_valid_gt) thresholds = np.array(thresholds) pr = np.zeros([len(thresholds), 4]) idx = 0 for j, num_part in 
enumerate(split_parts): gt_datas_part = np.concatenate( gt_datas_list[idx : idx + num_part], 0 ) dt_datas_part = np.concatenate( dt_datas_list[idx : idx + num_part], 0 ) dc_datas_part = np.concatenate(dontcares[idx : idx + num_part], 0) ignored_dets_part = np.concatenate( ignored_dets[idx : idx + num_part], 0 ) ignored_gts_part = np.concatenate( ignored_gts[idx : idx + num_part], 0 ) fused_compute_statistics( parted_overlaps[j], pr, total_gt_num[idx : idx + num_part], total_dt_num[idx : idx + num_part], total_dc_num[idx : idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, metric, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos, ) idx += num_part for i in range(len(thresholds)): recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2]) precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1]) if compute_aos: aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1]) for i in range(len(thresholds)): precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1) recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1) if compute_aos: aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1) ret_dict = { "recall": recall, "precision": precision, "orientation": aos, } return ret_dict def get_mAP(prec): sums = 0 for i in range(0, prec.shape[-1], 4): sums = sums + prec[..., i] return sums / 11 * 100 def get_mAP_R40(prec): sums = 0 for i in range(1, prec.shape[-1]): sums = sums + prec[..., i] return sums / 40 * 100 def print_str(value, *arg, sstream=None): if sstream is None: sstream = sysio.StringIO() sstream.truncate(0) sstream.seek(0) print(value, *arg, file=sstream) return sstream.getvalue() def do_eval( gt_annos, dt_annos, current_classes, min_overlaps, compute_aos=False, PR_detail_dict=None, ): # min_overlaps: [num_minoverlap, metric, num_class] difficultys = [0, 1, 2] ret = eval_class( gt_annos, dt_annos, current_classes, difficultys, 0, min_overlaps, compute_aos ) # ret: [num_class, num_diff, num_minoverlap, 
num_sample_points] mAP_bbox = get_mAP(ret["precision"]) mAP_bbox_R40 = get_mAP_R40(ret["precision"]) if PR_detail_dict is not None: PR_detail_dict["bbox"] = ret["precision"] mAP_aos = mAP_aos_R40 = None if compute_aos: mAP_aos = get_mAP(ret["orientation"]) mAP_aos_R40 = get_mAP_R40(ret["orientation"]) if PR_detail_dict is not None: PR_detail_dict["aos"] = ret["orientation"] ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1, min_overlaps) mAP_bev = get_mAP(ret["precision"]) mAP_bev_R40 = get_mAP_R40(ret["precision"]) if PR_detail_dict is not None: PR_detail_dict["bev"] = ret["precision"] ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2, min_overlaps) mAP_3d = get_mAP(ret["precision"]) mAP_3d_R40 = get_mAP_R40(ret["precision"]) if PR_detail_dict is not None: PR_detail_dict["3d"] = ret["precision"] return ( mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40, ) def do_coco_style_eval( gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos ): # overlap_ranges: [range, metric, num_class] min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]]) for i in range(overlap_ranges.shape[1]): for j in range(overlap_ranges.shape[2]): min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j]) mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval( gt_annos, dt_annos, current_classes, min_overlaps, compute_aos ) # ret: [num_class, num_diff, num_minoverlap] mAP_bbox = mAP_bbox.mean(-1) mAP_bev = mAP_bev.mean(-1) mAP_3d = mAP_3d.mean(-1) if mAP_aos is not None: mAP_aos = mAP_aos.mean(-1) return mAP_bbox, mAP_bev, mAP_3d, mAP_aos def get_official_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None): overlap_0_7 = np.array( [ [0.7, 0.5, 0.5, 0.7, 0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7], ] ) overlap_0_5 = np.array( [ [0.7, 0.5, 0.5, 0.7, 0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5], ] ) min_overlaps = np.stack([overlap_0_7, 
overlap_0_5], axis=0) # [2, 3, 5] class_to_name = { 0: "Car", 1: "Pedestrian", 2: "Cyclist", 3: "Van", 4: "Person_sitting", 5: "Truck", } name_to_class = {v: n for n, v in class_to_name.items()} if not isinstance(current_classes, (list, tuple)): current_classes = [current_classes] current_classes_int = [] for curcls in current_classes: if isinstance(curcls, str): current_classes_int.append(name_to_class[curcls]) else: current_classes_int.append(curcls) current_classes = current_classes_int min_overlaps = min_overlaps[:, :, current_classes] result = "" # check whether alpha is valid compute_aos = False for anno in dt_annos: if anno["alpha"].shape[0] != 0: if anno["alpha"][0] != -10: compute_aos = True break ( mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40, ) = do_eval( gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict, ) ret_dict = {} for j, curcls in enumerate(current_classes): # mAP threshold array: [num_minoverlap, metric, class] # mAP result: [num_class, num_diff, num_minoverlap] for i in range(min_overlaps.shape[0]): result += print_str( ( f"{class_to_name[curcls]} " "AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j]) ) ) result += print_str( ( f"bbox AP:{mAPbbox[j, 0, i]:.4f}, " f"{mAPbbox[j, 1, i]:.4f}, " f"{mAPbbox[j, 2, i]:.4f}" ) ) result += print_str( ( f"bev AP:{mAPbev[j, 0, i]:.4f}, " f"{mAPbev[j, 1, i]:.4f}, " f"{mAPbev[j, 2, i]:.4f}" ) ) result += print_str( ( f"3d AP:{mAP3d[j, 0, i]:.4f}, " f"{mAP3d[j, 1, i]:.4f}, " f"{mAP3d[j, 2, i]:.4f}" ) ) if compute_aos: result += print_str( ( f"aos AP:{mAPaos[j, 0, i]:.2f}, " f"{mAPaos[j, 1, i]:.2f}, " f"{mAPaos[j, 2, i]:.2f}" ) ) # if i == 0: # ret_dict['%s_aos/easy' % class_to_name[curcls]] = mAPaos[j, 0, 0] # ret_dict['%s_aos/moderate' % class_to_name[curcls]] = mAPaos[j, 1, 0] # ret_dict['%s_aos/hard' % class_to_name[curcls]] = mAPaos[j, 2, 0] result += print_str( ( f"{class_to_name[curcls]} " "AP_R40@{:.2f}, {:.2f}, 
{:.2f}:".format(*min_overlaps[i, :, j]) ) ) result += print_str( ( f"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, " f"{mAPbbox_R40[j, 1, i]:.4f}, " f"{mAPbbox_R40[j, 2, i]:.4f}" ) ) result += print_str( ( f"bev AP:{mAPbev_R40[j, 0, i]:.4f}, " f"{mAPbev_R40[j, 1, i]:.4f}, " f"{mAPbev_R40[j, 2, i]:.4f}" ) ) result += print_str( ( f"3d AP:{mAP3d_R40[j, 0, i]:.4f}, " f"{mAP3d_R40[j, 1, i]:.4f}, " f"{mAP3d_R40[j, 2, i]:.4f}" ) ) if compute_aos: result += print_str( ( f"aos AP:{mAPaos_R40[j, 0, i]:.2f}, " f"{mAPaos_R40[j, 1, i]:.2f}, " f"{mAPaos_R40[j, 2, i]:.2f}" ) ) if i == 0: ret_dict["%s_aos/easy_R40" % class_to_name[curcls]] = mAPaos_R40[ j, 0, 0 ] ret_dict["%s_aos/moderate_R40" % class_to_name[curcls]] = ( mAPaos_R40[j, 1, 0] ) ret_dict["%s_aos/hard_R40" % class_to_name[curcls]] = mAPaos_R40[ j, 2, 0 ] if i == 0: # ret_dict['%s_3d/easy' % class_to_name[curcls]] = mAP3d[j, 0, 0] # ret_dict['%s_3d/moderate' % class_to_name[curcls]] = mAP3d[j, 1, 0] # ret_dict['%s_3d/hard' % class_to_name[curcls]] = mAP3d[j, 2, 0] # ret_dict['%s_bev/easy' % class_to_name[curcls]] = mAPbev[j, 0, 0] # ret_dict['%s_bev/moderate' % class_to_name[curcls]] = mAPbev[j, 1, 0] # ret_dict['%s_bev/hard' % class_to_name[curcls]] = mAPbev[j, 2, 0] # ret_dict['%s_image/easy' % class_to_name[curcls]] = mAPbbox[j, 0, 0] # ret_dict['%s_image/moderate' % class_to_name[curcls]] = mAPbbox[j, 1, 0] # ret_dict['%s_image/hard' % class_to_name[curcls]] = mAPbbox[j, 2, 0] ret_dict["%s_3d/easy_R40" % class_to_name[curcls]] = mAP3d_R40[j, 0, 0] ret_dict["%s_3d/moderate_R40" % class_to_name[curcls]] = mAP3d_R40[ j, 1, 0 ] ret_dict["%s_3d/hard_R40" % class_to_name[curcls]] = mAP3d_R40[j, 2, 0] ret_dict["%s_bev/easy_R40" % class_to_name[curcls]] = mAPbev_R40[ j, 0, 0 ] ret_dict["%s_bev/moderate_R40" % class_to_name[curcls]] = mAPbev_R40[ j, 1, 0 ] ret_dict["%s_bev/hard_R40" % class_to_name[curcls]] = mAPbev_R40[ j, 2, 0 ] ret_dict["%s_image/easy_R40" % class_to_name[curcls]] = mAPbbox_R40[ j, 0, 0 ] 
ret_dict["%s_image/moderate_R40" % class_to_name[curcls]] = mAPbbox_R40[ j, 1, 0 ] ret_dict["%s_image/hard_R40" % class_to_name[curcls]] = mAPbbox_R40[ j, 2, 0 ] return result, ret_dict def get_coco_eval_result(gt_annos, dt_annos, current_classes): class_to_name = { 0: "Car", 1: "Pedestrian", 2: "Cyclist", 3: "Van", 4: "Person_sitting", } class_to_range = { 0: [0.5, 0.95, 10], 1: [0.25, 0.7, 10], 2: [0.25, 0.7, 10], 3: [0.5, 0.95, 10], 4: [0.25, 0.7, 10], } name_to_class = {v: n for n, v in class_to_name.items()} if not isinstance(current_classes, (list, tuple)): current_classes = [current_classes] current_classes_int = [] for curcls in current_classes: if isinstance(curcls, str): current_classes_int.append(name_to_class[curcls]) else: current_classes_int.append(curcls) current_classes = current_classes_int overlap_ranges = np.zeros([3, 3, len(current_classes)]) for i, curcls in enumerate(current_classes): overlap_ranges[:, :, i] = np.array(class_to_range[curcls])[:, np.newaxis] result = "" # check whether alpha is valid compute_aos = False for anno in dt_annos: if anno["alpha"].shape[0] != 0: if anno["alpha"][0] != -10: compute_aos = True break mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval( gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos ) for j, curcls in enumerate(current_classes): # mAP threshold array: [num_minoverlap, metric, class] # mAP result: [num_class, num_diff, num_minoverlap] o_range = np.array(class_to_range[curcls])[[0, 2, 1]] o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1) result += print_str( ( f"{class_to_name[curcls]} " "coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range) ) ) result += print_str( ( f"bbox AP:{mAPbbox[j, 0]:.2f}, " f"{mAPbbox[j, 1]:.2f}, " f"{mAPbbox[j, 2]:.2f}" ) ) result += print_str( ( f"bev AP:{mAPbev[j, 0]:.2f}, " f"{mAPbev[j, 1]:.2f}, " f"{mAPbev[j, 2]:.2f}" ) ) result += print_str( (f"3d AP:{mAP3d[j, 0]:.2f}, " f"{mAP3d[j, 1]:.2f}, " f"{mAP3d[j, 2]:.2f}") ) if compute_aos: result += print_str( 
( f"aos AP:{mAPaos[j, 0]:.2f}, " f"{mAPaos[j, 1]:.2f}, " f"{mAPaos[j, 2]:.2f}" ) ) return result
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py
Python
import time import fire import kitti_common as kitti from eval import get_coco_eval_result, get_official_eval_result def _read_imageset_file(path): with open(path, "r") as f: lines = f.readlines() return [int(line) for line in lines] def evaluate( label_path, result_path, label_split_file, current_class=0, coco=False, score_thresh=-1, ): dt_annos = kitti.get_label_annos(result_path) if score_thresh > 0: dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh) val_image_ids = _read_imageset_file(label_split_file) gt_annos = kitti.get_label_annos(label_path, val_image_ids) if coco: return get_coco_eval_result(gt_annos, dt_annos, current_class) else: return get_official_eval_result(gt_annos, dt_annos, current_class) if __name__ == "__main__": fire.Fire()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py
Python
import concurrent.futures as futures import os import pathlib import re from collections import OrderedDict import numpy as np from skimage import io def get_image_index_str(img_idx): return "{:06d}".format(img_idx) def get_kitti_info_path( idx, prefix, info_type="image_2", file_tail=".png", training=True, relative_path=True, ): img_idx_str = get_image_index_str(idx) img_idx_str += file_tail prefix = pathlib.Path(prefix) if training: file_path = pathlib.Path("training") / info_type / img_idx_str else: file_path = pathlib.Path("testing") / info_type / img_idx_str if not (prefix / file_path).exists(): raise ValueError("file not exist: {}".format(file_path)) if relative_path: return str(file_path) else: return str(prefix / file_path) def get_image_path(idx, prefix, training=True, relative_path=True): return get_kitti_info_path(idx, prefix, "image_2", ".png", training, relative_path) def get_label_path(idx, prefix, training=True, relative_path=True): return get_kitti_info_path(idx, prefix, "label_2", ".txt", training, relative_path) def get_velodyne_path(idx, prefix, training=True, relative_path=True): return get_kitti_info_path(idx, prefix, "velodyne", ".bin", training, relative_path) def get_calib_path(idx, prefix, training=True, relative_path=True): return get_kitti_info_path(idx, prefix, "calib", ".txt", training, relative_path) def _extend_matrix(mat): mat = np.concatenate([mat, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) return mat def get_kitti_image_info( path, training=True, label_info=True, velodyne=False, calib=False, image_ids=7481, extend_matrix=True, num_worker=8, relative_path=True, with_imageshape=True, ): # image_infos = [] root_path = pathlib.Path(path) if not isinstance(image_ids, list): image_ids = list(range(image_ids)) def map_func(idx): image_info = {"image_idx": idx} annotations = None if velodyne: image_info["velodyne_path"] = get_velodyne_path( idx, path, training, relative_path ) image_info["img_path"] = get_image_path(idx, path, training, 
relative_path) if with_imageshape: img_path = image_info["img_path"] if relative_path: img_path = str(root_path / img_path) image_info["img_shape"] = np.array( io.imread(img_path).shape[:2], dtype=np.int32 ) if label_info: label_path = get_label_path(idx, path, training, relative_path) if relative_path: label_path = str(root_path / label_path) annotations = get_label_anno(label_path) if calib: calib_path = get_calib_path(idx, path, training, relative_path=False) with open(calib_path, "r") as f: lines = f.readlines() P0 = np.array([float(info) for info in lines[0].split(" ")[1:13]]).reshape( [3, 4] ) P1 = np.array([float(info) for info in lines[1].split(" ")[1:13]]).reshape( [3, 4] ) P2 = np.array([float(info) for info in lines[2].split(" ")[1:13]]).reshape( [3, 4] ) P3 = np.array([float(info) for info in lines[3].split(" ")[1:13]]).reshape( [3, 4] ) if extend_matrix: P0 = _extend_matrix(P0) P1 = _extend_matrix(P1) P2 = _extend_matrix(P2) P3 = _extend_matrix(P3) image_info["calib/P0"] = P0 image_info["calib/P1"] = P1 image_info["calib/P2"] = P2 image_info["calib/P3"] = P3 R0_rect = np.array( [float(info) for info in lines[4].split(" ")[1:10]] ).reshape([3, 3]) if extend_matrix: rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) rect_4x4[3, 3] = 1.0 rect_4x4[:3, :3] = R0_rect else: rect_4x4 = R0_rect image_info["calib/R0_rect"] = rect_4x4 Tr_velo_to_cam = np.array( [float(info) for info in lines[5].split(" ")[1:13]] ).reshape([3, 4]) Tr_imu_to_velo = np.array( [float(info) for info in lines[6].split(" ")[1:13]] ).reshape([3, 4]) if extend_matrix: Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo) image_info["calib/Tr_velo_to_cam"] = Tr_velo_to_cam image_info["calib/Tr_imu_to_velo"] = Tr_imu_to_velo if annotations is not None: image_info["annos"] = annotations add_difficulty_to_annos(image_info) return image_info with futures.ThreadPoolExecutor(num_worker) as executor: image_infos = executor.map(map_func, image_ids) return 
list(image_infos) def filter_kitti_anno( image_anno, used_classes, used_difficulty=None, dontcare_iou=None ): if not isinstance(used_classes, (list, tuple)): used_classes = [used_classes] img_filtered_annotations = {} relevant_annotation_indices = [ i for i, x in enumerate(image_anno["name"]) if x in used_classes ] for key in image_anno.keys(): img_filtered_annotations[key] = image_anno[key][relevant_annotation_indices] if used_difficulty is not None: relevant_annotation_indices = [ i for i, x in enumerate(img_filtered_annotations["difficulty"]) if x in used_difficulty ] for key in image_anno.keys(): img_filtered_annotations[key] = img_filtered_annotations[key][ relevant_annotation_indices ] if "DontCare" in used_classes and dontcare_iou is not None: dont_care_indices = [ i for i, x in enumerate(img_filtered_annotations["name"]) if x == "DontCare" ] # bounding box format [y_min, x_min, y_max, x_max] all_boxes = img_filtered_annotations["bbox"] ious = iou(all_boxes, all_boxes[dont_care_indices]) # Remove all bounding boxes that overlap with a dontcare region. 
if ious.size > 0: boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou for key in image_anno.keys(): img_filtered_annotations[key] = img_filtered_annotations[key][ np.logical_not(boxes_to_remove) ] return img_filtered_annotations def filter_annos_low_score(image_annos, thresh): new_image_annos = [] for anno in image_annos: img_filtered_annotations = {} relevant_annotation_indices = [ i for i, s in enumerate(anno["score"]) if s >= thresh ] for key in anno.keys(): img_filtered_annotations[key] = anno[key][relevant_annotation_indices] new_image_annos.append(img_filtered_annotations) return new_image_annos def kitti_result_line(result_dict, precision=4): prec_float = "{" + ":.{}f".format(precision) + "}" res_line = [] all_field_default = OrderedDict( [ ("name", None), ("truncated", -1), ("occluded", -1), ("alpha", -10), ("bbox", None), ("dimensions", [-1, -1, -1]), ("location", [-1000, -1000, -1000]), ("rotation_y", -10), ("score", None), ] ) res_dict = [(key, None) for key, val in all_field_default.items()] res_dict = OrderedDict(res_dict) for key, val in result_dict.items(): if all_field_default[key] is None and val is None: raise ValueError("you must specify a value for {}".format(key)) res_dict[key] = val for key, val in res_dict.items(): if key == "name": res_line.append(val) elif key in ["truncated", "alpha", "rotation_y", "score"]: if val is None: res_line.append(str(all_field_default[key])) else: res_line.append(prec_float.format(val)) elif key == "occluded": if val is None: res_line.append(str(all_field_default[key])) else: res_line.append("{}".format(val)) elif key in ["bbox", "dimensions", "location"]: if val is None: res_line += [str(v) for v in all_field_default[key]] else: res_line += [prec_float.format(v) for v in val] else: raise ValueError("unknown key. 
supported key:{}".format(res_dict.keys())) return " ".join(res_line) def add_difficulty_to_annos(info): min_height = [40, 25, 25] # minimum height for evaluated groundtruth/detections max_occlusion = [ 0, 1, 2, ] # maximum occlusion level of the groundtruth used for eval_utils max_trunc = [ 0.15, 0.3, 0.5, ] # maximum truncation level of the groundtruth used for eval_utils annos = info["annos"] dims = annos["dimensions"] # lhw format bbox = annos["bbox"] height = bbox[:, 3] - bbox[:, 1] occlusion = annos["occluded"] truncation = annos["truncated"] diff = [] easy_mask = np.ones((len(dims),), dtype=np.bool) moderate_mask = np.ones((len(dims),), dtype=np.bool) hard_mask = np.ones((len(dims),), dtype=np.bool) i = 0 for h, o, t in zip(height, occlusion, truncation): if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]: easy_mask[i] = False if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]: moderate_mask[i] = False if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]: hard_mask[i] = False i += 1 is_easy = easy_mask is_moderate = np.logical_xor(easy_mask, moderate_mask) is_hard = np.logical_xor(hard_mask, moderate_mask) for i in range(len(dims)): if is_easy[i]: diff.append(0) elif is_moderate[i]: diff.append(1) elif is_hard[i]: diff.append(2) else: diff.append(-1) annos["difficulty"] = np.array(diff, np.int32) return diff def get_label_anno(label_path): annotations = {} annotations.update( { "name": [], "truncated": [], "occluded": [], "alpha": [], "bbox": [], "dimensions": [], "location": [], "rotation_y": [], } ) with open(label_path, "r") as f: lines = f.readlines() # if len(lines) == 0 or len(lines[0]) < 15: # content = [] # else: content = [line.strip().split(" ") for line in lines] annotations["name"] = np.array([x[0] for x in content]) annotations["truncated"] = np.array([float(x[1]) for x in content]) annotations["occluded"] = np.array([int(x[2]) for x in content]) annotations["alpha"] = np.array([float(x[3]) for x in 
content]) annotations["bbox"] = np.array( [[float(info) for info in x[4:8]] for x in content] ).reshape(-1, 4) # dimensions will convert hwl format to standard lhw(camera) format. annotations["dimensions"] = np.array( [[float(info) for info in x[8:11]] for x in content] ).reshape(-1, 3)[:, [2, 0, 1]] annotations["location"] = np.array( [[float(info) for info in x[11:14]] for x in content] ).reshape(-1, 3) annotations["rotation_y"] = np.array([float(x[14]) for x in content]).reshape(-1) if len(content) != 0 and len(content[0]) == 16: # have score annotations["score"] = np.array([float(x[15]) for x in content]) else: annotations["score"] = np.zeros([len(annotations["bbox"])]) return annotations def get_label_annos(label_folder, image_ids=None): if image_ids is None: filepaths = pathlib.Path(label_folder).glob("*.txt") prog = re.compile(r"^\d{6}.txt$") filepaths = filter(lambda f: prog.match(f.name), filepaths) image_ids = [int(p.stem) for p in filepaths] image_ids = sorted(image_ids) if not isinstance(image_ids, list): image_ids = list(range(image_ids)) annos = [] label_folder = pathlib.Path(label_folder) for idx in image_ids: image_idx = get_image_index_str(idx) label_filename = label_folder / (image_idx + ".txt") annos.append(get_label_anno(label_filename)) return annos def area(boxes, add1=False): """Computes area of boxes. Args: boxes: Numpy array with shape [N, 4] holding N boxes Returns: a numpy array with shape [N*1] representing box areas """ if add1: return (boxes[:, 2] - boxes[:, 0] + 1.0) * (boxes[:, 3] - boxes[:, 1] + 1.0) else: return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) def intersection(boxes1, boxes2, add1=False): """Compute pairwise intersection areas between boxes. 
Args: boxes1: a numpy array with shape [N, 4] holding N boxes boxes2: a numpy array with shape [M, 4] holding M boxes Returns: a numpy array with shape [N*M] representing pairwise intersection area """ [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1) [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1) all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2)) all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2)) if add1: all_pairs_min_ymax += 1.0 intersect_heights = np.maximum( np.zeros(all_pairs_max_ymin.shape), all_pairs_min_ymax - all_pairs_max_ymin ) all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2)) all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2)) if add1: all_pairs_min_xmax += 1.0 intersect_widths = np.maximum( np.zeros(all_pairs_max_xmin.shape), all_pairs_min_xmax - all_pairs_max_xmin ) return intersect_heights * intersect_widths def iou(boxes1, boxes2, add1=False): """Computes pairwise intersection-over-union between box collections. Args: boxes1: a numpy array with shape [N, 4] holding N boxes. boxes2: a numpy array with shape [M, 4] holding N boxes. Returns: a numpy array with shape [N, M] representing pairwise iou scores. """ intersect = intersection(boxes1, boxes2, add1) area1 = area(boxes1, add1) area2 = area(boxes2, add1) union = np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) - intersect return intersect / union
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou.py
Python
##################### # Based on https://github.com/hongzhenwang/RRPN-revise # Licensed under The MIT License # Author: yanyan, scrin@foxmail.com ##################### import math import numba import numpy as np from numba import cuda @numba.jit(nopython=True) def div_up(m, n): return m // n + (m % n > 0) @cuda.jit("(float32[:], float32[:], float32[:])", device=True, inline=True) def trangle_area(a, b, c): return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0])) / 2.0 @cuda.jit("(float32[:], int32)", device=True, inline=True) def area(int_pts, num_of_inter): area_val = 0.0 for i in range(num_of_inter - 2): area_val += abs( trangle_area( int_pts[:2], int_pts[2 * i + 2 : 2 * i + 4], int_pts[2 * i + 4 : 2 * i + 6], ) ) return area_val @cuda.jit("(float32[:], int32)", device=True, inline=True) def sort_vertex_in_convex_polygon(int_pts, num_of_inter): if num_of_inter > 0: center = cuda.local.array((2,), dtype=numba.float32) center[:] = 0.0 for i in range(num_of_inter): center[0] += int_pts[2 * i] center[1] += int_pts[2 * i + 1] center[0] /= num_of_inter center[1] /= num_of_inter v = cuda.local.array((2,), dtype=numba.float32) vs = cuda.local.array((16,), dtype=numba.float32) for i in range(num_of_inter): v[0] = int_pts[2 * i] - center[0] v[1] = int_pts[2 * i + 1] - center[1] d = math.sqrt(v[0] * v[0] + v[1] * v[1]) v[0] = v[0] / d v[1] = v[1] / d if v[1] < 0: v[0] = -2 - v[0] vs[i] = v[0] j = 0 temp = 0 for i in range(1, num_of_inter): if vs[i - 1] > vs[i]: temp = vs[i] tx = int_pts[2 * i] ty = int_pts[2 * i + 1] j = i while j > 0 and vs[j - 1] > temp: vs[j] = vs[j - 1] int_pts[j * 2] = int_pts[j * 2 - 2] int_pts[j * 2 + 1] = int_pts[j * 2 - 1] j -= 1 vs[j] = temp int_pts[j * 2] = tx int_pts[j * 2 + 1] = ty @cuda.jit( "(float32[:], float32[:], int32, int32, float32[:])", device=True, inline=True ) def line_segment_intersection(pts1, pts2, i, j, temp_pts): A = cuda.local.array((2,), dtype=numba.float32) B = cuda.local.array((2,), dtype=numba.float32) C = 
cuda.local.array((2,), dtype=numba.float32) D = cuda.local.array((2,), dtype=numba.float32) A[0] = pts1[2 * i] A[1] = pts1[2 * i + 1] B[0] = pts1[2 * ((i + 1) % 4)] B[1] = pts1[2 * ((i + 1) % 4) + 1] C[0] = pts2[2 * j] C[1] = pts2[2 * j + 1] D[0] = pts2[2 * ((j + 1) % 4)] D[1] = pts2[2 * ((j + 1) % 4) + 1] BA0 = B[0] - A[0] BA1 = B[1] - A[1] DA0 = D[0] - A[0] CA0 = C[0] - A[0] DA1 = D[1] - A[1] CA1 = C[1] - A[1] acd = DA1 * CA0 > CA1 * DA0 bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) if acd != bcd: abc = CA1 * BA0 > BA1 * CA0 abd = DA1 * BA0 > BA1 * DA0 if abc != abd: DC0 = D[0] - C[0] DC1 = D[1] - C[1] ABBA = A[0] * B[1] - B[0] * A[1] CDDC = C[0] * D[1] - D[0] * C[1] DH = BA1 * DC0 - BA0 * DC1 Dx = ABBA * DC0 - BA0 * CDDC Dy = ABBA * DC1 - BA1 * CDDC temp_pts[0] = Dx / DH temp_pts[1] = Dy / DH return True return False @cuda.jit( "(float32[:], float32[:], int32, int32, float32[:])", device=True, inline=True ) def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): a = cuda.local.array((2,), dtype=numba.float32) b = cuda.local.array((2,), dtype=numba.float32) c = cuda.local.array((2,), dtype=numba.float32) d = cuda.local.array((2,), dtype=numba.float32) a[0] = pts1[2 * i] a[1] = pts1[2 * i + 1] b[0] = pts1[2 * ((i + 1) % 4)] b[1] = pts1[2 * ((i + 1) % 4) + 1] c[0] = pts2[2 * j] c[1] = pts2[2 * j + 1] d[0] = pts2[2 * ((j + 1) % 4)] d[1] = pts2[2 * ((j + 1) % 4) + 1] area_abc = trangle_area(a, b, c) area_abd = trangle_area(a, b, d) if area_abc * area_abd >= 0: return False area_cda = trangle_area(c, d, a) area_cdb = area_cda + area_abc - area_abd if area_cda * area_cdb >= 0: return False t = area_cda / (area_abd - area_abc) dx = t * (b[0] - a[0]) dy = t * (b[1] - a[1]) temp_pts[0] = a[0] + dx temp_pts[1] = a[1] + dy return True @cuda.jit("(float32, float32, float32[:])", device=True, inline=True) def point_in_quadrilateral(pt_x, pt_y, corners): ab0 = corners[2] - corners[0] ab1 = corners[3] - corners[1] ad0 = corners[6] - corners[0] ad1 = 
corners[7] - corners[1] ap0 = pt_x - corners[0] ap1 = pt_y - corners[1] abab = ab0 * ab0 + ab1 * ab1 abap = ab0 * ap0 + ab1 * ap1 adad = ad0 * ad0 + ad1 * ad1 adap = ad0 * ap0 + ad1 * ap1 return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 @cuda.jit("(float32[:], float32[:], float32[:])", device=True, inline=True) def quadrilateral_intersection(pts1, pts2, int_pts): num_of_inter = 0 for i in range(4): if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): int_pts[num_of_inter * 2] = pts1[2 * i] int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] num_of_inter += 1 if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): int_pts[num_of_inter * 2] = pts2[2 * i] int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] num_of_inter += 1 temp_pts = cuda.local.array((2,), dtype=numba.float32) for i in range(4): for j in range(4): has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) if has_pts: int_pts[num_of_inter * 2] = temp_pts[0] int_pts[num_of_inter * 2 + 1] = temp_pts[1] num_of_inter += 1 return num_of_inter @cuda.jit("(float32[:], float32[:])", device=True, inline=True) def rbbox_to_corners(corners, rbbox): # generate clockwise corners and rotate it clockwise angle = rbbox[4] a_cos = math.cos(angle) a_sin = math.sin(angle) center_x = rbbox[0] center_y = rbbox[1] x_d = rbbox[2] y_d = rbbox[3] corners_x = cuda.local.array((4,), dtype=numba.float32) corners_y = cuda.local.array((4,), dtype=numba.float32) corners_x[0] = -x_d / 2 corners_x[1] = -x_d / 2 corners_x[2] = x_d / 2 corners_x[3] = x_d / 2 corners_y[0] = -y_d / 2 corners_y[1] = y_d / 2 corners_y[2] = y_d / 2 corners_y[3] = -y_d / 2 for i in range(4): corners[2 * i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x corners[2 * i + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y @cuda.jit("(float32[:], float32[:])", device=True, inline=True) def inter(rbbox1, rbbox2): corners1 = cuda.local.array((8,), dtype=numba.float32) corners2 = cuda.local.array((8,), 
dtype=numba.float32) intersection_corners = cuda.local.array((16,), dtype=numba.float32) rbbox_to_corners(corners1, rbbox1) rbbox_to_corners(corners2, rbbox2) num_intersection = quadrilateral_intersection( corners1, corners2, intersection_corners ) sort_vertex_in_convex_polygon(intersection_corners, num_intersection) # print(intersection_corners.reshape([-1, 2])[:num_intersection]) return area(intersection_corners, num_intersection) @cuda.jit("(float32[:], float32[:], int32)", device=True, inline=True) def devRotateIoUEval(rbox1, rbox2, criterion=-1): area1 = rbox1[2] * rbox1[3] area2 = rbox2[2] * rbox2[3] area_inter = inter(rbox1, rbox2) if criterion == -1: return area_inter / (area1 + area2 - area_inter) elif criterion == 0: return area_inter / area1 elif criterion == 1: return area_inter / area2 else: return area_inter @cuda.jit("(int64, int64, float32[:], float32[:], float32[:], int32)", fastmath=False) def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1): threadsPerBlock = 8 * 8 row_start = cuda.blockIdx.x col_start = cuda.blockIdx.y tx = cuda.threadIdx.x row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) block_boxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) block_qboxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) dev_query_box_idx = threadsPerBlock * col_start + tx dev_box_idx = threadsPerBlock * row_start + tx if tx < col_size: block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] if tx < row_size: block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] block_boxes[tx * 5 + 
2] = dev_boxes[dev_box_idx * 5 + 2] block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] cuda.syncthreads() if tx < row_size: for i in range(col_size): offset = ( row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i ) dev_iou[offset] = devRotateIoUEval( block_qboxes[i * 5 : i * 5 + 5], block_boxes[tx * 5 : tx * 5 + 5], criterion, ) def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): """rotated box iou running in gpu. 500x faster than cpu version (take 5ms in one example with numba.cuda code). convert from [this project]( https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation). Args: boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, angles(clockwise when positive) query_boxes (float tensor: [K, 5]): [description] device_id (int, optional): Defaults to 0. [description] Returns: [type]: [description] """ box_dtype = boxes.dtype boxes = boxes.astype(np.float32) query_boxes = query_boxes.astype(np.float32) N = boxes.shape[0] K = query_boxes.shape[0] iou = np.zeros((N, K), dtype=np.float32) if N == 0 or K == 0: return iou threadsPerBlock = 8 * 8 cuda.select_device(device_id) blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) stream = cuda.stream() with stream.auto_synchronize(): boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) iou_dev = cuda.to_device(iou.reshape([-1]), stream) rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream]( N, K, boxes_dev, query_boxes_dev, iou_dev, criterion ) iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) return iou.astype(boxes.dtype)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/kitti/kitti_utils.py
Python
import numpy as np from pcdet.datasets.dataset import DatasetTemplate as Dataset from pcdet.utils import box_utils def transform_annotations_to_kitti_format( annos, map_name_to_kitti=None, info_with_fakelidar=False, **kwargs ): """ Args: annos: map_name_to_kitti: dict, map name to KITTI names (Car, Pedestrian, Cyclist) info_with_fakelidar: Returns: """ for anno in annos: if "name" not in anno: anno["name"] = anno["gt_names"] anno.pop("gt_names") for k in range(anno["name"].shape[0]): if anno["name"][k] in map_name_to_kitti: anno["name"][k] = map_name_to_kitti[anno["name"][k]] else: anno["name"][k] = "Person_sitting" if "boxes_lidar" in anno: gt_boxes_lidar = anno["boxes_lidar"].copy() elif "gt_boxes_lidar" in anno: gt_boxes_lidar = anno["gt_boxes_lidar"].copy() else: gt_boxes_lidar = anno["gt_boxes"].copy() # filter by fov if kwargs.get("is_gt", None) and kwargs.get("GT_FILTER", None): if kwargs.get("FOV_FILTER", None): gt_boxes_lidar = filter_by_fov(anno, gt_boxes_lidar, kwargs) # filter by range if kwargs.get("GT_FILTER", None) and kwargs.get("RANGE_FILTER", None): point_cloud_range = kwargs["RANGE_FILTER"] gt_boxes_lidar = filter_by_range( anno, gt_boxes_lidar, point_cloud_range, kwargs["is_gt"] ) if kwargs.get("GT_FILTER", None): anno["gt_boxes_lidar"] = gt_boxes_lidar anno["bbox"] = np.zeros((len(anno["name"]), 4)) anno["bbox"][:, 2:4] = 50 # [0, 0, 50, 50] anno["truncated"] = np.zeros(len(anno["name"])) anno["occluded"] = np.zeros(len(anno["name"])) if len(gt_boxes_lidar) > 0: if info_with_fakelidar: gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar( gt_boxes_lidar ) gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2 anno["location"] = np.zeros((gt_boxes_lidar.shape[0], 3)) anno["location"][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar anno["location"][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar anno["location"][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar dxdydz = gt_boxes_lidar[:, 3:6] anno["dimensions"] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw 
anno["rotation_y"] = -gt_boxes_lidar[:, 6] - np.pi / 2.0 anno["alpha"] = ( -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno["rotation_y"] ) else: anno["location"] = anno["dimensions"] = np.zeros((0, 3)) anno["rotation_y"] = anno["alpha"] = np.zeros(0) return annos def filter_by_range(anno, gt_boxes_lidar, point_cloud_range, is_gt): mask = box_utils.mask_boxes_outside_range_numpy( gt_boxes_lidar, point_cloud_range, min_num_corners=1 ) gt_boxes_lidar = gt_boxes_lidar[mask] anno["name"] = anno["name"][mask] if not is_gt: anno["score"] = anno["score"][mask] anno["pred_labels"] = anno["pred_labels"][mask] return gt_boxes_lidar def filter_by_fov(anno, gt_boxes_lidar, kwargs): fov_gt_flag = Dataset.extract_fov_gt( gt_boxes_lidar, kwargs["FOV_DEGREE"], kwargs["FOV_ANGLE"] ) gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag] anno["name"] = anno["name"][fov_gt_flag] return gt_boxes_lidar
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/lyft/lyft_dataset.py
Python
import copy import pickle from pathlib import Path import numpy as np from tqdm import tqdm from pcdet.datasets.dataset import DatasetTemplate from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils from pcdet.utils import box_utils, common_utils, self_training_utils class LyftDataset(DatasetTemplate): def __init__( self, dataset_cfg, class_names, training=True, root_path=None, logger=None ): root_path = ( root_path if root_path is not None else Path(dataset_cfg.DATA_PATH) ) / dataset_cfg.VERSION super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger, ) self.infos = [] self.include_lyft_data(self.mode) def include_lyft_data(self, mode): self.logger.info("Loading lyft dataset") lyft_infos = [] for info_path in self.dataset_cfg.INFO_PATH[mode]: info_path = self.root_path / info_path if not info_path.exists(): continue with open(info_path, "rb") as f: infos = pickle.load(f) lyft_infos.extend(infos) self.infos.extend(lyft_infos) self.logger.info("Total samples for lyft dataset: %d" % (len(lyft_infos))) @staticmethod def remove_ego_points(points, center_radius=1.0): mask = ~( (np.abs(points[:, 0]) < center_radius * 1.5) & (np.abs(points[:, 1]) < center_radius) ) return points[mask] def get_lidar(self, index): info = self.infos[index] lidar_path = self.root_path / info["ref_info"]["LIDAR_TOP"]["lidar_path"] points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1) if points.shape[0] % 5 != 0: points = points[: points.shape[0] - (points.shape[0] % 5)] points = points.reshape([-1, 5])[:, :4] points = self.remove_ego_points(points, center_radius=1.5) return points def __len__(self): if self._merge_all_iters_to_one_epoch: return len(self.infos) * self.total_epochs return len(self.infos) def __getitem__(self, index): if self._merge_all_iters_to_one_epoch: index = index % len(self.infos) info = copy.deepcopy(self.infos[index]) points = self.get_lidar(index) if self.dataset_cfg.get("SHIFT_COOR", 
None): points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32) input_dict = { "points": points, "frame_id": Path(info["ref_info"]["LIDAR_TOP"]["lidar_path"]).stem, "metadata": {"token": info["token"]}, } if "gt_boxes" in info: input_dict.update( {"gt_boxes": info["gt_boxes"], "gt_names": info["gt_names"]} ) if self.dataset_cfg.get("SHIFT_COOR", None): input_dict["gt_boxes"][:, 0:3] += self.dataset_cfg.SHIFT_COOR if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: input_dict["gt_boxes"] = None # for debug only # gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_) # debug_dict = {'gt_boxes': copy.deepcopy(input_dict['gt_boxes'][gt_boxes_mask])} if self.dataset_cfg.get("FOV_POINTS_ONLY", None): input_dict["points"] = self.extract_fov_data( input_dict["points"], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE, ) if input_dict["gt_boxes"] is not None: fov_gt_flag = self.extract_fov_gt( input_dict["gt_boxes"], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE, ) input_dict.update( { "gt_names": input_dict["gt_names"][fov_gt_flag], "gt_boxes": input_dict["gt_boxes"][fov_gt_flag], } ) if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: self.fill_pseudo_labels(input_dict) data_dict = self.prepare_data(data_dict=input_dict) if self.dataset_cfg.get("SET_NAN_VELOCITY_TO_ZEROS", False): gt_boxes = data_dict["gt_boxes"] gt_boxes[np.isnan(gt_boxes)] = 0 data_dict["gt_boxes"] = gt_boxes if not self.dataset_cfg.PRED_VELOCITY and "gt_boxes" in data_dict: data_dict["gt_boxes"] = data_dict["gt_boxes"][:, [0, 1, 2, 3, 4, 5, 6, -1]] return data_dict def generate_prediction_dicts( self, batch_dict, pred_dicts, class_names, output_path=None ): """ Args: batch_dict: frame_id: pred_dicts: list of pred_dicts pred_boxes: (N, 7), Tensor pred_scores: (N), Tensor pred_labels: (N), Tensor class_names: output_path: Returns: """ def get_template_prediction(num_samples): ret_dict = { 
"name": np.zeros(num_samples), "score": np.zeros(num_samples), "boxes_lidar": np.zeros([num_samples, 7]), "pred_labels": np.zeros(num_samples), } return ret_dict def generate_single_sample_dict(box_dict): pred_scores = box_dict["pred_scores"].cpu().numpy() pred_boxes = box_dict["pred_boxes"].cpu().numpy() pred_labels = box_dict["pred_labels"].cpu().numpy() pred_dict = get_template_prediction(pred_scores.shape[0]) if pred_scores.shape[0] == 0: return pred_dict if self.dataset_cfg.get("SHIFT_COOR", None): pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR pred_dict["name"] = np.array(class_names)[pred_labels - 1] pred_dict["score"] = pred_scores pred_dict["boxes_lidar"] = pred_boxes pred_dict["pred_labels"] = pred_labels return pred_dict annos = [] for index, box_dict in enumerate(pred_dicts): single_pred_dict = generate_single_sample_dict(box_dict) single_pred_dict["frame_id"] = batch_dict["frame_id"][index] single_pred_dict["metadata"] = batch_dict["metadata"][index] annos.append(single_pred_dict) return annos def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names): from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval map_name_to_kitti = { "car": "Car", "pedestrian": "Pedestrian", "truck": "Truck", } def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False): for anno in annos: if "name" not in anno: anno["name"] = anno["gt_names"] anno.pop("gt_names") for k in range(anno["name"].shape[0]): if anno["name"][k] in map_name_to_kitti: anno["name"][k] = map_name_to_kitti[anno["name"][k]] else: anno["name"][k] = "Person_sitting" if "boxes_lidar" in anno: gt_boxes_lidar = anno["boxes_lidar"].copy() else: gt_boxes_lidar = anno["gt_boxes"].copy() # filter by range if ( self.dataset_cfg.get("GT_FILTER", None) and self.dataset_cfg.GT_FILTER.RANGE_FILTER ): if self.dataset_cfg.GT_FILTER.get("RANGE", None): point_cloud_range = self.dataset_cfg.GT_FILTER.RANGE else: point_cloud_range = self.point_cloud_range point_cloud_range[2] 
= -10 point_cloud_range[5] = 10 mask = box_utils.mask_boxes_outside_range_numpy( gt_boxes_lidar, point_cloud_range, min_num_corners=1 ) gt_boxes_lidar = gt_boxes_lidar[mask] anno["name"] = anno["name"][mask] if not is_gt: anno["score"] = anno["score"][mask] anno["pred_labels"] = anno["pred_labels"][mask] # filter by fov if is_gt and self.dataset_cfg.get("GT_FILTER", None): if self.dataset_cfg.GT_FILTER.get("FOV_FILTER", None): fov_gt_flag = self.extract_fov_gt( gt_boxes_lidar, self.dataset_cfg["FOV_DEGREE"], self.dataset_cfg["FOV_ANGLE"], ) gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag] anno["name"] = anno["name"][fov_gt_flag] anno["bbox"] = np.zeros((len(anno["name"]), 4)) anno["bbox"][:, 2:4] = 50 # [0, 0, 50, 50] anno["truncated"] = np.zeros(len(anno["name"])) anno["occluded"] = np.zeros(len(anno["name"])) if len(gt_boxes_lidar) > 0: if info_with_fakelidar: gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar( gt_boxes_lidar ) gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2 anno["location"] = np.zeros((gt_boxes_lidar.shape[0], 3)) anno["location"][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar anno["location"][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar anno["location"][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar dxdydz = gt_boxes_lidar[:, 3:6] anno["dimensions"] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw anno["rotation_y"] = -gt_boxes_lidar[:, 6] - np.pi / 2.0 anno["alpha"] = ( -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno["rotation_y"] ) else: anno["location"] = anno["dimensions"] = np.zeros((0, 3)) anno["rotation_y"] = anno["alpha"] = np.zeros(0) # self.filter_det_results(eval_det_annos, self.oracle_infos) transform_to_kitti_format(eval_det_annos) transform_to_kitti_format(eval_gt_annos, info_with_fakelidar=False, is_gt=True) kitti_class_names = [] for x in class_names: if x in map_name_to_kitti: kitti_class_names.append(map_name_to_kitti[x]) else: kitti_class_names.append("Person_sitting") ap_result_str, ap_dict = 
kitti_eval.get_official_eval_result( gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names, ) return ap_result_str, ap_dict def evaluation(self, det_annos, class_names, **kwargs): if kwargs["eval_metric"] == "kitti": eval_det_annos = copy.deepcopy(det_annos) eval_gt_annos = copy.deepcopy(self.infos) return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names) else: raise NotImplementedError def create_groundtruth_database(self, used_classes=None): import torch database_save_path = self.root_path / f"gt_database_withvelo" db_info_save_path = self.root_path / f"lyft_dbinfos_withvelo.pkl" database_save_path.mkdir(parents=True, exist_ok=True) all_db_infos = {} for idx in tqdm(range(len(self.infos))): sample_idx = idx info = self.infos[idx] points = self.get_lidar(idx) gt_boxes = info["gt_boxes"] gt_names = info["gt_names"] box_idxs_of_pts = ( roiaware_pool3d_utils.points_in_boxes_gpu( torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(), torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda(), ) .long() .squeeze(dim=0) .cpu() .numpy() ) for i in range(gt_boxes.shape[0]): filename = "%s_%s_%d.bin" % (sample_idx, gt_names[i], i) filepath = database_save_path / filename gt_points = points[box_idxs_of_pts == i] gt_points[:, :3] -= gt_boxes[i, :3] with open(filepath, "w") as f: gt_points.tofile(f) if (used_classes is None) or gt_names[i] in used_classes: db_path = str( filepath.relative_to(self.root_path) ) # gt_database/xxxxx.bin db_info = { "name": gt_names[i], "path": db_path, "image_idx": sample_idx, "gt_idx": i, "box3d_lidar": gt_boxes[i], "num_points_in_gt": gt_points.shape[0], } if gt_names[i] in all_db_infos: all_db_infos[gt_names[i]].append(db_info) else: all_db_infos[gt_names[i]] = [db_info] for k, v in all_db_infos.items(): print("Database %s: %d" % (k, len(v))) with open(db_info_save_path, "wb") as f: pickle.dump(all_db_infos, f) def create_lyft_info(version, data_path, save_path, split): from 
lyft_dataset_sdk.lyftdataset import LyftDataset from pcdet.datasets.lyft import lyft_utils data_path = data_path / version save_path = save_path / version split_path = data_path.parent.parent / "data_split" / "lyft" if split is not None: save_path = save_path / split split_path = split_path / split save_path.mkdir(exist_ok=True) assert version in ["trainval", "one_scene", "test"] if version == "trainval": train_split_path = split_path / "train.txt" val_split_path = split_path / "val.txt" elif version == "test": train_split_path = split_path / "test.txt" val_split_path = None elif version == "one_scene": train_split_path = split_path / "one_scene.txt" val_split_path = split_path / "one_scene.txt" else: raise NotImplementedError train_scenes = ( [x.strip() for x in open(train_split_path).readlines()] if train_split_path.exists() else [] ) val_scenes = ( [x.strip() for x in open(val_split_path).readlines()] if val_split_path.exists() else [] ) lyft = LyftDataset(json_path=data_path / "data", data_path=data_path, verbose=True) available_scenes = lyft_utils.get_available_scenes(lyft) available_scene_names = [s["name"] for s in available_scenes] train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) train_scenes = set( [ available_scenes[available_scene_names.index(s)]["token"] for s in train_scenes ] ) val_scenes = set( [available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes] ) print( "%s: train scene(%d), val scene(%d)" % (version, len(train_scenes), len(val_scenes)) ) train_lyft_infos, val_lyft_infos = lyft_utils.fill_trainval_infos( data_path=data_path, lyft=lyft, train_scenes=train_scenes, val_scenes=val_scenes, test="test" in version, ) if version == "test": print("test sample: %d" % len(train_lyft_infos)) with open(save_path / f"lyft_infos_test.pkl", "wb") as f: pickle.dump(train_lyft_infos, f) else: print( "train sample: %d, val sample: %d" 
% (len(train_lyft_infos), len(val_lyft_infos)) ) with open(save_path / f"lyft_infos_train.pkl", "wb") as f: pickle.dump(train_lyft_infos, f) with open(save_path / f"lyft_infos_val.pkl", "wb") as f: pickle.dump(val_lyft_infos, f) if __name__ == "__main__": import argparse from pathlib import Path import yaml from easydict import EasyDict parser = argparse.ArgumentParser(description="arg parser") parser.add_argument( "--cfg_file", type=str, default=None, help="specify the config of dataset" ) parser.add_argument("--func", type=str, default="create_lyft_infos", help="") parser.add_argument("--version", type=str, default="train", help="") parser.add_argument("--split", type=str, default=None, help="") args = parser.parse_args() if args.func == "create_lyft_infos": dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file))) ROOT_DIR = (Path(__file__).resolve().parent / "../../../").resolve() dataset_cfg.VERSION = args.version create_lyft_info( version=dataset_cfg.VERSION, data_path=ROOT_DIR / "data" / "lyft", save_path=ROOT_DIR / "data" / "lyft", split=args.split, ) lyft_dataset = LyftDataset( dataset_cfg=dataset_cfg, class_names=None, root_path=ROOT_DIR / "data" / "lyft", logger=common_utils.create_logger(), training=True, ) # lyft_dataset.create_groundtruth_database()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/lyft/lyft_utils.py
Python
""" The Lyft data pre-processing and evaluation is modified from https://github.com/poodarchu/Det3D """ import operator from functools import reduce from pathlib import Path import numpy as np import tqdm from pyquaternion import Quaternion from lyft_dataset_sdk.lyftdataset import LyftDataset from lyft_dataset_sdk.utils.geometry_utils import transform_matrix def get_available_scenes(lyft): available_scenes = [] print("total scene num:", len(lyft.scene)) for scene in lyft.scene: scene_token = scene["token"] scene_rec = lyft.get("scene", scene_token) sample_rec = lyft.get("sample", scene_rec["first_sample_token"]) sd_rec = lyft.get("sample_data", sample_rec["data"]["LIDAR_TOP"]) has_more_frames = True scene_not_exist = False while has_more_frames: lidar_path, boxes, _ = lyft.get_sample_data(sd_rec["token"]) if not Path(lidar_path).exists(): scene_not_exist = True break else: break if scene_not_exist: continue available_scenes.append(scene) print("exist scene num:", len(available_scenes)) return available_scenes def get_sample_data(lyft, sample_data_token): sd_rec = lyft.get("sample_data", sample_data_token) cs_rec = lyft.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) sensor_rec = lyft.get("sensor", cs_rec["sensor_token"]) pose_rec = lyft.get("ego_pose", sd_rec["ego_pose_token"]) boxes = lyft.get_boxes(sample_data_token) box_list = [] for box in boxes: box.translate(-np.array(pose_rec["translation"])) box.rotate(Quaternion(pose_rec["rotation"]).inverse) box.translate(-np.array(cs_rec["translation"])) box.rotate(Quaternion(cs_rec["rotation"]).inverse) box_list.append(box) return box_list, pose_rec def quaternion_yaw(q: Quaternion) -> float: """ Calculate the yaw angle from a quaternion. Note that this only works for a quaternion that represents a box in lidar or global coordinate frame. It does not work for a box in the camera frame. :param q: Quaternion of interest. :return: Yaw angle in radians. """ # Project into xy plane. 
v = np.dot(q.rotation_matrix, np.array([1, 0, 0])) # Measure yaw using arctan. yaw = np.arctan2(v[1], v[0]) return yaw def fill_trainval_infos(data_path, lyft, train_scenes, val_scenes, test=False): train_lyft_infos = [] val_lyft_infos = [] progress_bar = tqdm.tqdm( total=len(lyft.sample), desc="create_info", dynamic_ncols=True ) ref_chans = ["LIDAR_TOP", "LIDAR_FRONT_LEFT", "LIDAR_FRONT_RIGHT"] for index, sample in enumerate(lyft.sample): progress_bar.update() ref_info = {} for ref_chan in ref_chans: if ref_chan not in sample["data"]: continue ref_sd_token = sample["data"][ref_chan] ref_sd_rec = lyft.get("sample_data", ref_sd_token) ref_cs_token = ref_sd_rec["calibrated_sensor_token"] ref_cs_rec = lyft.get("calibrated_sensor", ref_cs_token) ref_to_car = transform_matrix( ref_cs_rec["translation"], Quaternion(ref_cs_rec["rotation"]), inverse=False, ) ref_from_car = transform_matrix( ref_cs_rec["translation"], Quaternion(ref_cs_rec["rotation"]), inverse=True, ) ref_lidar_path = lyft.get_sample_data_path(ref_sd_token) ref_info[ref_chan] = { "lidar_path": Path(ref_lidar_path).relative_to(data_path).__str__(), "ref_from_car": ref_from_car, "ref_to_car": ref_to_car, } if ref_chan == "LIDAR_TOP": ref_boxes, ref_pose_rec = get_sample_data(lyft, ref_sd_token) ref_time = 1e-6 * ref_sd_rec["timestamp"] car_from_global = transform_matrix( ref_pose_rec["translation"], Quaternion(ref_pose_rec["rotation"]), inverse=True, ) info = { "ref_info": ref_info, "token": sample["token"], "car_from_global": car_from_global, "timestamp": ref_time, } if not test: annotations = [ lyft.get("sample_annotation", token) for token in sample["anns"] ] locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]] rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape( -1, 1 ) velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3) names = np.array([b.name for b in ref_boxes]) tokens = 
np.array([b.token for b in ref_boxes]).reshape(-1, 1) gt_boxes = np.concatenate([locs, dims, rots], axis=1) assert len(annotations) == len(gt_boxes) info["gt_boxes"] = gt_boxes info["gt_boxes_velocity"] = velocity info["gt_names"] = names info["gt_boxes_token"] = tokens if sample["scene_token"] in train_scenes: train_lyft_infos.append(info) else: val_lyft_infos.append(info) progress_bar.close() return train_lyft_infos, val_lyft_infos
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/nuscenes/nuscenes_dataset.py
Python
import copy import pickle import sys from pathlib import Path import numpy as np import open3d as o3d from nuscenes.nuscenes import NuScenes, transform_matrix from pyquaternion import Quaternion from tqdm import tqdm from lit.path_utils import get_lit_paths from lit.recon_utils import bboxes_to_lineset, scale_bboxes_by_domain from pcdet.datasets.dataset import DatasetTemplate from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils from pcdet.utils import box_utils, common_utils, self_training_utils def path_to_scene_and_timestamp(path): """ Args: path: samples/LIDAR_TOP/n008-2018-08-01-15-16-36-0400__LIDAR_TOP__1533151603547590.pcd.bin Return: ("n008-2018-08-01-15-16-36-0400", "1533151603547590") """ path = Path(path) path_stem = Path(path.stem).stem tokens = path_stem.split("__") if len(tokens) != 3: raise ValueError(f"Invalid path: {path}") scene = tokens[0] timestamp = tokens[2] return scene, timestamp class NuScenesDataset(DatasetTemplate): def __init__( self, dataset_cfg, class_names, training=True, root_path=None, logger=None, include_extras=False, # Include obj_ids ): self.include_extras = include_extras root_path = ( root_path if root_path is not None else Path(dataset_cfg.DATA_PATH) ) / dataset_cfg.VERSION super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger, ) self.infos = [] self.include_nuscenes_data(self.mode) if self.training and self.dataset_cfg.get("BALANCED_RESAMPLING", False): self.infos = self.balanced_infos_resampling(self.infos) # Load the official NuScenes object for tokens mappings. E.g., we need # to get the instance_token given a annotation token. The hierarchy for # nuScenes is: # - log # log.json (we don't need to care) # - scene # scene.json # - sample # sample.json # - sample_annotation # sample_annotation.json # # Our goal is to: # 1) Extract scenes, each scene has list of Frames. 
# 2) Each Frame contains: # - [ ] scene_name # - [ ] sample_idx # - [ ] points # - [ ] num_points_of_each_lidar # - [ ] gt_boxes # - [ ] obj_ids # - [ ] frame_pose # - [ ] lidar_to_vehicle_poses # # sample_annotation.json: format of a box's annotation # { # # box token, e.g. infos["gt_boxes_token"][0] # "token": "173a50411564442ab195e132472fde71", # # token of the frame" # "sample_token": "e93e98b63d3b40209056d129dc53ceee", # # object token, i.e our obj_id # "instance_token": "5e2b6fd1fab74d04a79eefebbec357bb", # "visibility_token": "4", # "attribute_tokens": [], # "translation": [994.031, 612.51, 0.728], # "size": [0.3, 0.291, 0.734], # "rotation": [-0.04208490861058176, 0.0, 0.0, 0.9991140377690821], # "prev": "", # "next": "35034272eb1f413187ae7b6affb6ec7a", # "num_lidar_pts": 2, # "num_radar_pts": 0, # } # # instance.json: format of an instance object. # { # "token": "5e2b6fd1fab74d04a79eefebbec357bb", # "category_token": "85abebdccd4d46c7be428af5a6173947", # "nbr_annotations": 13, # "first_annotation_token": "173a50411564442ab195e132472fde71", # "last_annotation_token": "2cd832644d09479389ed0785e5de85c9", # } # # Ref: # https://www.nuscenes.org/tutorials/nuscenes_tutorial.html self.nusc = NuScenes( version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True, ) # Check if we shall use simulated data. # E.g., set dataset_cfg.DST_STYLE to "kitti" to use simulated # nuScenes data with KITTI style. self.dst_style = self.dataset_cfg.get("DST_STYLE", None) # Replace info["lidar_path"] with simulated lidar_path. # Not all training frames have simulated data, thus the self.infos may # not have the same length as before. 
if self.dst_style == "kitti": new_infos = [] # Ref: init_paths.py lit_paths = get_lit_paths( data_version=self.dataset_cfg.DATA_VERSION, data_domain="nuscenes", ) sim_frame_dir = lit_paths.to_kitti_sim_frame_dir for info in self.infos: sample_dict = self.nusc.get("sample", info["token"]) scene_name = sample_dict["scene_token"] if scene_name in lit_paths.scene_list: sample_tokens_in_scene = self.get_sample_tokens_from_scene_token( scene_name ) frame_index = sample_tokens_in_scene.index(info["token"]) lidar_path = sim_frame_dir / scene_name / f"{frame_index:04d}.npz" if lidar_path.is_file(): info["lidar_path"] = str(lidar_path) new_infos.append(info) else: print( f"[WARNING] " f"{lidar_path} does not exist but it " f"is in scene_list for data version " f"{lit_paths.data_version}" ) print( f"Found {len(new_infos)} frames with simulated data " f"out of {len(self.infos)}." ) self.infos = new_infos # For debugging, clip the info to first 128. # self.infos = self.infos[:128] # self.infos = self.infos[:int(len(self.infos) * 0.1)] def include_nuscenes_data(self, mode): self.logger.info("Loading NuScenes dataset") nuscenes_infos = [] for info_path in self.dataset_cfg.INFO_PATH[mode]: info_path = self.root_path / info_path if not info_path.exists(): continue with open(info_path, "rb") as f: infos = pickle.load(f) nuscenes_infos.extend(infos) self.infos.extend(nuscenes_infos) self.logger.info( "Total samples for NuScenes dataset: %d" % (len(nuscenes_infos)) ) # Sample lidar paths sample_lidar_paths = sorted([info["lidar_path"] for info in self.infos]) if len(sample_lidar_paths) != len(self.infos): raise ValueError( f"sample_lidar_paths({len(sample_lidar_paths)}) != infos({len(self.infos)})" ) sample_logs_timestamps = [ path_to_scene_and_timestamp(p) for p in sample_lidar_paths ] sample_log_set = set([s for s, _ in sample_logs_timestamps]) # Sweep lidar paths # data/nuscenes/v1.0-mini/sweeps/LIDAR_TOP sweep_dir = self.root_path / "sweeps" / "LIDAR_TOP" sweep_lidar_paths = 
sorted( [str(p.relative_to(self.root_path)) for p in sweep_dir.glob("*.bin")] ) sweep_log_timestamps = [ path_to_scene_and_timestamp(p) for p in sweep_lidar_paths ] # Filter sweep_scenes_timestamps with sample_scenes_set sweep_log_timestamps = [ (s, t) for s, t in sweep_log_timestamps if s in sample_log_set ] # Build log_map_timestamp_to_id # - log_map_timestamp_to_id["scene_x"]["1533151603547590"] = 0 # - log_map_timestamp_to_id["scene_x"]["1533151604048025"] = 1 # - ... # - log_map_timestamp_to_id["scene_x"]["1533151604548459"] = 100 # - log_map_timestamp_to_id["scene_y"]["1538984233547259"] = 0 # - log_map_timestamp_to_id["scene_y"]["1538984234047694"] = 1 # - ... # - log_map_timestamp_to_id["scene_y"]["1538984234548129"] = 100 all_log_timestamps = sample_logs_timestamps + sweep_log_timestamps all_log_timestamps = sorted(all_log_timestamps) log_map_timestamp_to_id = {} for log, timestamp in all_log_timestamps: if log not in log_map_timestamp_to_id: log_map_timestamp_to_id[log] = {} if timestamp in log_map_timestamp_to_id[log]: raise ValueError(f"Duplicate timestamp({timestamp}) in scene({log})") log_map_timestamp_to_id[log][timestamp] = len(log_map_timestamp_to_id[log]) # Print stats # self.logger.info("log, num_samples, num_sweeps, num_total") # for log in sorted(log_map_timestamp_to_id.keys()): # num_samples = len([t for s, t in sample_logs_timestamps if s == log]) # num_sweeps = len([t for s, t in sweep_log_timestamps if s == log]) # num_total = len(log_map_timestamp_to_id[log]) # if num_samples + num_sweeps != num_total: # raise ValueError( # f"num_samples({num_samples}) + num_sweeps({num_sweeps}) != num_total({num_total})" # ) # self.logger.info(f"{log}, {num_samples}, {num_sweeps}, {num_total}") self.log_map_timestamp_to_id = log_map_timestamp_to_id def balanced_infos_resampling(self, infos): """ Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492 """ if self.class_names is None: return infos cls_infos = {name: [] for name in 
self.class_names} for info in infos: for name in set(info["gt_names"]): if name in self.class_names: cls_infos[name].append(info) duplicated_samples = sum([len(v) for _, v in cls_infos.items()]) cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()} sampled_infos = [] frac = 1.0 / len(self.class_names) ratios = [frac / v for v in cls_dist.values()] for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios): sampled_infos += np.random.choice( cur_cls_infos, int(len(cur_cls_infos) * ratio) ).tolist() self.logger.info( "Total samples after balanced resampling: %s" % (len(sampled_infos)) ) cls_infos_new = {name: [] for name in self.class_names} for info in sampled_infos: for name in set(info["gt_names"]): if name in self.class_names: cls_infos_new[name].append(info) cls_dist_new = { k: len(v) / len(sampled_infos) for k, v in cls_infos_new.items() } return sampled_infos @staticmethod def remove_ego_points(points, center_radius=1.0): # By default, # +x (red) : right # +y (green): front # +z (blue) : up mask = ~( (np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius * 1.5) ) return points[mask] def get_sweep(self, sweep_info): """ This function is typically not used for domain adaptation, as we always set max_sweeps=1 for train/eval. 
""" lidar_path = self.root_path / sweep_info["lidar_path"] points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape( [-1, 5] )[:, :4] points_sweep = self.remove_ego_points(points_sweep).T if sweep_info["transform_matrix"] is not None: num_points = points_sweep.shape[1] points_sweep[:3, :] = sweep_info["transform_matrix"].dot( np.vstack((points_sweep[:3, :], np.ones(num_points))) )[:3, :] cur_times = sweep_info["time_lag"] * np.ones((1, points_sweep.shape[1])) return points_sweep.T, cur_times.T def get_lidar_with_sweeps(self, index, max_sweeps=1, force_nearest_sweeps=False): info = self.infos[index] if Path(info["lidar_path"]).suffix == ".bin": lidar_path = self.root_path / info["lidar_path"] # Load the raw nuScenes bin file. points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape( [-1, 5] )[:, :4] elif Path(info["lidar_path"]).suffix == ".npz": lidar_path = info["lidar_path"] # Already the full relative path # Load "points" from npz (N, 3). points = np.load(lidar_path)["local_points"] # Append zeros as intensity -> (N, 4). points = np.concatenate( ( points, np.zeros((points.shape[0], 1), dtype=np.float32), ), axis=1, ).astype(np.float32) else: raise ValueError(f"Unknown lidar_path.suffix: {lidar_path}") points = self.remove_ego_points(points, center_radius=1.5) sweep_points_list = [points] sweep_times_list = [np.zeros((points.shape[0], 1))] # Get sweeps before the current key frame and after the previous key frame. # If max_sweeps=1, then this has no effect. 
if force_nearest_sweeps: ks = range(max_sweeps - 1) else: ks = np.random.choice(len(info["sweeps"]), max_sweeps - 1, replace=False) for k in ks: points_sweep, times_sweep = self.get_sweep(info["sweeps"][k]) sweep_points_list.append(points_sweep) sweep_times_list.append(times_sweep) points = np.concatenate(sweep_points_list, axis=0) times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype) points = np.concatenate((points, times), axis=1) return points def __len__(self): if self._merge_all_iters_to_one_epoch: return len(self.infos) * self.total_epochs return len(self.infos) def get_sample_tokens_from_scene_token(self, scene_token): """ Return a list of ordered sample tokens for a scene token. https://github.com/nutonomy/nuscenes-devkit/issues/713#issuecomment-1030722858 """ scene_dict = self.nusc.get("scene", scene_token) curr_token = scene_dict["first_sample_token"] keep_looping = True sample_tokens = [] while keep_looping: sample_tokens.append(curr_token) if curr_token == scene_dict["last_sample_token"]: keep_looping = False sample_dict = self.nusc.get("sample", curr_token) next_token = sample_dict["next"] curr_token = next_token assert len(sample_tokens) == scene_dict["nbr_samples"] return sample_tokens def __getitem__(self, index): if self._merge_all_iters_to_one_epoch: index = index % len(self.infos) info = copy.deepcopy(self.infos[index]) force_nearest_sweeps = self.dataset_cfg.get("FORCE_NEAREST_SWEEPS", False) # self.logger.info(f"max_sweeps: {self.dataset_cfg.MAX_SWEEPS}") # self.logger.info(f"force_nearest_sweeps: {force_nearest_sweeps}") points = self.get_lidar_with_sweeps( index, max_sweeps=self.dataset_cfg.MAX_SWEEPS, force_nearest_sweeps=force_nearest_sweeps, ) # Noise with std 0.01 # points[:, :3] += np.random.normal(0, 0.01, points[:, :3].shape) # Noise with std 0.1 # points[:, :3] += np.random.normal(0, 0.1, points[:, :3].shape) # Random ray drop with ratio 0.1 # mask = np.random.rand(points.shape[0]) > 0.1 # points = points[mask] # 
Random ray drop with ratio 0.2 # mask = np.random.rand(points.shape[0]) > 0.2 # points = points[mask] # Get the frame index within the scene. # - sample_dict["scene_token"]: folder name # - frame_index: xxxx.npy sample_dict = self.nusc.get("sample", info["token"]) sample_tokens_in_scene = self.get_sample_tokens_from_scene_token( sample_dict["scene_token"] ) frame_index = sample_tokens_in_scene.index(info["token"]) vis_points_sim = False if vis_points_sim: sim_frame_root = ( Path.home() / "research/lit/data/nuscenes/08_kitti_sim_frame" ) sim_frame_path = ( sim_frame_root / sample_dict["scene_token"] / f"{frame_index:04d}.npy" ) points_sim = np.load(sim_frame_path) pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(points[:, :3]) pcd.paint_uniform_color([0, 0, 1]) pcd_sim = o3d.geometry.PointCloud() pcd_sim.points = o3d.utility.Vector3dVector(points_sim) pcd_sim.paint_uniform_color([1, 0, 0]) coords = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0) o3d.visualization.draw_geometries([pcd, pcd_sim, coords]) if self.dataset_cfg.get("SHIFT_COOR", None): points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32) # For purposes of getting points and poses, we only care about LIDAR_TOP. lidar_top_sample_data = self.nusc.get( "sample_data", sample_dict["data"]["LIDAR_TOP"] ) lidar_top_ego_pose = self.nusc.get( "ego_pose", lidar_top_sample_data["ego_pose_token"] ) lidar_top_calibrated_sensor = self.nusc.get( "calibrated_sensor", lidar_top_sample_data["calibrated_sensor_token"] ) lidar_top_to_vehicle_pose = transform_matrix( lidar_top_calibrated_sensor["translation"], Quaternion(lidar_top_calibrated_sensor["rotation"]), inverse=False, ) lidar_to_vehicle_poses = [lidar_top_to_vehicle_pose] # Frame pose (ego pose). 
frame_pose = transform_matrix( lidar_top_ego_pose["translation"], Quaternion(lidar_top_ego_pose["rotation"]), inverse=False, ) sanity_check_different_ego_pose = False if sanity_check_different_ego_pose: # Note that the number of ego_pose records in our loaded database is # the same as the number of sample_data records. These two records # exhibit a one-to-one correspondence. All the sensors on the ego do # not necessarily take their respective readings at the exact same # time. So to be very accurate, every sensor reading has an ego_pose # associated with the timestamp the reading was taken at. # # This means that the "ego_pose" retrieved from "CAM_BACK" and # "LIDAR_TOP" will have different tokens but the value are very # similar. # # See: https://github.com/nutonomy/nuscenes-devkit/issues/744 cam_back_sample_data = self.nusc.get( "sample_data", sample_dict["data"]["CAM_BACK"] ) cam_back_ego_pose = self.nusc.get( "ego_pose", cam_back_sample_data["ego_pose_token"] ) cam_back_calibrated_sensor = self.nusc.get( "calibrated_sensor", cam_back_sample_data["calibrated_sensor_token"] ) np.testing.assert_allclose( lidar_top_ego_pose["rotation"], cam_back_ego_pose["rotation"], atol=1e-2, rtol=1e-3, ) np.testing.assert_allclose( lidar_top_ego_pose["translation"], cam_back_ego_pose["translation"], atol=1e-2, rtol=1e-3, ) input_dict = { "points": points, "frame_id": Path(info["lidar_path"]).stem, "metadata": {"token": info["token"]}, "sample_idx": frame_index, # Consistent with WaymoDataset. } if "gt_boxes" in info: # For each gt_box, get the instance token to identify the object. # The gt_box may be filtered by nuscenes_utils.py::fill_trainval_infos. instance_tokens = [ self.nusc.get("sample_annotation", sample_annotation_token)[ "instance_token" ] for sample_annotation_token in info["gt_boxes_token"] ] # To be consistent to Waymo, we call it obj_ids. # Convert to numpy array first, for easy indexing. 
obj_ids = np.array(instance_tokens, dtype=np.str_) assert len(obj_ids) == len(info["gt_boxes"]) if self.dataset_cfg.get("FILTER_MIN_POINTS_IN_GT", False): mask = ( info["num_lidar_pts"] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1 ) else: mask = None if mask is None: input_dict["gt_names"] = info["gt_names"] input_dict["gt_boxes"] = info["gt_boxes"] input_dict["obj_ids"] = obj_ids else: input_dict["gt_names"] = info["gt_names"][mask] input_dict["gt_boxes"] = info["gt_boxes"][mask] input_dict["obj_ids"] = obj_ids[mask] assert len(input_dict["gt_boxes"]) == len(input_dict["obj_ids"]) if self.dataset_cfg.get("SHIFT_COOR", None): input_dict["gt_boxes"][:, 0:3] += self.dataset_cfg.SHIFT_COOR if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: input_dict["gt_boxes"] = None # for debug only # gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_) # debug_dict = {'gt_boxes': copy.deepcopy(input_dict['gt_boxes'][gt_boxes_mask])} if self.dataset_cfg.get("FOV_POINTS_ONLY", None): input_dict["points"] = self.extract_fov_data( input_dict["points"], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE, ) if input_dict["gt_boxes"] is not None: fov_gt_flag = self.extract_fov_gt( input_dict["gt_boxes"], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE, ) input_dict.update( { "gt_names": input_dict["gt_names"][fov_gt_flag], "gt_boxes": input_dict["gt_boxes"][fov_gt_flag], "obj_ids": input_dict["obj_ids"][fov_gt_flag], } ) assert len(input_dict["gt_boxes"]) == len(input_dict["obj_ids"]) if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: self.fill_pseudo_labels(input_dict) if self.dataset_cfg.get( "SET_NAN_VELOCITY_TO_ZEROS", False ) and not self.dataset_cfg.get("USE_PSEUDO_LABEL", None): gt_boxes = input_dict["gt_boxes"] gt_boxes[np.isnan(gt_boxes)] = 0 input_dict["gt_boxes"] = gt_boxes if ( not self.dataset_cfg.PRED_VELOCITY and "gt_boxes" in input_dict and not 
self.dataset_cfg.get("USE_PSEUDO_LABEL", None) ): input_dict["gt_boxes"] = input_dict["gt_boxes"][:, [0, 1, 2, 3, 4, 5, 6]] # Insert properties first, as self.prepare_data may recursively call __getitem__. input_dict["pose"] = frame_pose input_dict["lidar_to_vehicle_poses"] = lidar_to_vehicle_poses data_dict = self.prepare_data(data_dict=input_dict) # Unlike waymo we do this after prepare_data, as we hard-code to use the # top lidar. data_dict["num_points_of_each_lidar"] = [len(data_dict["points"])] # Extra info. if self.include_extras: obj_ids = [str(obj_id) for obj_id in data_dict["obj_ids"]] data_dict["obj_ids"] = obj_ids else: if "obj_ids" in data_dict: data_dict.pop("obj_ids") # Rotate from y pointing to front to x pointing to front. # - Rotate points # - Rotate gt_boxes return data_dict def generate_prediction_dicts( self, batch_dict, pred_dicts, class_names, output_path=None ): """ Args: batch_dict: frame_id: pred_dicts: list of pred_dicts pred_boxes: (N, 7), Tensor pred_scores: (N), Tensor pred_labels: (N), Tensor class_names: output_path: Returns: """ def get_template_prediction(num_samples): ret_dict = { "name": np.zeros(num_samples), "score": np.zeros(num_samples), "boxes_lidar": np.zeros([num_samples, 7]), "pred_labels": np.zeros(num_samples), } return ret_dict def generate_single_sample_dict(box_dict): pred_scores = box_dict["pred_scores"].cpu().numpy() pred_boxes = box_dict["pred_boxes"].cpu().numpy() pred_labels = box_dict["pred_labels"].cpu().numpy() pred_dict = get_template_prediction(pred_scores.shape[0]) if pred_scores.shape[0] == 0: return pred_dict if self.dataset_cfg.get("SHIFT_COOR", None): pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR pred_dict["name"] = np.array(class_names)[pred_labels - 1] pred_dict["score"] = pred_scores pred_dict["boxes_lidar"] = pred_boxes pred_dict["pred_labels"] = pred_labels return pred_dict annos = [] for index, box_dict in enumerate(pred_dicts): single_pred_dict = generate_single_sample_dict(box_dict) 
single_pred_dict["frame_id"] = batch_dict["frame_id"][index] single_pred_dict["metadata"] = batch_dict["metadata"][index] annos.append(single_pred_dict) return annos def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names): from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval map_name_to_kitti = { "car": "Car", "pedestrian": "Pedestrian", "truck": "Truck", } def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False): for anno in annos: if "name" not in anno: anno["name"] = anno["gt_names"] anno.pop("gt_names") for k in range(anno["name"].shape[0]): if anno["name"][k] in map_name_to_kitti: anno["name"][k] = map_name_to_kitti[anno["name"][k]] else: anno["name"][k] = "Person_sitting" if "boxes_lidar" in anno: gt_boxes_lidar = anno["boxes_lidar"].copy() else: gt_boxes_lidar = anno["gt_boxes"].copy() # filter by fov if is_gt and self.dataset_cfg.get("GT_FILTER", None): if self.dataset_cfg.GT_FILTER.get("FOV_FILTER", None): fov_gt_flag = self.extract_fov_gt( gt_boxes_lidar, self.dataset_cfg["FOV_DEGREE"], self.dataset_cfg["FOV_ANGLE"], ) gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag] anno["name"] = anno["name"][fov_gt_flag] anno["bbox"] = np.zeros((len(anno["name"]), 4)) anno["bbox"][:, 2:4] = 50 # [0, 0, 50, 50] anno["truncated"] = np.zeros(len(anno["name"])) anno["occluded"] = np.zeros(len(anno["name"])) if len(gt_boxes_lidar) > 0: if info_with_fakelidar: gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar( gt_boxes_lidar ) gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2 anno["location"] = np.zeros((gt_boxes_lidar.shape[0], 3)) anno["location"][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar anno["location"][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar anno["location"][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar dxdydz = gt_boxes_lidar[:, 3:6] anno["dimensions"] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw anno["rotation_y"] = -gt_boxes_lidar[:, 6] - np.pi / 2.0 anno["alpha"] = ( -np.arctan2(-gt_boxes_lidar[:, 1], 
gt_boxes_lidar[:, 0]) + anno["rotation_y"] ) else: anno["location"] = anno["dimensions"] = np.zeros((0, 3)) anno["rotation_y"] = anno["alpha"] = np.zeros(0) transform_to_kitti_format(eval_det_annos) transform_to_kitti_format(eval_gt_annos, info_with_fakelidar=False, is_gt=True) kitti_class_names = [] for x in class_names: if x in map_name_to_kitti: kitti_class_names.append(map_name_to_kitti[x]) else: kitti_class_names.append("Person_sitting") ap_result_str, ap_dict = kitti_eval.get_official_eval_result( gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names, ) return ap_result_str, ap_dict def nuscene_eval(self, det_annos, class_names, **kwargs): import json from nuscenes.nuscenes import NuScenes from pcdet.datasets.nuscenes import nuscenes_utils nusc = NuScenes( version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True ) nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(det_annos, nusc) nusc_annos["meta"] = { "use_camera": False, "use_lidar": True, "use_radar": False, "use_map": False, "use_external": False, } output_path = Path(kwargs["output_path"]) output_path.mkdir(exist_ok=True, parents=True) res_path = str(output_path / "results_nusc.json") with open(res_path, "w") as f: json.dump(nusc_annos, f) self.logger.info(f"The predictions of NuScenes have been saved to {res_path}") if self.dataset_cfg.VERSION == "v1.0-test": return "No ground-truth annotations for evaluation", {} from nuscenes.eval.detection.config import config_factory from nuscenes.eval.detection.evaluate import NuScenesEval eval_set_map = { "v1.0-mini": "mini_val", "v1.0-trainval": "val", "v1.0-test": "test", } try: eval_version = "detection_cvpr_2019" eval_config = config_factory(eval_version) except: eval_version = "cvpr_2019" eval_config = config_factory(eval_version) nusc_eval = NuScenesEval( nusc, config=eval_config, result_path=res_path, eval_set=eval_set_map[self.dataset_cfg.VERSION], output_dir=str(output_path), verbose=True, ) 
metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False) with open(output_path / "metrics_summary.json", "r") as f: metrics = json.load(f) result_str, result_dict = nuscenes_utils.format_nuscene_results( metrics, self.class_names, version=eval_version ) return result_str, result_dict def evaluation(self, det_annos, class_names, **kwargs): if kwargs["eval_metric"] == "kitti": eval_det_annos = copy.deepcopy(det_annos) eval_gt_annos = copy.deepcopy(self.infos) return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names) elif kwargs["eval_metric"] == "nuscenes": return self.nuscene_eval(det_annos, class_names, **kwargs) else: raise NotImplementedError def create_groundtruth_database(self, used_classes=None, max_sweeps=10): import torch database_save_path = self.root_path / f"gt_database_{max_sweeps}sweeps_withvelo" db_info_save_path = ( self.root_path / f"nuscenes_dbinfos_{max_sweeps}sweeps_withvelo.pkl" ) database_save_path.mkdir(parents=True, exist_ok=True) all_db_infos = {} for idx in tqdm(range(len(self.infos))): sample_idx = idx info = self.infos[idx] points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps) gt_boxes = info["gt_boxes"] gt_names = info["gt_names"] box_idxs_of_pts = ( roiaware_pool3d_utils.points_in_boxes_gpu( torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(), torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda(), ) .long() .squeeze(dim=0) .cpu() .numpy() ) for i in range(gt_boxes.shape[0]): filename = "%s_%s_%d.bin" % (sample_idx, gt_names[i], i) filepath = database_save_path / filename gt_points = points[box_idxs_of_pts == i] gt_points[:, :3] -= gt_boxes[i, :3] with open(filepath, "w") as f: gt_points.tofile(f) if (used_classes is None) or gt_names[i] in used_classes: db_path = str( filepath.relative_to(self.root_path) ) # gt_database/xxxxx.bin db_info = { "name": gt_names[i], "path": db_path, "image_idx": sample_idx, "gt_idx": i, "box3d_lidar": gt_boxes[i], "num_points_in_gt": 
gt_points.shape[0], } if gt_names[i] in all_db_infos: all_db_infos[gt_names[i]].append(db_info) else: all_db_infos[gt_names[i]] = [db_info] for k, v in all_db_infos.items(): print("Database %s: %d" % (k, len(v))) with open(db_info_save_path, "wb") as f: pickle.dump(all_db_infos, f) def create_nuscenes_info(version, data_path, save_path, max_sweeps=10): from nuscenes.nuscenes import NuScenes from nuscenes.utils import splits from pcdet.datasets.nuscenes import nuscenes_utils data_path = data_path / version save_path = save_path / version assert version in ["v1.0-trainval", "v1.0-test", "v1.0-mini"] if version == "v1.0-trainval": train_scenes = splits.train val_scenes = splits.val elif version == "v1.0-test": train_scenes = splits.test val_scenes = [] elif version == "v1.0-mini": train_scenes = splits.mini_train val_scenes = splits.mini_val else: raise NotImplementedError nusc = NuScenes(version=version, dataroot=data_path, verbose=True) available_scenes = nuscenes_utils.get_available_scenes(nusc) available_scene_names = [s["name"] for s in available_scenes] train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) train_scenes = set( [ available_scenes[available_scene_names.index(s)]["token"] for s in train_scenes ] ) val_scenes = set( [available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes] ) print( "%s: train scene(%d), val scene(%d)" % (version, len(train_scenes), len(val_scenes)) ) train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos( data_path=data_path, nusc=nusc, train_scenes=train_scenes, val_scenes=val_scenes, test="test" in version, max_sweeps=max_sweeps, ) if version == "v1.0-test": print("test sample: %d" % len(train_nusc_infos)) with open(save_path / f"nuscenes_infos_{max_sweeps}sweeps_test.pkl", "wb") as f: pickle.dump(train_nusc_infos, f) else: print( "train sample: %d, val sample: %d" % (len(train_nusc_infos), 
len(val_nusc_infos)) ) with open( save_path / f"nuscenes_infos_{max_sweeps}sweeps_train.pkl", "wb" ) as f: pickle.dump(train_nusc_infos, f) with open(save_path / f"nuscenes_infos_{max_sweeps}sweeps_val.pkl", "wb") as f: pickle.dump(val_nusc_infos, f) if __name__ == "__main__": import argparse from pathlib import Path import yaml from easydict import EasyDict parser = argparse.ArgumentParser(description="arg parser") parser.add_argument( "--cfg_file", type=str, default=None, help="specify the config of dataset" ) parser.add_argument("--func", type=str, default="create_nuscenes_infos", help="") parser.add_argument("--version", type=str, default="v1.0-trainval", help="") args = parser.parse_args() if args.func == "create_nuscenes_infos": dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file))) ROOT_DIR = (Path(__file__).resolve().parent / "../../../").resolve() dataset_cfg.VERSION = args.version create_nuscenes_info( version=dataset_cfg.VERSION, data_path=ROOT_DIR / "data" / "nuscenes", save_path=ROOT_DIR / "data" / "nuscenes", max_sweeps=dataset_cfg.MAX_SWEEPS, ) nuscenes_dataset = NuScenesDataset( dataset_cfg=dataset_cfg, class_names=None, root_path=ROOT_DIR / "data" / "nuscenes", logger=common_utils.create_logger(), training=True, ) nuscenes_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS) nuscenes_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/nuscenes/nuscenes_utils.py
Python
""" The NuScenes data pre-processing and evaluation is modified from https://github.com/traveller59/second.pytorch and https://github.com/poodarchu/Det3D """ import operator from functools import reduce from pathlib import Path import numpy as np import tqdm from nuscenes.utils.data_classes import Box from nuscenes.utils.geometry_utils import transform_matrix from pyquaternion import Quaternion map_name_from_general_to_detection = { "human.pedestrian.adult": "pedestrian", "human.pedestrian.child": "pedestrian", "human.pedestrian.wheelchair": "ignore", "human.pedestrian.stroller": "ignore", "human.pedestrian.personal_mobility": "ignore", "human.pedestrian.police_officer": "pedestrian", "human.pedestrian.construction_worker": "pedestrian", "animal": "ignore", "vehicle.car": "car", "vehicle.motorcycle": "motorcycle", "vehicle.bicycle": "bicycle", "vehicle.bus.bendy": "bus", "vehicle.bus.rigid": "bus", "vehicle.truck": "truck", "vehicle.construction": "construction_vehicle", "vehicle.emergency.ambulance": "ignore", "vehicle.emergency.police": "ignore", "vehicle.trailer": "trailer", "movable_object.barrier": "barrier", "movable_object.trafficcone": "traffic_cone", "movable_object.pushable_pullable": "ignore", "movable_object.debris": "ignore", "static_object.bicycle_rack": "ignore", } cls_attr_dist = { "barrier": { "cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 0, "vehicle.parked": 0, "vehicle.stopped": 0, }, "bicycle": { "cycle.with_rider": 2791, "cycle.without_rider": 8946, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 0, "vehicle.parked": 0, "vehicle.stopped": 0, }, "bus": { "cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 9092, "vehicle.parked": 3294, "vehicle.stopped": 3881, }, "car": { 
"cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 114304, "vehicle.parked": 330133, "vehicle.stopped": 46898, }, "construction_vehicle": { "cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 882, "vehicle.parked": 11549, "vehicle.stopped": 2102, }, "ignore": { "cycle.with_rider": 307, "cycle.without_rider": 73, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 165, "vehicle.parked": 400, "vehicle.stopped": 102, }, "motorcycle": { "cycle.with_rider": 4233, "cycle.without_rider": 8326, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 0, "vehicle.parked": 0, "vehicle.stopped": 0, }, "pedestrian": { "cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 157444, "pedestrian.sitting_lying_down": 13939, "pedestrian.standing": 46530, "vehicle.moving": 0, "vehicle.parked": 0, "vehicle.stopped": 0, }, "traffic_cone": { "cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 0, "vehicle.parked": 0, "vehicle.stopped": 0, }, "trailer": { "cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 3421, "vehicle.parked": 19224, "vehicle.stopped": 1895, }, "truck": { "cycle.with_rider": 0, "cycle.without_rider": 0, "pedestrian.moving": 0, "pedestrian.sitting_lying_down": 0, "pedestrian.standing": 0, "vehicle.moving": 21339, "vehicle.parked": 55626, "vehicle.stopped": 11097, }, } def get_available_scenes(nusc): available_scenes = [] print("total scene num:", len(nusc.scene)) for scene in nusc.scene: scene_token = scene["token"] scene_rec = nusc.get("scene", scene_token) 
sample_rec = nusc.get("sample", scene_rec["first_sample_token"]) sd_rec = nusc.get("sample_data", sample_rec["data"]["LIDAR_TOP"]) has_more_frames = True scene_not_exist = False while has_more_frames: lidar_path, boxes, _ = nusc.get_sample_data(sd_rec["token"]) if not Path(lidar_path).exists(): scene_not_exist = True break else: break # if not sd_rec['next'] == '': # sd_rec = nusc.get('sample_data', sd_rec['next']) # else: # has_more_frames = False if scene_not_exist: continue available_scenes.append(scene) print("exist scene num:", len(available_scenes)) return available_scenes def get_sample_data(nusc, sample_data_token, selected_anntokens=None): """ Returns the data path as well as all annotations related to that sample_data. Note that the boxes are transformed into the current sensor's coordinate frame. Args: nusc: sample_data_token: Sample_data token. selected_anntokens: If provided only return the selected annotation. Returns: """ # Retrieve sensor & pose records sd_record = nusc.get("sample_data", sample_data_token) cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"]) sensor_record = nusc.get("sensor", cs_record["sensor_token"]) pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"]) data_path = nusc.get_sample_data_path(sample_data_token) if sensor_record["modality"] == "camera": cam_intrinsic = np.array(cs_record["camera_intrinsic"]) imsize = (sd_record["width"], sd_record["height"]) else: cam_intrinsic = imsize = None # Retrieve all sample annotations and map to sensor coordinate system. if selected_anntokens is not None: boxes = list(map(nusc.get_box, selected_anntokens)) else: boxes = nusc.get_boxes(sample_data_token) # Make list of Box objects including coord system transforms. 
box_list = [] for box in boxes: box.velocity = nusc.box_velocity(box.token) # Move box to ego vehicle coord system box.translate(-np.array(pose_record["translation"])) box.rotate(Quaternion(pose_record["rotation"]).inverse) # Move box to sensor coord system box.translate(-np.array(cs_record["translation"])) box.rotate(Quaternion(cs_record["rotation"]).inverse) box_list.append(box) return data_path, box_list, cam_intrinsic def quaternion_yaw(q: Quaternion) -> float: """ Calculate the yaw angle from a quaternion. Note that this only works for a quaternion that represents a box in lidar or global coordinate frame. It does not work for a box in the camera frame. :param q: Quaternion of interest. :return: Yaw angle in radians. """ # Project into xy plane. v = np.dot(q.rotation_matrix, np.array([1, 0, 0])) # Measure yaw using arctan. yaw = np.arctan2(v[1], v[0]) return yaw def fill_trainval_infos( data_path, nusc, train_scenes, val_scenes, test=False, max_sweeps=10 ): train_nusc_infos = [] val_nusc_infos = [] progress_bar = tqdm.tqdm( total=len(nusc.sample), desc="create_info", dynamic_ncols=True ) ref_chan = "LIDAR_TOP" # The radar channel from which we track back n sweeps to aggregate the point cloud. chan = "LIDAR_TOP" # The reference channel of the current sample_rec that the point clouds are mapped to. 
for index, sample in enumerate(nusc.sample): progress_bar.update() ref_sd_token = sample["data"][ref_chan] ref_sd_rec = nusc.get("sample_data", ref_sd_token) ref_cs_rec = nusc.get( "calibrated_sensor", ref_sd_rec["calibrated_sensor_token"] ) ref_pose_rec = nusc.get("ego_pose", ref_sd_rec["ego_pose_token"]) ref_time = 1e-6 * ref_sd_rec["timestamp"] ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token) ref_cam_front_token = sample["data"]["CAM_FRONT"] ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token) # Homogeneous transform from ego car frame to reference frame ref_from_car = transform_matrix( ref_cs_rec["translation"], Quaternion(ref_cs_rec["rotation"]), inverse=True ) # Homogeneous transformation matrix from global to _current_ ego car frame car_from_global = transform_matrix( ref_pose_rec["translation"], Quaternion(ref_pose_rec["rotation"]), inverse=True, ) info = { "lidar_path": Path(ref_lidar_path).relative_to(data_path).__str__(), "cam_front_path": Path(ref_cam_path).relative_to(data_path).__str__(), "cam_intrinsic": ref_cam_intrinsic, "token": sample["token"], "sweeps": [], "ref_from_car": ref_from_car, "car_from_global": car_from_global, "timestamp": ref_time, } sample_data_token = sample["data"][chan] curr_sd_rec = nusc.get("sample_data", sample_data_token) sweeps = [] while len(sweeps) < max_sweeps - 1: if curr_sd_rec["prev"] == "": if len(sweeps) == 0: sweep = { "lidar_path": Path(ref_lidar_path) .relative_to(data_path) .__str__(), "sample_data_token": curr_sd_rec["token"], "transform_matrix": None, "time_lag": curr_sd_rec["timestamp"] * 0, } sweeps.append(sweep) else: sweeps.append(sweeps[-1]) else: curr_sd_rec = nusc.get("sample_data", curr_sd_rec["prev"]) # Get past pose current_pose_rec = nusc.get("ego_pose", curr_sd_rec["ego_pose_token"]) global_from_car = transform_matrix( current_pose_rec["translation"], Quaternion(current_pose_rec["rotation"]), inverse=False, ) # Homogeneous transformation matrix from 
sensor coordinate frame to ego car frame. current_cs_rec = nusc.get( "calibrated_sensor", curr_sd_rec["calibrated_sensor_token"] ) car_from_current = transform_matrix( current_cs_rec["translation"], Quaternion(current_cs_rec["rotation"]), inverse=False, ) tm = reduce( np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current], ) lidar_path = nusc.get_sample_data_path(curr_sd_rec["token"]) time_lag = ref_time - 1e-6 * curr_sd_rec["timestamp"] sweep = { "lidar_path": Path(lidar_path).relative_to(data_path).__str__(), "sample_data_token": curr_sd_rec["token"], "transform_matrix": tm, "global_from_car": global_from_car, "car_from_current": car_from_current, "time_lag": time_lag, } sweeps.append(sweep) info["sweeps"] = sweeps assert len(info["sweeps"]) == max_sweeps - 1, ( f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, " f"you should duplicate to sweep num {max_sweeps - 1}" ) if not test: annotations = [ nusc.get("sample_annotation", token) for token in sample["anns"] ] # the filtering gives 0.5~1 map improvement num_lidar_pts = np.array([anno["num_lidar_pts"] for anno in annotations]) num_radar_pts = np.array([anno["num_radar_pts"] for anno in annotations]) mask = num_lidar_pts + num_radar_pts > 0 locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[ :, [1, 0, 2] ] # wlh == > dxdydz (lwh) velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3) rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape( -1, 1 ) names = np.array([b.name for b in ref_boxes]) tokens = np.array([b.token for b in ref_boxes]) gt_boxes = np.concatenate([locs, dims, rots, velocity[:, :2]], axis=1) assert len(annotations) == len(gt_boxes) == len(velocity) info["gt_boxes"] = gt_boxes[mask, :] info["gt_boxes_velocity"] = velocity[mask, :] info["gt_names"] = np.array( [map_name_from_general_to_detection[name] for name in names] )[mask] info["gt_boxes_token"] = 
tokens[mask] info["num_lidar_pts"] = num_lidar_pts[mask] info["num_radar_pts"] = num_radar_pts[mask] if sample["scene_token"] in train_scenes: train_nusc_infos.append(info) else: val_nusc_infos.append(info) progress_bar.close() return train_nusc_infos, val_nusc_infos def boxes_lidar_to_nusenes(det_info): boxes3d = det_info["boxes_lidar"] scores = det_info["score"] labels = det_info["pred_labels"] box_list = [] for k in range(boxes3d.shape[0]): quat = Quaternion(axis=[0, 0, 1], radians=boxes3d[k, 6]) velocity = (*boxes3d[k, 7:9], 0.0) if boxes3d.shape[1] == 9 else (0.0, 0.0, 0.0) box = Box( boxes3d[k, :3], boxes3d[k, [4, 3, 5]], # wlh quat, label=labels[k], score=scores[k], velocity=velocity, ) box_list.append(box) return box_list def lidar_nusc_box_to_global(nusc, boxes, sample_token): s_record = nusc.get("sample", sample_token) sample_data_token = s_record["data"]["LIDAR_TOP"] sd_record = nusc.get("sample_data", sample_data_token) cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"]) sensor_record = nusc.get("sensor", cs_record["sensor_token"]) pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"]) data_path = nusc.get_sample_data_path(sample_data_token) box_list = [] for box in boxes: # Move box to ego vehicle coord system box.rotate(Quaternion(cs_record["rotation"])) box.translate(np.array(cs_record["translation"])) # Move box to global coord system box.rotate(Quaternion(pose_record["rotation"])) box.translate(np.array(pose_record["translation"])) box_list.append(box) return box_list def transform_det_annos_to_nusc_annos(det_annos, nusc): nusc_annos = { "results": {}, "meta": None, } for det in det_annos: annos = [] box_list = boxes_lidar_to_nusenes(det) box_list = lidar_nusc_box_to_global( nusc=nusc, boxes=box_list, sample_token=det["metadata"]["token"] ) for k, box in enumerate(box_list): name = det["name"][k] if np.sqrt(box.velocity[0] ** 2 + box.velocity[1] ** 2) > 0.2: if name in ["car", "construction_vehicle", "bus", 
"truck", "trailer"]: attr = "vehicle.moving" elif name in ["bicycle", "motorcycle"]: attr = "cycle.with_rider" else: attr = None else: if name in ["pedestrian"]: attr = "pedestrian.standing" elif name in ["bus"]: attr = "vehicle.stopped" else: attr = None attr = ( attr if attr is not None else max(cls_attr_dist[name].items(), key=operator.itemgetter(1))[0] ) nusc_anno = { "sample_token": det["metadata"]["token"], "translation": box.center.tolist(), "size": box.wlh.tolist(), "rotation": box.orientation.elements.tolist(), "velocity": box.velocity[:2].tolist(), "detection_name": name, "detection_score": box.score, "attribute_name": attr, } annos.append(nusc_anno) nusc_annos["results"].update({det["metadata"]["token"]: annos}) return nusc_annos def format_nuscene_results(metrics, class_names, version="default"): result = "----------------Nuscene %s results-----------------\n" % version for name in class_names: threshs = ", ".join(list(metrics["label_aps"][name].keys())) ap_list = list(metrics["label_aps"][name].values()) err_name = ", ".join( [x.split("_")[0] for x in list(metrics["label_tp_errors"][name].keys())] ) error_list = list(metrics["label_tp_errors"][name].values()) result += f"***{name} error@{err_name} | AP@{threshs}\n" result += ", ".join(["%.2f" % x for x in error_list]) + " | " result += ", ".join(["%.2f" % (x * 100) for x in ap_list]) result += f" | mean AP: {metrics['mean_dist_aps'][name]}" result += "\n" result += "--------------average performance-------------\n" details = {} for key, val in metrics["tp_errors"].items(): result += "%s:\t %.4f\n" % (key, val) details[key] = val result += "mAP:\t %.4f\n" % metrics["mean_ap"] result += "NDS:\t %.4f\n" % metrics["nd_score"] details.update( { "mAP": metrics["mean_ap"], "NDS": metrics["nd_score"], } ) return result, details
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/processor/data_processor.py
Python
from functools import partial import numpy as np from pcdet.utils import box_utils, common_utils class DataProcessor(object): def __init__(self, processor_configs, point_cloud_range, training): self.point_cloud_range = point_cloud_range self.training = training self.mode = "train" if training else "test" self.grid_size = self.voxel_size = None self.data_processor_queue = [] for cur_cfg in processor_configs: cur_processor = getattr(self, cur_cfg.NAME)(config=cur_cfg) self.data_processor_queue.append(cur_processor) def mask_boxes_outside_length(self, data_dict=None, config=None): if data_dict is None: return partial(self.mask_boxes_outside_length, config=config) min_mask = data_dict["gt_boxes"][:, 3] >= config["LENGTH_RANGE"][0] max_mask = data_dict["gt_boxes"][:, 3] <= config["LENGTH_RANGE"][1] mask = min_mask & max_mask data_dict["gt_boxes"] = data_dict["gt_boxes"][mask] if "obj_ids" in data_dict: data_dict["obj_ids"] = data_dict["obj_ids"][mask] return data_dict def mask_points_and_boxes_outside_range(self, data_dict=None, config=None): if data_dict is None: return partial(self.mask_points_and_boxes_outside_range, config=config) mask = common_utils.mask_points_by_range( data_dict["points"], self.point_cloud_range ) data_dict["points"] = data_dict["points"][mask] if ( data_dict.get("gt_boxes", None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training ): mask = box_utils.mask_boxes_outside_range_numpy( data_dict["gt_boxes"], self.point_cloud_range, min_num_corners=config.get("min_num_corners", 1), ) data_dict["gt_boxes"] = data_dict["gt_boxes"][mask] if "obj_ids" in data_dict: data_dict["obj_ids"] = data_dict["obj_ids"][mask] return data_dict def shuffle_points(self, data_dict=None, config=None): if data_dict is None: return partial(self.shuffle_points, config=config) if config.SHUFFLE_ENABLED[self.mode]: points = data_dict["points"] shuffle_idx = np.random.permutation(points.shape[0]) points = points[shuffle_idx] data_dict["points"] = points return 
data_dict def transform_points_to_voxels( self, data_dict=None, config=None, voxel_generator=None ): if data_dict is None: try: from spconv.utils import VoxelGeneratorV2 as VoxelGenerator except: from spconv.utils import VoxelGenerator voxel_generator = VoxelGenerator( voxel_size=config.VOXEL_SIZE, point_cloud_range=self.point_cloud_range, max_num_points=config.MAX_POINTS_PER_VOXEL, max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode], ) grid_size = ( self.point_cloud_range[3:6] - self.point_cloud_range[0:3] ) / np.array(config.VOXEL_SIZE) self.grid_size = np.round(grid_size).astype(np.int64) self.voxel_size = config.VOXEL_SIZE return partial( self.transform_points_to_voxels, voxel_generator=voxel_generator ) points = data_dict["points"] voxel_output = voxel_generator.generate(points) if isinstance(voxel_output, dict): voxels, coordinates, num_points = ( voxel_output["voxels"], voxel_output["coordinates"], voxel_output["num_points_per_voxel"], ) else: voxels, coordinates, num_points = voxel_output if not data_dict["use_lead_xyz"]: voxels = voxels[..., 3:] # remove xyz in voxels(N, 3) data_dict["voxels"] = voxels data_dict["voxel_coords"] = coordinates data_dict["voxel_num_points"] = num_points return data_dict def sample_points(self, data_dict=None, config=None): if data_dict is None: return partial(self.sample_points, config=config) num_points = config.NUM_POINTS[self.mode] if num_points == -1: return data_dict points = data_dict["points"] if num_points < len(points): pts_depth = np.linalg.norm(points[:, 0:3], axis=1) pts_near_flag = pts_depth < 40.0 far_idxs_choice = np.where(pts_near_flag == 0)[0] near_idxs = np.where(pts_near_flag == 1)[0] choice = [] if num_points > len(far_idxs_choice): near_idxs_choice = np.random.choice( near_idxs, num_points - len(far_idxs_choice), replace=False ) choice = ( np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) if len(far_idxs_choice) > 0 else near_idxs_choice ) else: choice = np.arange(0, len(points), dtype=np.int32) 
choice = np.random.choice(choice, num_points, replace=False) np.random.shuffle(choice) else: choice = np.arange(0, len(points), dtype=np.int32) if num_points > len(points): extra_choice = np.random.choice( choice, num_points - len(points), replace=False ) choice = np.concatenate((choice, extra_choice), axis=0) np.random.shuffle(choice) data_dict["points"] = points[choice] return data_dict def forward(self, data_dict): """ Args: data_dict: points: (N, 3 + C_in) gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] gt_names: optional, (N), string ... Returns: """ for cur_processor in self.data_processor_queue: data_dict = cur_processor(data_dict=data_dict) return data_dict def eval(self): self.training = False self.mode = "test" def train(self): self.training = True self.mode = "train"
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/processor/point_feature_encoder.py
Python
import numpy as np class PointFeatureEncoder(object): def __init__(self, config, point_cloud_range=None): super().__init__() self.point_encoding_config = config assert list(self.point_encoding_config.src_feature_list[0:3]) == ["x", "y", "z"] self.used_feature_list = self.point_encoding_config.used_feature_list self.src_feature_list = self.point_encoding_config.src_feature_list self.point_cloud_range = point_cloud_range @property def num_point_features(self): return getattr(self, self.point_encoding_config.encoding_type)(points=None) def forward(self, data_dict): """ Args: data_dict: points: (N, 3 + C_in) ... Returns: data_dict: points: (N, 3 + C_out), use_lead_xyz: whether to use xyz as point-wise features ... """ data_dict["points"], use_lead_xyz = getattr( self, self.point_encoding_config.encoding_type )(data_dict["points"]) data_dict["use_lead_xyz"] = use_lead_xyz return data_dict def absolute_coordinates_encoding(self, points=None): if points is None: num_output_features = len(self.used_feature_list) return num_output_features point_feature_list = [points[:, 0:3]] for x in self.used_feature_list: if x in ["x", "y", "z"]: continue idx = self.src_feature_list.index(x) point_feature_list.append(points[:, idx : idx + 1]) point_features = np.concatenate(point_feature_list, axis=1) return point_features, True
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/waymo/waymo_dataset.py
Python
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset # Reference https://github.com/open-mmlab/OpenPCDet # Written by Shaoshuai Shi, Chaoxu Guo # All Rights Reserved 2019-2020. import argparse import copy import multiprocessing import os import pickle from pathlib import Path import numpy as np import torch from tqdm import tqdm from lit.path_utils import get_lit_paths from pcdet.datasets.dataset import DatasetTemplate from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils from pcdet.utils import box_utils, common_utils class WaymoDataset(DatasetTemplate): _all_lidar_names = ["TOP", "FRONT", "SIDE_LEFT", "SIDE_RIGHT", "REAR"] def __init__( self, dataset_cfg, class_names, training=True, root_path=None, logger=None, allow_empty_gt_boxes=False, include_extras=False, ): """ Args: include_extras: include extra info in __getitem__ which may not be able to transfer to CUDA. This is only used for extracting Scene data. """ super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger, allow_empty_gt_boxes=allow_empty_gt_boxes, ) self.include_extras = include_extras self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG self.split = self.dataset_cfg.DATA_SPLIT[self.mode] split_dir = ( self.root_path.parent.parent / "data_split" / "waymo" / (self.split + ".txt") ) self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] self.infos = [] self.extra_infos = [] self.include_waymo_data(self.mode) # Check if we shall use simulated data. # E.g., set dataset_cfg.DST_STYLE to "kitti" to use simulated # nuScenes data with KITTI style. self.dst_style = self.dataset_cfg.get("DST_STYLE", None) # Get Waymo dataset paths. if self.dst_style is not None: lit_paths = get_lit_paths( data_version=self.dataset_cfg.DATA_VERSION, data_domain="waymo", ) # Not all training frames have simulated data, thus the self.infos may # not have the same length as before. 
if self.dst_style == "kitti": new_infos = [] # Ref init_paths.py self.dst_data_path = lit_paths.to_kitti_sim_frame_dir # Only a subset of self.infos has a corresponding simulation in # the sim_frame_dir. That is, we shall check # self.infos[i]["point_cloud"]["lidar_sequence"] # and see if it has a corresponding folder in sim_frame_dir. new_infos = [] new_extra_infos = [] for info, extra_info in zip(self.infos, self.extra_infos): sequence_name = info["point_cloud"]["lidar_sequence"] if sequence_name in lit_paths.scene_list: sim_dir = self.dst_data_path / sequence_name if not sim_dir.exists(): print( f"[WARNING] {sim_dir} does not exist but it " f"is in scene_list for data version " f"{lit_paths.data_version}" ) new_infos.append(info) new_extra_infos.append(extra_info) print(f"Selected {len(new_infos)} from {len(self.infos)} infos") self.infos = new_infos self.extra_infos = new_extra_infos elif self.dst_style == "nuscenes": new_infos = [] # Ref init_paths.py self.dst_data_path = lit_paths.to_nuscenes_sim_frame_dir # Only a subset of self.infos has a corresponding simulation in # the sim_frame_dir. That is, we shall check # self.infos[i]["point_cloud"]["lidar_sequence"] # and see if it has a corresponding folder in sim_frame_dir. 
new_infos = [] new_extra_infos = [] for info, extra_info in zip(self.infos, self.extra_infos): sequence_name = info["point_cloud"]["lidar_sequence"] if sequence_name in lit_paths.scene_list: sim_dir = self.dst_data_path / sequence_name if not sim_dir.exists(): print( f"[WARNING] " f"{sim_dir} does not exist but it " f"is in scene_list for data version " f"{lit_paths.data_version}" ) new_infos.append(info) new_extra_infos.append(extra_info) print(f"Selected {len(new_infos)} from {len(self.infos)} infos") self.infos = new_infos self.extra_infos = new_extra_infos elif self.dst_style == "v0": self.dst_data_path = None # print(f"self.info before subsampling: {len(self.infos)}") # self.infos = self.infos[::5] # self.extra_infos = self.extra_infos[::5] # print(f"self.info after subsampling : {len(self.infos)}") elif self.dst_style == None: self.dst_data_path = None else: raise ValueError(f"dst_style {self.dst_style} not implemented") # For debugging, clip the info to first 128. # self.infos = self.infos[:128] # self.extra_infos = self.extra_infos[:128] # self.infos = self.infos[: int(len(self.infos) * 0.1)] # self.extra_infos = self.extra_infos[: int(len(self.extra_infos) * 0.1)] def set_split(self, split): """ Only needed if you want to change the split after initialization. """ super().__init__( dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger, ) self.split = split split_dir = ( self.root_path.parent.parent / "data_split" / "waymo" / f"{self.split}.txt" ) self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] self.infos = [] self.extra_infos = [] self.include_waymo_data(self.mode) def include_waymo_data(self, mode): self.logger.info("Loading Waymo dataset") waymo_infos = [] extra_waymo_infos = [] num_skipped_infos = 0 for k in range(len(self.sample_sequence_list)): # Load infos. 
sequence_name = os.path.splitext(self.sample_sequence_list[k])[0] info_path = self.data_path / sequence_name / ("%s.pkl" % sequence_name) info_path = self.check_sequence_name_with_all_version(info_path) if not info_path.exists(): num_skipped_infos += 1 continue # This also skips the extra infos. with open(info_path, "rb") as f: infos = pickle.load(f) waymo_infos.extend(infos) # Load extra infos. # For simplicity, we assume that the extra infos are always available. extra_info_dir = self.data_path.parent / f"{self.data_path.name}_extra" extra_info_path = extra_info_dir / f"{sequence_name}.pkl" if not extra_info_path.exists(): # TODO: enable this check after pre-processing all data. # raise FileNotFoundError( # f"{info_path} exists but {extra_info_path} does not exist." # ) continue with open(extra_info_path, "rb") as f: extra_infos = pickle.load(f) extra_waymo_infos.extend(extra_infos) self.infos.extend(waymo_infos[:]) self.extra_infos.extend(extra_waymo_infos[:]) self.logger.info("Total skipped info %s" % num_skipped_infos) self.logger.info("Total samples for Waymo dataset: %d" % (len(waymo_infos))) if self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1: sampled_waymo_infos = [] for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]): sampled_waymo_infos.append(self.infos[k]) self.infos = sampled_waymo_infos self.logger.info( "Total sampled samples for Waymo dataset: %d" % len(self.infos) ) sampled_extra_waymo_infos = [] for k in range( 0, len(self.extra_infos), self.dataset_cfg.SAMPLED_INTERVAL[mode] ): sampled_extra_waymo_infos.append(self.extra_infos[k]) self.extra_infos = sampled_extra_waymo_infos self.logger.info( "Total sampled extra samples for Waymo dataset: %d" % len(self.extra_infos) ) @staticmethod def check_sequence_name_with_all_version(sequence_file): if ( "_with_camera_labels" not in str(sequence_file) and not sequence_file.exists() ): sequence_file = Path( str(sequence_file)[:-9] + "_with_camera_labels.tfrecord" ) if 
"_with_camera_labels" in str(sequence_file) and not sequence_file.exists(): sequence_file = Path(str(sequence_file).replace("_with_camera_labels", "")) return sequence_file def get_infos( self, raw_data_path, save_path, num_workers=None, has_label=True, sampled_interval=1, enable_only_save_lidar_poses=False, ): """ num_workers: int, number of threads to be used for data preprocessing. - None: use all CPU cores. - 0: no parallelization. - 1: parallelization with one thread, which is different from 0. """ import concurrent.futures as futures from functools import partial from pcdet.datasets.waymo import waymo_utils print( "---------------The waymo sample interval is %d, total sequecnes is %d-----------------" % (sampled_interval, len(self.sample_sequence_list)) ) process_single_sequence = partial( waymo_utils.process_single_sequence, save_path=save_path, sampled_interval=sampled_interval, has_label=has_label, enable_only_save_lidar_poses=enable_only_save_lidar_poses, ) sample_sequence_file_list = [ self.check_sequence_name_with_all_version(raw_data_path / sequence_file) for sequence_file in self.sample_sequence_list ] if num_workers is None: num_workers = multiprocessing.cpu_count() if num_workers > 0: with futures.ThreadPoolExecutor(num_workers) as executor: sequence_infos = list( tqdm( executor.map( process_single_sequence, sample_sequence_file_list ), total=len(sample_sequence_file_list), ) ) # Equivalent to: # for infos in sequence_infos: # all_sequences_infos.extend(infos) all_sequences_infos = [item for infos in sequence_infos for item in infos] else: # Single thread. 
all_sequences_infos = [] for sequence_file in tqdm(sample_sequence_file_list): sequence_info = process_single_sequence(sequence_file) all_sequences_infos.extend(sequence_info) return all_sequences_infos def get_lidar(self, sequence_name, sample_idx, info): """ Returns: points_all: (N, 5) [x, y, z, intensity, elongation] num_points_of_each_lidar: (5,), the updated info["num_points_of_each_lidar"] """ if self.dst_data_path is None: # (N, 6): [x, y, z, intensity, elongation, NLZ_flag] lidar_file = self.data_path / sequence_name / ("%04d.npy" % sample_idx) point_features = np.load(lidar_file) # For each lidar, compute start/end indices. num_lidars = len(self._all_lidar_names) num_points_of_each_lidar = info["num_points_of_each_lidar"] assert len(num_points_of_each_lidar) == num_lidars end_indices = np.cumsum(num_points_of_each_lidar) # Exclusive. start_indices = np.concatenate([[0], end_indices[:-1]]) # Inclusive. # For each lidar, compute the number of points with NLZ_flag == -1 points_all, NLZ_flag = point_features[:, 0:5], point_features[:, 5] num_points_of_each_lidar = [ np.sum( NLZ_flag[start_indices[lidar_idx] : end_indices[lidar_idx]] == -1 ) for lidar_idx in range(num_lidars) ] # Filter all with NLZ_flag == -1 points_all = points_all[NLZ_flag == -1] points_all[:, 3] = np.tanh(points_all[:, 3]) if len(points_all) != np.sum(num_points_of_each_lidar): raise ValueError( f"len(points_all) != np.sum(num_points_of_each_lidar): " f"{len(points_all)} != {np.sum(num_points_of_each_lidar)}" ) else: lidar_file_npy = ( self.dst_data_path / sequence_name / ("%04d.npy" % sample_idx) ) lidar_file_npz = ( self.dst_data_path / sequence_name / ("%04d.npz" % sample_idx) ) if lidar_file_npy.is_file(): # (N, 3) point_features = np.load(lidar_file_npy) elif lidar_file_npz.is_file(): # (N, 3) point_features = np.load(lidar_file_npz)["local_points"] else: raise (f"Both {lidar_file_npy} and {lidar_file_npz} are not found") # (N, 3) -> (N, 5) by appending intensity and elongation as 
zeros. points_all = np.concatenate( [point_features, np.zeros([len(point_features), 2])], axis=1 ) # All points are from the top lidar. num_points_of_each_lidar = [len(points_all), 0, 0, 0, 0] return points_all, num_points_of_each_lidar def __len__(self): if self._merge_all_iters_to_one_epoch: return len(self.infos) * self.total_epochs return len(self.infos) def __getitem__(self, index): if self._merge_all_iters_to_one_epoch: index = index % len(self.infos) info = copy.deepcopy(self.infos[index]) extra_info = copy.deepcopy(self.extra_infos[index]) pc_info = info["point_cloud"] sequence_name = pc_info["lidar_sequence"] sample_idx = pc_info["sample_idx"] points, num_points_of_each_lidar = self.get_lidar( sequence_name, sample_idx, info ) # Sanity checks. assert pc_info["lidar_sequence"] == extra_info["sequence_name"] assert pc_info["sample_idx"] == extra_info["sample_idx"] np.testing.assert_allclose(info["pose"], extra_info["frame_pose"]) # Per-lidar pose. num_lidars = len(num_points_of_each_lidar) lidar_to_vehicle_poses = extra_info["lidar_to_vehicle_poses"] assert len(lidar_to_vehicle_poses) == num_lidars input_dict = { "points": points, "frame_id": info["frame_id"], "sample_idx": sample_idx, } if "annos" in info: annos = info["annos"] annos = common_utils.drop_info_with_name(annos, name="unknown") if self.dataset_cfg.get("INFO_WITH_FAKELIDAR", False): gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar( annos["gt_boxes_lidar"] ) else: gt_boxes_lidar = annos["gt_boxes_lidar"] input_dict.update( { "gt_names": annos["name"], "gt_boxes": gt_boxes_lidar, "num_points_in_gt": annos.get("num_points_in_gt", None), } ) if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: input_dict["gt_boxes"] = None if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: input_dict["gt_boxes"] = None # for debug only # gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_) # debug_dict = {'gt_boxes': 
copy.deepcopy(gt_boxes_lidar[gt_boxes_mask])} if self.dataset_cfg.get("FOV_POINTS_ONLY", None): input_dict["points"] = self.extract_fov_data( input_dict["points"], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE, ) if input_dict["gt_boxes"] is not None: fov_gt_flag = self.extract_fov_gt( input_dict["gt_boxes"], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE, ) input_dict.update( { "gt_names": input_dict["gt_names"][fov_gt_flag], "gt_boxes": input_dict["gt_boxes"][fov_gt_flag], "num_points_in_gt": ( input_dict["num_points_in_gt"][fov_gt_flag] if input_dict["num_points_in_gt"] is not None else None ), } ) # load saved pseudo label for unlabeled data if self.dataset_cfg.get("USE_PSEUDO_LABEL", None) and self.training: self.fill_pseudo_labels(input_dict) # Insert properties first, as self.prepare_data may recursively call __getitem__. input_dict["sequence_name"] = sequence_name input_dict["pose"] = info["pose"] input_dict["lidar_to_vehicle_poses"] = lidar_to_vehicle_poses input_dict["num_points_of_each_lidar"] = num_points_of_each_lidar # Updated input_dict["obj_ids"] = annos["obj_ids"] data_dict = self.prepare_data(data_dict=input_dict) data_dict["metadata"] = info.get("metadata", info["frame_id"]) data_dict.pop("num_points_in_gt", None) # Extra info. 
if self.include_extras: obj_ids = [str(obj_id) for obj_id in data_dict["obj_ids"]] data_dict["obj_ids"] = obj_ids else: if "obj_ids" in data_dict: data_dict.pop("obj_ids") if "metadata" in data_dict: data_dict.pop("metadata") if "sequence_name" in data_dict: data_dict.pop("sequence_name") return data_dict @staticmethod def generate_prediction_dicts( batch_dict, pred_dicts, class_names, output_path=None ): """ Args: batch_dict: frame_id: pred_dicts: list of pred_dicts pred_boxes: (N, 7), Tensor pred_scores: (N), Tensor pred_labels: (N), Tensor class_names: output_path: Returns: """ def get_template_prediction(num_samples): ret_dict = { "name": np.zeros(num_samples), "score": np.zeros(num_samples), "boxes_lidar": np.zeros([num_samples, 7]), } return ret_dict def generate_single_sample_dict(box_dict): pred_scores = box_dict["pred_scores"].cpu().numpy() pred_boxes = box_dict["pred_boxes"].cpu().numpy() pred_labels = box_dict["pred_labels"].cpu().numpy() pred_dict = get_template_prediction(pred_scores.shape[0]) if pred_scores.shape[0] == 0: return pred_dict pred_dict["name"] = np.array(class_names)[pred_labels - 1] pred_dict["score"] = pred_scores pred_dict["boxes_lidar"] = pred_boxes return pred_dict annos = [] for index, box_dict in enumerate(pred_dicts): single_pred_dict = generate_single_sample_dict(box_dict) single_pred_dict["frame_id"] = batch_dict["frame_id"][index] single_pred_dict["metadata"] = batch_dict["metadata"][index] annos.append(single_pred_dict) return annos def evaluation(self, det_annos, class_names, **kwargs): if "annos" not in self.infos[0].keys(): return "No ground-truth boxes for evaluation", {} def kitti_eval(eval_det_annos, eval_gt_annos): from pcdet.datasets.kitti import kitti_utils from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval map_name_to_kitti = { "Vehicle": "Car", "Pedestrian": "Pedestrian", "Cyclist": "Cyclist", "Sign": "Sign", "Car": "Car", } kitti_utils.transform_annotations_to_kitti_format( eval_det_annos, 
map_name_to_kitti=map_name_to_kitti ) kitti_utils.transform_annotations_to_kitti_format( eval_gt_annos, map_name_to_kitti=map_name_to_kitti, info_with_fakelidar=self.dataset_cfg.get("INFO_WITH_FAKELIDAR", False), ) kitti_class_names = [map_name_to_kitti[x] for x in class_names] ap_result_str, ap_dict = kitti_eval.get_official_eval_result( gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names, ) return ap_result_str, ap_dict def waymo_eval(eval_det_annos, eval_gt_annos): from pcdet.datasets.waymo.waymo_eval import ( OpenPCDetWaymoDetectionMetricsEstimator, ) eval = OpenPCDetWaymoDetectionMetricsEstimator() ap_dict = eval.waymo_evaluation( eval_det_annos, eval_gt_annos, class_name=class_names, distance_thresh=1000, fake_gt_infos=self.dataset_cfg.get("INFO_WITH_FAKELIDAR", False), ) ap_result_str = "\n" for key in ap_dict: ap_dict[key] = ap_dict[key][0] ap_result_str += "%s: %.4f \n" % (key, ap_dict[key]) return ap_result_str, ap_dict eval_det_annos = copy.deepcopy(det_annos) eval_gt_annos = [copy.deepcopy(info["annos"]) for info in self.infos] if kwargs["eval_metric"] == "kitti": ap_result_str, ap_dict = kitti_eval(eval_det_annos, eval_gt_annos) elif kwargs["eval_metric"] == "waymo": ap_result_str, ap_dict = waymo_eval(eval_det_annos, eval_gt_annos) else: raise NotImplementedError return ap_result_str, ap_dict def create_groundtruth_database( self, info_path, save_path, used_classes=None, split="train", sampled_interval=10, processed_data_tag=None, ): database_save_path = save_path / ( "pcdet_gt_database_%s_sampled_%d" % (split, sampled_interval) ) db_info_save_path = save_path / ( "pcdet_waymo_dbinfos_%s_sampled_%d.pkl" % (split, sampled_interval) ) database_save_path.mkdir(parents=True, exist_ok=True) all_db_infos = {} with open(info_path, "rb") as f: infos = pickle.load(f) for k in range(0, len(infos), sampled_interval): print("gt_database sample: %d/%d" % (k + 1, len(infos))) info = infos[k] pc_info = info["point_cloud"] sequence_name 
= pc_info["lidar_sequence"] sample_idx = pc_info["sample_idx"] points = self.get_lidar(sequence_name, sample_idx) annos = info["annos"] names = annos["name"] difficulty = annos["difficulty"] gt_boxes = annos["gt_boxes_lidar"] num_obj = gt_boxes.shape[0] box_idxs_of_pts = ( roiaware_pool3d_utils.points_in_boxes_gpu( torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(), torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda(), ) .long() .squeeze(dim=0) .cpu() .numpy() ) for i in range(num_obj): filename = "%s_%04d_%s_%d.bin" % ( sequence_name, sample_idx, names[i], i, ) filepath = database_save_path / filename gt_points = points[box_idxs_of_pts == i] gt_points[:, :3] -= gt_boxes[i, :3] if (used_classes is None) or names[i] in used_classes: with open(filepath, "w") as f: gt_points.tofile(f) db_path = str( filepath.relative_to(self.root_path) ) # gt_database/xxxxx.bin db_info = { "name": names[i], "path": db_path, "sequence_name": sequence_name, "sample_idx": sample_idx, "gt_idx": i, "box3d_lidar": gt_boxes[i], "num_points_in_gt": gt_points.shape[0], "difficulty": difficulty[i], } if names[i] in all_db_infos: all_db_infos[names[i]].append(db_info) else: all_db_infos[names[i]] = [db_info] for k, v in all_db_infos.items(): print("Database %s: %d" % (k, len(v))) with open(db_info_save_path, "wb") as f: pickle.dump(all_db_infos, f) class MixedWaymoDataset(DatasetTemplate): """ Two Waymo Datasets mixed together. - dataset_a: The base dataset. This dataset will be randomly sampled according to the MIX_RATIO_A_TO_B. - dataset_b: The main dataset. Each epoch will make sure that all samples of dataset_b are used exactly once. 
""" def __init__( self, dataset_cfg, class_names, training=True, root_path=None, logger=None, allow_empty_gt_boxes=False, include_extras=False, ): super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger, allow_empty_gt_boxes=allow_empty_gt_boxes, ) dst_style = self.dataset_cfg.get("DST_STYLE", None) # To fill self.dataset_a = None self.dataset_b = None self.dataset_a_sampled_len = None self.dataset_len = None if dst_style == "v0_kitti": dataset_cfg_a = copy.deepcopy(dataset_cfg) dataset_cfg_a.DST_STYLE = "v0" self.dataset_a = WaymoDataset( dataset_cfg=dataset_cfg_a, class_names=class_names, training=training, root_path=root_path, logger=logger, allow_empty_gt_boxes=allow_empty_gt_boxes, include_extras=include_extras, ) dataset_cfg_b = copy.deepcopy(dataset_cfg) dataset_cfg_b.DST_STYLE = "kitti" self.dataset_b = WaymoDataset( dataset_cfg=dataset_cfg_b, class_names=class_names, training=training, root_path=root_path, logger=logger, allow_empty_gt_boxes=allow_empty_gt_boxes, include_extras=include_extras, ) # Compute length of the mixed dataset mix_ratio_a_to_b = self.dataset_cfg.get("MIX_RATIO_A_TO_B", None) if mix_ratio_a_to_b is None: raise ValueError("DATA_CONFIG.MIX_RATIO_A_TO_B is not set") self.dataset_a_sampled_len = int(len(self.dataset_b) * mix_ratio_a_to_b) self.dataset_len = self.dataset_a_sampled_len + len(self.dataset_b) print(f"Initialized MixedWaymoDataset:") print(f" - dst_style : {dst_style}") print(f" - Ratio A:B : {mix_ratio_a_to_b}") print(f" - Samples from A (v0) : {len(self.dataset_a)}") print(f" Sampled for A : {self.dataset_a_sampled_len}") print(f" - Samples from B (KITTI): {len(self.dataset_b)}") print(f" - Mixed Dataset Length : {self.dataset_len}") else: raise NotImplementedError(f"dst_style {self.dst_style} not implemented") def __len__(self): return self.dataset_len def __getitem__(self, index): if index < self.dataset_a_sampled_len: index_a = np.random.randint(0, 
len(self.dataset_a)) # print(f"dataset[{index}]: dataset_a[{index_a}]") return self.dataset_a[index_a] else: index_b = index - self.dataset_a_sampled_len # print(f"dataset[{index}]: dataset_b[{index_b}]") return self.dataset_b[index_b] class WaymoDatasetInfo(WaymoDataset): def __init__( self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ): super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger, ) def __getitem__(self, index): if self._merge_all_iters_to_one_epoch: index = index % len(self.infos) info = copy.deepcopy(self.infos[index]) pc_info = info["point_cloud"] sequence_name = pc_info["lidar_sequence"] sample_idx = pc_info["sample_idx"] input_dict = { "frame_id": info["frame_id"], "sample_idx": sample_idx, "sequence_name": sequence_name, } input_dict["metadata"] = info.get("metadata", info["frame_id"]) return input_dict def create_waymo_infos( dataset_cfg, class_names, data_path, save_path, raw_data_tag="raw_data", processed_data_tag="waymo_processed_data", workers=None, disable_train_info=False, disable_val_info=False, disable_gt_database=False, enable_only_save_lidar_poses=False, ): dataset = WaymoDataset( dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False, logger=common_utils.create_logger(), ) train_split, val_split = "train", "val" if enable_only_save_lidar_poses: train_filename = save_path / f"waymo_infos_{train_split}_extra.pkl" val_filename = save_path / f"waymo_infos_{val_split}_extra.pkl" else: train_filename = save_path / f"waymo_infos_{train_split}.pkl" val_filename = save_path / f"waymo_infos_{val_split}.pkl" ############################################################################ # Per-frame points (npy), per-sequence info (pkl), per-split info (pkl), # for training dataset. 
############################################################################ if not disable_train_info: print("===============================================================") print("Generating per-frame points, per-seq info, per-split info (train)") print("===============================================================") dataset.set_split(train_split) waymo_infos_train = dataset.get_infos( raw_data_path=data_path / raw_data_tag, save_path=save_path / processed_data_tag, num_workers=workers, has_label=True, sampled_interval=1, enable_only_save_lidar_poses=enable_only_save_lidar_poses, ) with open(train_filename, "wb") as f: pickle.dump(waymo_infos_train, f) print(f"Waymo info train file is saved to {train_filename}") else: print("===============================================================") print("Skip generating per-frame points, per-seq info, per-split info (train)") print("===============================================================") ############################################################################ # Per-frame points (npy), per-sequence info (pkl), per-split info (pkl), # for validation dataset. 
############################################################################ if not disable_val_info: print("===============================================================") print("Generating per-frame points, per-seq info, per-split info (val)") print("===============================================================") dataset.set_split(val_split) waymo_infos_val = dataset.get_infos( raw_data_path=data_path / raw_data_tag, save_path=save_path / processed_data_tag, num_workers=workers, has_label=True, sampled_interval=1, enable_only_save_lidar_poses=enable_only_save_lidar_poses, ) with open(val_filename, "wb") as f: pickle.dump(waymo_infos_val, f) print(f"Waymo info val file is saved to {val_filename}") else: print("===============================================================") print("Skip generating per-frame points, per-seq info, per-split info (val)") print("===============================================================") ############################################################################ # Ground-truth database for data augmentation. 
############################################################################ if not disable_gt_database and not enable_only_save_lidar_poses: print("===============================================================") print("Generating ground-truth database (skip-10) for data augmentation") print("===============================================================") dataset.set_split(train_split) dataset.create_groundtruth_database( info_path=train_filename, save_path=save_path, split="train", sampled_interval=10, used_classes=["Vehicle", "Pedestrian", "Cyclist"], ) else: print("===============================================================") print("Skip generating ground-truth database (skip-10) for data augmentation") print("===============================================================") print("===============================================================") print("Data preparation Done") print("===============================================================") def main(): parser = argparse.ArgumentParser(description="arg parser") # Default arguments. parser.add_argument( "--cfg_file", type=str, default=None, help="specify the config of dataset", ) parser.add_argument( "--func", type=str, default="create_waymo_infos", help="", ) parser.add_argument( "--workers", type=int, default=None, help="None for using all CPU cores; 0 for disable parallel", ) # Arguments to disable part of the pipeline. parser.add_argument( "--disable_train_info", action="store_true", help="disable generating training info", ) parser.add_argument( "--disable_val_info", action="store_true", help="disable generating validation info", ) parser.add_argument( "--disable_gt_database", action="store_true", help="disable generating gt database (skip-10) for augmentation", ) # Arguments for extracting additional info. # We only do addition info extraction, and will not replace the original info. 
parser.add_argument( "--enable_only_save_lidar_poses", action="store_true", help="Extract lidar-to-vehicle pose only. All other processing are skipped. " "Will save to waymo/waymo_processed_data_extra/seq_name.pkl", ) args = parser.parse_args() if args.func == "create_waymo_infos": import yaml from easydict import EasyDict dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file))) ROOT_DIR = (Path(__file__).resolve().parent / "../../../").resolve() create_waymo_infos( dataset_cfg=dataset_cfg, class_names=["Vehicle", "Pedestrian", "Cyclist"], data_path=ROOT_DIR / "data" / "waymo", save_path=ROOT_DIR / "data" / "waymo", raw_data_tag="raw_data", processed_data_tag=dataset_cfg.PROCESSED_DATA_TAG, workers=args.workers, disable_train_info=args.disable_train_info, disable_val_info=args.disable_val_info, disable_gt_database=args.disable_gt_database, enable_only_save_lidar_poses=args.enable_only_save_lidar_poses, ) else: raise NotImplementedError(f"function {args.func} not implemented") if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/waymo/waymo_eval.py
Python
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset # Reference https://github.com/open-mmlab/OpenPCDet # Written by Shaoshuai Shi, Chaoxu Guo # All Rights Reserved 2019-2020. import argparse import pickle import numpy as np import tensorflow as tf from google.protobuf import text_format from waymo_open_dataset.metrics.python import detection_metrics from waymo_open_dataset.protos import metrics_pb2 tf.get_logger().setLevel("INFO") def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period class OpenPCDetWaymoDetectionMetricsEstimator(tf.test.TestCase): WAYMO_CLASSES = ["unknown", "Vehicle", "Pedestrian", "Truck", "Cyclist"] def generate_waymo_type_results( self, infos, class_names, is_gt=False, fake_gt_infos=True ): def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar): """ Args: boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center Returns: boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center """ w, l, h, r = ( boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7], ) boxes3d_lidar[:, 2] += h[:, 0] / 2 return np.concatenate( [boxes3d_lidar[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1 ) frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty = ( [], [], [], [], [], [], ) for frame_index, info in enumerate(infos): if is_gt: box_mask = np.array( [n in class_names for n in info["name"]], dtype=np.bool_ ) if "num_points_in_gt" in info: zero_difficulty_mask = info["difficulty"] == 0 info["difficulty"][ (info["num_points_in_gt"] > 5) & zero_difficulty_mask ] = 1 info["difficulty"][ (info["num_points_in_gt"] <= 5) & zero_difficulty_mask ] = 2 nonzero_mask = info["num_points_in_gt"] > 0 box_mask = box_mask & nonzero_mask else: print( "Please provide the num_points_in_gt for evaluating on Waymo Dataset " "(If you create Waymo Infos before 20201126, please re-create the validation infos " "with version 1.2 
Waymo dataset to get this attribute). SSS of OpenPCDet" ) raise NotImplementedError num_boxes = box_mask.sum() box_name = info["name"][box_mask] difficulty.append(info["difficulty"][box_mask]) score.append(np.ones(num_boxes)) if fake_gt_infos: info["gt_boxes_lidar"] = boxes3d_kitti_fakelidar_to_lidar( info["gt_boxes_lidar"] ) boxes3d.append(info["gt_boxes_lidar"][box_mask]) else: num_boxes = len(info["boxes_lidar"]) difficulty.append([0] * num_boxes) score.append(info["score"]) boxes3d.append(np.array(info["boxes_lidar"])) box_name = info["name"] obj_type += [ self.WAYMO_CLASSES.index(name) for i, name in enumerate(box_name) ] frame_id.append(np.array([frame_index] * num_boxes)) overlap_nlz.append(np.zeros(num_boxes)) # set zero currently frame_id = np.concatenate(frame_id).reshape(-1).astype(np.int64) boxes3d = np.concatenate(boxes3d, axis=0) obj_type = np.array(obj_type).reshape(-1) score = np.concatenate(score).reshape(-1) overlap_nlz = np.concatenate(overlap_nlz).reshape(-1) difficulty = np.concatenate(difficulty).reshape(-1).astype(np.int8) boxes3d[:, -1] = limit_period(boxes3d[:, -1], offset=0.5, period=np.pi * 2) return frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty def build_config(self): config = metrics_pb2.Config() config_text = """ breakdown_generator_ids: OBJECT_TYPE difficulties { levels:1 levels:2 } matcher_type: TYPE_HUNGARIAN iou_thresholds: 0.0 iou_thresholds: 0.7 iou_thresholds: 0.5 iou_thresholds: 0.5 iou_thresholds: 0.5 box_type: TYPE_3D """ for x in range(0, 100): config.score_cutoffs.append(x * 0.01) config.score_cutoffs.append(1.0) text_format.Merge(config_text, config) return config def build_graph(self, graph): with graph.as_default(): self._pd_frame_id = tf.compat.v1.placeholder(dtype=tf.int64) self._pd_bbox = tf.compat.v1.placeholder(dtype=tf.float32) self._pd_type = tf.compat.v1.placeholder(dtype=tf.uint8) self._pd_score = tf.compat.v1.placeholder(dtype=tf.float32) self._pd_overlap_nlz = 
tf.compat.v1.placeholder(dtype=tf.bool) self._gt_frame_id = tf.compat.v1.placeholder(dtype=tf.int64) self._gt_bbox = tf.compat.v1.placeholder(dtype=tf.float32) self._gt_type = tf.compat.v1.placeholder(dtype=tf.uint8) self._gt_difficulty = tf.compat.v1.placeholder(dtype=tf.uint8) metrics = detection_metrics.get_detection_metric_ops( config=self.build_config(), prediction_frame_id=self._pd_frame_id, prediction_bbox=self._pd_bbox, prediction_type=self._pd_type, prediction_score=self._pd_score, prediction_overlap_nlz=self._pd_overlap_nlz, ground_truth_bbox=self._gt_bbox, ground_truth_type=self._gt_type, ground_truth_frame_id=self._gt_frame_id, ground_truth_difficulty=self._gt_difficulty, ) return metrics def run_eval_ops( self, sess, graph, metrics, prediction_frame_id, prediction_bbox, prediction_type, prediction_score, prediction_overlap_nlz, ground_truth_frame_id, ground_truth_bbox, ground_truth_type, ground_truth_difficulty, ): sess.run( [tf.group([value[1] for value in metrics.values()])], feed_dict={ self._pd_bbox: prediction_bbox, self._pd_frame_id: prediction_frame_id, self._pd_type: prediction_type, self._pd_score: prediction_score, self._pd_overlap_nlz: prediction_overlap_nlz, self._gt_bbox: ground_truth_bbox, self._gt_type: ground_truth_type, self._gt_frame_id: ground_truth_frame_id, self._gt_difficulty: ground_truth_difficulty, }, ) def eval_value_ops(self, sess, graph, metrics): return {item[0]: sess.run([item[1][0]]) for item in metrics.items()} def mask_by_distance(self, distance_thresh, boxes_3d, *args): mask = np.linalg.norm(boxes_3d[:, 0:2], axis=1) < distance_thresh + 0.5 boxes_3d = boxes_3d[mask] ret_ans = [boxes_3d] for arg in args: ret_ans.append(arg[mask]) return tuple(ret_ans) def waymo_evaluation( self, prediction_infos, gt_infos, class_name, distance_thresh=100, fake_gt_infos=True, ): print("Start the waymo evaluation...") assert len(prediction_infos) == len(gt_infos), "%d vs %d" % ( prediction_infos.__len__(), gt_infos.__len__(), ) 
tf.compat.v1.disable_eager_execution() ( pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz, _, ) = self.generate_waymo_type_results(prediction_infos, class_name, is_gt=False) ( gt_frameid, gt_boxes3d, gt_type, gt_score, gt_overlap_nlz, gt_difficulty, ) = self.generate_waymo_type_results( gt_infos, class_name, is_gt=True, fake_gt_infos=fake_gt_infos ) ( pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz, ) = self.mask_by_distance( distance_thresh, pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz ) ( gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty, ) = self.mask_by_distance( distance_thresh, gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty ) print("Number: (pd, %d) VS. (gt, %d)" % (len(pd_boxes3d), len(gt_boxes3d))) print( "Level 1: %d, Level2: %d)" % ((gt_difficulty == 1).sum(), (gt_difficulty == 2).sum()) ) if pd_score.max() > 1: # assert pd_score.max() <= 1.0, 'Waymo evaluation only supports normalized scores' pd_score = 1 / (1 + np.exp(-pd_score)) print("Warning: Waymo evaluation only supports normalized scores") graph = tf.Graph() metrics = self.build_graph(graph) with self.test_session(graph=graph) as sess: sess.run(tf.compat.v1.initializers.local_variables()) self.run_eval_ops( sess, graph, metrics, pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz, gt_frameid, gt_boxes3d, gt_type, gt_difficulty, ) with tf.compat.v1.variable_scope("detection_metrics", reuse=True): aps = self.eval_value_ops(sess, graph, metrics) return aps def main(): parser = argparse.ArgumentParser(description="arg parser") parser.add_argument("--pred_infos", type=str, default=None, help="pickle file") parser.add_argument("--gt_infos", type=str, default=None, help="pickle file") parser.add_argument( "--class_names", type=str, nargs="+", default=["Vehicle", "Pedestrian", "Cyclist"], help="", ) parser.add_argument( "--sampled_interval", type=int, default=5, help="sampled interval for GT scenes", ) args = parser.parse_args() pred_infos = 
pickle.load(open(args.pred_infos, "rb")) gt_infos = pickle.load(open(args.gt_infos, "rb")) print("Start to evaluate the waymo format results...") eval = OpenPCDetWaymoDetectionMetricsEstimator() gt_infos_dst = [] for idx in range(0, len(gt_infos), args.sampled_interval): cur_info = gt_infos[idx]["annos"] cur_info["frame_id"] = gt_infos[idx]["frame_id"] gt_infos_dst.append(cur_info) waymo_AP = eval.waymo_evaluation( pred_infos, gt_infos_dst, class_name=args.class_names, distance_thresh=1000, fake_gt_infos=True, ) print(waymo_AP) if __name__ == "__main__": main()
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/waymo/waymo_utils.py
Python
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset # Reference https://github.com/open-mmlab/OpenPCDet # Written by Shaoshuai Shi, Chaoxu Guo # All Rights Reserved 2019-2020. import os import pickle import numpy as np import tensorflow as tf from waymo_open_dataset import dataset_pb2 from waymo_open_dataset.utils import frame_utils, range_image_utils, transform_utils from pcdet.utils import common_utils try: tf.enable_eager_execution() except: pass WAYMO_CLASSES = ["unknown", "Vehicle", "Pedestrian", "Sign", "Cyclist"] def generate_labels(frame): obj_name, difficulty, dimensions, locations, heading_angles = [], [], [], [], [] tracking_difficulty, speeds, accelerations, obj_ids = [], [], [], [] num_points_in_gt = [] laser_labels = frame.laser_labels for i in range(len(laser_labels)): box = laser_labels[i].box class_ind = laser_labels[i].type loc = [box.center_x, box.center_y, box.center_z] heading_angles.append(box.heading) obj_name.append(WAYMO_CLASSES[class_ind]) difficulty.append(laser_labels[i].detection_difficulty_level) tracking_difficulty.append(laser_labels[i].tracking_difficulty_level) dimensions.append( [box.length, box.width, box.height] ) # lwh in unified coordinate of OpenPCDet locations.append(loc) obj_ids.append(laser_labels[i].id) num_points_in_gt.append(laser_labels[i].num_lidar_points_in_box) annotations = {} annotations["name"] = np.array(obj_name) annotations["difficulty"] = np.array(difficulty) annotations["dimensions"] = np.array(dimensions) annotations["location"] = np.array(locations) annotations["heading_angles"] = np.array(heading_angles) annotations["obj_ids"] = np.array(obj_ids) annotations["tracking_difficulty"] = np.array(tracking_difficulty) annotations["num_points_in_gt"] = np.array(num_points_in_gt) annotations = common_utils.drop_info_with_name(annotations, name="unknown") if annotations["name"].__len__() > 0: gt_boxes_lidar = np.concatenate( [ annotations["location"], annotations["dimensions"], 
annotations["heading_angles"][..., np.newaxis], ], axis=1, ) else: gt_boxes_lidar = np.zeros((0, 7)) annotations["gt_boxes_lidar"] = gt_boxes_lidar return annotations def convert_range_image_to_point_cloud( frame, range_images, camera_projections, range_image_top_pose, ri_index=0 ): """ Modified from the codes of Waymo Open Dataset. Convert range images to point cloud. Args: frame: open dataset frame range_images: A dict of {laser_name, [range_image_first_return, range_image_second_return]}. camera_projections: A dict of {laser_name, [camera_projection_from_first_return, camera_projection_from_second_return]}. range_image_top_pose: range image pixel pose for top lidar. ri_index: 0 for the first return, 1 for the second return. Returns: points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars). cp_points: {[N, 6]} list of camera projections of length 5 (number of lidars). """ calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name) points = [] cp_points = [] points_NLZ = [] points_intensity = [] points_elongation = [] frame_pose = tf.convert_to_tensor( np.reshape(np.array(frame.pose.transform), [4, 4]) ) # [H, W, 6] range_image_top_pose_tensor = tf.reshape( tf.convert_to_tensor(range_image_top_pose.data), range_image_top_pose.shape.dims ) # [H, W, 3, 3] range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix( range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1], range_image_top_pose_tensor[..., 2], ) range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:] range_image_top_pose_tensor = transform_utils.get_transform( range_image_top_pose_tensor_rotation, range_image_top_pose_tensor_translation ) for c in calibrations: range_image = range_images[c.name][ri_index] if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test beam_inclinations = range_image_utils.compute_inclination( tf.constant([c.beam_inclination_min, c.beam_inclination_max]), 
height=range_image.shape.dims[0], ) else: beam_inclinations = tf.constant(c.beam_inclinations) beam_inclinations = tf.reverse(beam_inclinations, axis=[-1]) extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4]) range_image_tensor = tf.reshape( tf.convert_to_tensor(range_image.data), range_image.shape.dims ) pixel_pose_local = None frame_pose_local = None if c.name == dataset_pb2.LaserName.TOP: pixel_pose_local = range_image_top_pose_tensor pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0) frame_pose_local = tf.expand_dims(frame_pose, axis=0) range_image_mask = range_image_tensor[..., 0] > 0 range_image_NLZ = range_image_tensor[..., 3] range_image_intensity = range_image_tensor[..., 1] range_image_elongation = range_image_tensor[..., 2] range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image( tf.expand_dims(range_image_tensor[..., 0], axis=0), tf.expand_dims(extrinsic, axis=0), tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0), pixel_pose=pixel_pose_local, frame_pose=frame_pose_local, ) range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0) points_tensor = tf.gather_nd(range_image_cartesian, tf.where(range_image_mask)) points_NLZ_tensor = tf.gather_nd( range_image_NLZ, tf.compat.v1.where(range_image_mask) ) points_intensity_tensor = tf.gather_nd( range_image_intensity, tf.compat.v1.where(range_image_mask) ) points_elongation_tensor = tf.gather_nd( range_image_elongation, tf.compat.v1.where(range_image_mask) ) cp = camera_projections[c.name][0] cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims) cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask)) points.append(points_tensor.numpy()) cp_points.append(cp_points_tensor.numpy()) points_NLZ.append(points_NLZ_tensor.numpy()) points_intensity.append(points_intensity_tensor.numpy()) points_elongation.append(points_elongation_tensor.numpy()) return points, cp_points, points_NLZ, points_intensity, points_elongation def 
save_lidar_points(frame, cur_save_path): parsed_frame = frame_utils.parse_range_image_and_camera_projection(frame) if len(parsed_frame) == 4: # New API. ( range_images, camera_projections, seg_labels, range_image_top_pose, ) = parsed_frame else: # Old API. ( range_images, camera_projections, range_image_top_pose, ) = parsed_frame ( points, cp_points, points_in_NLZ_flag, points_intensity, points_elongation, ) = convert_range_image_to_point_cloud( frame, range_images, camera_projections, range_image_top_pose ) # 3d points in vehicle frame. points_all = np.concatenate(points, axis=0) points_in_NLZ_flag = np.concatenate(points_in_NLZ_flag, axis=0).reshape(-1, 1) points_intensity = np.concatenate(points_intensity, axis=0).reshape(-1, 1) points_elongation = np.concatenate(points_elongation, axis=0).reshape(-1, 1) num_points_of_each_lidar = [point.shape[0] for point in points] save_points = np.concatenate( [points_all, points_intensity, points_elongation, points_in_NLZ_flag], axis=-1 ).astype(np.float32) np.save(cur_save_path, save_points) # print('saving to ', cur_save_path) return num_points_of_each_lidar def process_single_sequence( scene_file, save_path, sampled_interval, has_label=True, enable_only_save_lidar_poses=False, ): # Complete separate code path for only saving lidar poses. # This will always run without checking if the scene has been processed. 
if enable_only_save_lidar_poses: return process_single_sequence_only_save_lidar_poses( scene_file=scene_file, save_path=save_path, sampled_interval=sampled_interval, has_label=has_label, ) sequence_name = os.path.splitext(os.path.basename(scene_file))[0] # print('Load record (sampled_interval=%d): %s' % (sampled_interval, sequence_name)) if not scene_file.exists(): print("NotFoundError: %s" % scene_file) return [] dataset = tf.data.TFRecordDataset(str(scene_file), compression_type="") cur_save_dir = save_path / sequence_name cur_save_dir.mkdir(parents=True, exist_ok=True) pkl_file = cur_save_dir / ("%s.pkl" % sequence_name) scene_infos = [] if pkl_file.exists(): scene_infos = pickle.load(open(pkl_file, "rb")) print("Skip scene since it has been processed before: %s" % pkl_file) return scene_infos for cnt, data in enumerate(dataset): if cnt % sampled_interval != 0: continue # print(sequence_name, cnt) frame = dataset_pb2.Frame() frame.ParseFromString(bytearray(data.numpy())) info = {} pc_info = { "num_features": 5, "lidar_sequence": sequence_name, "sample_idx": cnt, } info["point_cloud"] = pc_info info["frame_id"] = sequence_name + ("_%03d" % cnt) image_info = {} for j in range(5): width = frame.context.camera_calibrations[j].width height = frame.context.camera_calibrations[j].height image_info.update({"image_shape_%d" % j: (height, width)}) info["image"] = image_info # Save the intrinsics, 4x4 extrinsic matrix, width, and height of each camera. 
save_waymo_calibrations = False if save_waymo_calibrations: clib_dict = dict() for c in frame.context.camera_calibrations: cam_name_str = dataset_pb2.CameraName.Name.Name(c.name) clib_dict[f"CAM_{cam_name_str}_INTRINSIC"] = np.array( c.intrinsic, np.float32 ) clib_dict[f"CAM_{cam_name_str}_EXTRINSIC"] = np.reshape( np.array(c.extrinsic.transform, np.float32), [4, 4] ) clib_dict[f"CAM_{cam_name_str}_WIDTH"] = np.array(c.width) clib_dict[f"CAM_{cam_name_str}_HEIGHT"] = np.array(c.height) clib_dict[f"CAM_{cam_name_str}_ROLLING_SHUTTER_DIRECTION"] = np.array( c.rolling_shutter_direction ) for l in frame.context.laser_calibrations: lidar_name_str = dataset_pb2.LaserName.Name.Name(l.name) clib_dict[f"LIDAR_{lidar_name_str}_EXTRINSIC"] = np.reshape( np.array(l.extrinsic.transform, np.float32), [4, 4] ) save_path = "waymo_calibrations.pkl" pickle.dump(clib_dict, open(save_path, "wb")) print(f"Calibrations are saved to {save_path}") exit(0) pose = np.array(frame.pose.transform, dtype=np.float32).reshape(4, 4) info["pose"] = pose if has_label: annotations = generate_labels(frame) info["annos"] = annotations num_points_of_each_lidar = save_lidar_points( frame, cur_save_dir / ("%04d.npy" % cnt) ) info["num_points_of_each_lidar"] = num_points_of_each_lidar scene_infos.append(info) with open(pkl_file, "wb") as f: pickle.dump(scene_infos, f) print("Infos are saved to (sampled_interval=%d): %s" % (sampled_interval, pkl_file)) return scene_infos def process_single_sequence_only_save_lidar_poses( scene_file, save_path, sampled_interval, has_label=True, ): """ Extra info will be save to: f"{save_path}_extra" / f"{sequence_name}.pkl" """ # Paths. sequence_name = os.path.splitext(os.path.basename(scene_file))[0] pkl_dir = save_path.parent / f"{save_path.name}_extra" pkl_path = save_path.parent / f"{save_path.name}_extra" / f"{sequence_name}.pkl" pkl_dir.mkdir(parents=True, exist_ok=True) # Check input. 
if not scene_file.exists(): print("NotFoundError: %s" % scene_file) return [] # Check output. scene_infos = [] if pkl_path.exists(): scene_infos = pickle.load(open(pkl_path, "rb")) print("Skip scene since it has been processed before: %s" % pkl_path) return scene_infos dataset = tf.data.TFRecordDataset(str(scene_file), compression_type="") for cnt, data in enumerate(dataset): if cnt % sampled_interval != 0: continue frame = dataset_pb2.Frame() frame.ParseFromString(bytearray(data.numpy())) # TOP: 1, FRONT: 2, SIDE_LEFT: 3, SIDE_RIGHT: 4, REAR: 5 calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name) lidar_poses = [ np.reshape(np.array(c.extrinsic.transform, np.float32), [4, 4]) for c in calibrations ] frame_pose = np.array(frame.pose.transform, np.float32).reshape(4, 4) info = {} info["sequence_name"] = sequence_name info["sample_idx"] = cnt info["frame_id"] = sequence_name + ("_%03d" % cnt) info["lidar_to_vehicle_poses"] = lidar_poses info["frame_pose"] = frame_pose scene_infos.append(info) with open(pkl_path, "wb") as f: pickle.dump(scene_infos, f) return scene_infos
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/datasets/wayscenes/wayscenes_dataset.py
Python
import numpy as np from torch.utils.data import Dataset from pcdet.datasets.dataset import DatasetTemplate from pcdet.datasets.nuscenes.nuscenes_dataset import NuScenesDataset from pcdet.datasets.waymo.waymo_dataset import WaymoDataset class WayScenesDataset(DatasetTemplate): def __init__( self, waymo_cfg, nuscenes_cfg, training=True, root_path=None, logger=None, ): """ Initializes WayScenes dataset which contains instances of WaymoDataset and NuScenesDataset. Args: waymo_cfg: Configuration for Waymo dataset. nuscenes_cfg: Configuration for NuScenes dataset. training: Boolean indicating if the dataset is used for training. root_path: Root directory path where datasets are stored. logger: Logger for logging purposes. """ # Initialize Waymo dataset self.waymo_dataset = WaymoDataset( dataset_cfg=waymo_cfg, class_names=waymo_cfg.CLASS_NAMES, training=training, root_path=root_path, logger=logger, ) # Initialize NuScenes dataset self.nuscenes_dataset = NuScenesDataset( dataset_cfg=nuscenes_cfg, class_names=nuscenes_cfg.CLASS_NAMES, training=training, root_path=root_path, logger=logger, ) # Compute the dataset length based on WAYMO_TO_NUSCENES_RATIO self.waymo_sampled_length = int( len(self.nuscenes_dataset) * waymo_cfg.WAYMO_TO_NUSCENES_RATIO ) self.total_length = self.waymo_sampled_length + len(self.nuscenes_dataset) print("[WayScenes Dataset]") print(f"- Total samples per epoch : {self.total_length}") print(f"- Waymo samples per epoch : {self.waymo_sampled_length}") print(f"- NuScenes samples per epoch: {len(self.nuscenes_dataset)}") # Manually export some shared attributes. ############################## # - Ideally, this shall not be necessary. A Dataset class shall only be # responsible for providing data via __getitem__ and __len__ methods. # - However, in OpenPCDet, a Dataset class's attributes are also used # to build the network architecture. # - Here, we first check if the shared attributes are the same for both # datasets. If they are, we export them. 
If not, we pick one of the # values manually. ######################################################################## # class_names assert self.waymo_dataset.class_names == self.nuscenes_dataset.class_names self.class_names = self.waymo_dataset.class_names # point_feature_encoder # manually checked POINT_FEATURE_ENCODING in: # - tools/cfgs/dataset_configs/da_waymo_dataset.yaml # - tools/cfgs/dataset_configs/da_nuscenes_kitti_dataset.yaml self.point_feature_encoder = self.waymo_dataset.point_feature_encoder # grid_size assert np.allclose( self.waymo_dataset.grid_size, self.nuscenes_dataset.grid_size, ) self.grid_size = self.waymo_dataset.grid_size # point_cloud_range assert np.allclose( self.waymo_dataset.point_cloud_range, self.nuscenes_dataset.point_cloud_range, ) self.point_cloud_range = self.waymo_dataset.point_cloud_range # voxel_size assert np.allclose( self.waymo_dataset.voxel_size, self.nuscenes_dataset.voxel_size, ) self.voxel_size = self.waymo_dataset.voxel_size # dataset_cfg # This is really bad, but it is required for SECOND self.dataset_cfg = self.waymo_dataset.dataset_cfg ######################################################################## def __len__(self): return self.total_length def __getitem__(self, index): if index < self.waymo_sampled_length: waymo_index = np.random.randint(0, len(self.waymo_dataset)) # print(f"WayScenes[{index:05d}]: Waymo[{waymo_index:05d}]") return self.waymo_dataset[waymo_index] else: nuscenes_index = index - self.waymo_sampled_length # print(f"WayScenes[{index:05d}]: NuScenes[{nuscenes_index:05d}]") return self.nuscenes_dataset[nuscenes_index]
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/models/__init__.py
Python
from collections import namedtuple import numpy as np import torch from pcdet.models.detectors import build_detector def build_network(model_cfg, num_class, dataset): model = build_detector(model_cfg=model_cfg, num_class=num_class, dataset=dataset) return model def load_data_to_gpu(batch_dict): for key, val in batch_dict.items(): if not isinstance(val, np.ndarray): continue if val.dtype == np.str_: continue if key in ["frame_id", "metadata", "calib", "image_shape"]: continue batch_dict[key] = torch.from_numpy(val).float().cuda() def model_fn_decorator(): ModelReturn = namedtuple("ModelReturn", ["loss", "tb_dict", "disp_dict"]) def model_func(model, batch_dict): load_data_to_gpu(batch_dict) ret_dict, tb_dict, disp_dict = model(batch_dict) loss = ret_dict["loss"].mean() if hasattr(model, "update_global_step"): model.update_global_step() else: model.module.update_global_step() return ModelReturn(loss, tb_dict, disp_dict) return model_func
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/models/backbones_2d/__init__.py
Python
from pcdet.models.backbones_2d.base_bev_backbone import BaseBEVBackbone __all__ = {"BaseBEVBackbone": BaseBEVBackbone}
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/models/backbones_2d/base_bev_backbone.py
Python
import numpy as np import torch import torch.nn as nn class BaseBEVBackbone(nn.Module): def __init__(self, model_cfg, input_channels): super().__init__() self.model_cfg = model_cfg if self.model_cfg.get("LAYER_NUMS", None) is not None: assert ( len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS) ) layer_nums = self.model_cfg.LAYER_NUMS layer_strides = self.model_cfg.LAYER_STRIDES num_filters = self.model_cfg.NUM_FILTERS else: layer_nums = layer_strides = num_filters = [] if self.model_cfg.get("UPSAMPLE_STRIDES", None) is not None: assert len(self.model_cfg.UPSAMPLE_STRIDES) == len( self.model_cfg.NUM_UPSAMPLE_FILTERS ) num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS upsample_strides = self.model_cfg.UPSAMPLE_STRIDES else: upsample_strides = num_upsample_filters = [] num_levels = len(layer_nums) c_in_list = [input_channels, *num_filters[:-1]] self.blocks = nn.ModuleList() self.deblocks = nn.ModuleList() for idx in range(num_levels): cur_layers = [ nn.ZeroPad2d(1), nn.Conv2d( c_in_list[idx], num_filters[idx], kernel_size=3, stride=layer_strides[idx], padding=0, bias=False, ), nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01), nn.ReLU(), ] for k in range(layer_nums[idx]): cur_layers.extend( [ nn.Conv2d( num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False, ), nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01), nn.ReLU(), ] ) self.blocks.append(nn.Sequential(*cur_layers)) if len(upsample_strides) > 0: stride = upsample_strides[idx] if stride >= 1: self.deblocks.append( nn.Sequential( nn.ConvTranspose2d( num_filters[idx], num_upsample_filters[idx], upsample_strides[idx], stride=upsample_strides[idx], bias=False, ), nn.BatchNorm2d( num_upsample_filters[idx], eps=1e-3, momentum=0.01 ), nn.ReLU(), ) ) else: stride = np.round(1 / stride).astype(np.int) self.deblocks.append( nn.Sequential( nn.Conv2d( num_filters[idx], num_upsample_filters[idx], stride, stride=stride, bias=False, 
), nn.BatchNorm2d( num_upsample_filters[idx], eps=1e-3, momentum=0.01 ), nn.ReLU(), ) ) c_in = sum(num_upsample_filters) if len(upsample_strides) > num_levels: self.deblocks.append( nn.Sequential( nn.ConvTranspose2d( c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False, ), nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01), nn.ReLU(), ) ) self.num_bev_features = c_in def forward(self, data_dict): """ Args: data_dict: spatial_features Returns: """ spatial_features = data_dict["spatial_features"] ups = [] ret_dict = {} x = spatial_features for i in range(len(self.blocks)): x = self.blocks[i](x) stride = int(spatial_features.shape[2] / x.shape[2]) ret_dict["spatial_features_%dx" % stride] = x if len(self.deblocks) > 0: ups.append(self.deblocks[i](x)) else: ups.append(x) if len(ups) > 1: x = torch.cat(ups, dim=1) elif len(ups) == 1: x = ups[0] if len(self.deblocks) > len(self.blocks): x = self.deblocks[-1](x) data_dict["spatial_features_2d"] = x return data_dict
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/models/backbones_2d/map_to_bev/__init__.py
Python
from pcdet.models.backbones_2d.map_to_bev.height_compression import HeightCompression from pcdet.models.backbones_2d.map_to_bev.pointpillar_scatter import PointPillarScatter __all__ = { "HeightCompression": HeightCompression, "PointPillarScatter": PointPillarScatter, }
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/models/backbones_2d/map_to_bev/height_compression.py
Python
import torch.nn as nn class HeightCompression(nn.Module): def __init__(self, model_cfg, **kwargs): super().__init__() self.model_cfg = model_cfg self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES def forward(self, batch_dict): """ Args: batch_dict: encoded_spconv_tensor: sparse tensor Returns: batch_dict: spatial_features: """ encoded_spconv_tensor = batch_dict["encoded_spconv_tensor"] spatial_features = encoded_spconv_tensor.dense() N, C, D, H, W = spatial_features.shape spatial_features = spatial_features.view(N, C * D, H, W) batch_dict["spatial_features"] = spatial_features batch_dict["spatial_features_stride"] = batch_dict[ "encoded_spconv_tensor_stride" ] return batch_dict
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS
pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py
Python
import torch import torch.nn as nn class PointPillarScatter(nn.Module): def __init__(self, model_cfg, grid_size, **kwargs): super().__init__() self.model_cfg = model_cfg self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES self.nx, self.ny, self.nz = grid_size assert self.nz == 1 def forward(self, batch_dict, **kwargs): pillar_features, coords = ( batch_dict["pillar_features"], batch_dict["voxel_coords"], ) batch_spatial_features = [] batch_size = coords[:, 0].max().int().item() + 1 for batch_idx in range(batch_size): spatial_feature = torch.zeros( self.num_bev_features, self.nz * self.nx * self.ny, dtype=pillar_features.dtype, device=pillar_features.device, ) batch_mask = coords[:, 0] == batch_idx this_coords = coords[batch_mask, :] indices = ( this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3] ) indices = indices.type(torch.long) pillars = pillar_features[batch_mask, :] pillars = pillars.t() spatial_feature[:, indices] = pillars batch_spatial_features.append(spatial_feature) batch_spatial_features = torch.stack(batch_spatial_features, 0) batch_spatial_features = batch_spatial_features.view( batch_size, self.num_bev_features * self.nz, self.ny, self.nx ) batch_dict["spatial_features"] = batch_spatial_features return batch_dict
yxlao/lit
24
(NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator
Python
yxlao
Yixing Lao
HKU-CS