repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/row_selection.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <io/utilities/row_selection.hpp> #include <cudf/utilities/error.hpp> #include <algorithm> #include <limits> namespace cudf::io::detail { std::pair<uint64_t, size_type> skip_rows_num_rows_from_options( uint64_t skip_rows, std::optional<size_type> const& num_rows, uint64_t num_source_rows) { auto const rows_to_skip = std::min(skip_rows, num_source_rows); if (not num_rows.has_value()) { CUDF_EXPECTS(num_source_rows - rows_to_skip <= std::numeric_limits<size_type>::max(), "The requested number of rows exceeds the column size limit", std::overflow_error); return {rows_to_skip, num_source_rows - rows_to_skip}; } // Limit the number of rows to the end of the input return { rows_to_skip, static_cast<size_type>(std::min<uint64_t>(num_rows.value(), num_source_rows - rows_to_skip))}; } } // namespace cudf::io::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/file_io_utilities.cpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "file_io_utilities.hpp" #include <cudf/detail/utilities/integer_utils.hpp> #include <io/utilities/config_utils.hpp> #include <rmm/device_buffer.hpp> #include <dlfcn.h> #include <fstream> #include <numeric> namespace cudf { namespace io { namespace detail { size_t get_file_size(int file_descriptor) { struct stat st; CUDF_EXPECTS(fstat(file_descriptor, &st) != -1, "Cannot query file size"); return static_cast<size_t>(st.st_size); } file_wrapper::file_wrapper(std::string const& filepath, int flags) : fd(open(filepath.c_str(), flags)), _size{get_file_size(fd)} { CUDF_EXPECTS(fd != -1, "Cannot open file " + filepath); } file_wrapper::file_wrapper(std::string const& filepath, int flags, mode_t mode) : fd(open(filepath.c_str(), flags, mode)), _size{get_file_size(fd)} { CUDF_EXPECTS(fd != -1, "Cannot open file " + filepath); } file_wrapper::~file_wrapper() { close(fd); } #ifdef CUFILE_FOUND /** * @brief Class that dynamically loads the cuFile library and manages the cuFile driver. 
*/ class cufile_shim { private: cufile_shim(); void modify_cufile_json() const; void load_cufile_lib(); void* cf_lib = nullptr; decltype(cuFileDriverOpen)* driver_open = nullptr; decltype(cuFileDriverClose)* driver_close = nullptr; std::unique_ptr<cudf::logic_error> init_error; auto is_valid() const noexcept { return init_error == nullptr; } public: cufile_shim(cufile_shim const&) = delete; cufile_shim& operator=(cufile_shim const&) = delete; static cufile_shim const* instance(); ~cufile_shim() { if (driver_close != nullptr) driver_close(); if (cf_lib != nullptr) dlclose(cf_lib); } decltype(cuFileHandleRegister)* handle_register = nullptr; decltype(cuFileHandleDeregister)* handle_deregister = nullptr; decltype(cuFileRead)* read = nullptr; decltype(cuFileWrite)* write = nullptr; }; void cufile_shim::modify_cufile_json() const { std::string const json_path_env_var = "CUFILE_ENV_PATH_JSON"; static temp_directory tmp_config_dir{"cudf_cufile_config"}; // Modify the config file based on the policy auto const config_file_path = getenv_or<std::string>(json_path_env_var, "/etc/cufile.json"); std::ifstream user_config_file(config_file_path); // Modified config file is stored in a temporary directory auto const cudf_config_path = tmp_config_dir.path() + "cufile.json"; std::ofstream cudf_config_file(cudf_config_path); std::string line; while (std::getline(user_config_file, line)) { std::string const tag = "\"allow_compat_mode\""; if (line.find(tag) != std::string::npos) { // TODO: only replace the true/false value instead of replacing the whole line // Enable compatibility mode when cuDF does not fall back to host path cudf_config_file << tag << ": " << (cufile_integration::is_always_enabled() ? 
"true" : "false") << ",\n"; } else { cudf_config_file << line << '\n'; } // Point libcufile to the modified config file CUDF_EXPECTS(setenv(json_path_env_var.c_str(), cudf_config_path.c_str(), 0) == 0, "Failed to set the cuFile config file environment variable."); } } void cufile_shim::load_cufile_lib() { for (auto&& name : {"libcufile.so.0", // Prior to CUDA 11.7.1, although ABI // compatibility was maintained, some (at least // Debian) packages do not have the .0 symlink, // instead request the exact version. "libcufile.so.1.3.0" /* 11.7.0 */, "libcufile.so.1.2.1" /* 11.6.2, 11.6.1 */, "libcufile.so.1.2.0" /* 11.6.0 */, "libcufile.so.1.1.1" /* 11.5.1 */, "libcufile.so.1.1.0" /* 11.5.0 */, "libcufile.so.1.0.2" /* 11.4.4, 11.4.3, 11.4.2 */, "libcufile.so.1.0.1" /* 11.4.1 */, "libcufile.so.1.0.0" /* 11.4.0 */}) { cf_lib = dlopen(name, RTLD_LAZY | RTLD_LOCAL | RTLD_NODELETE); if (cf_lib != nullptr) break; } CUDF_EXPECTS(cf_lib != nullptr, "Failed to load cuFile library"); driver_open = reinterpret_cast<decltype(driver_open)>(dlsym(cf_lib, "cuFileDriverOpen")); CUDF_EXPECTS(driver_open != nullptr, "could not find cuFile cuFileDriverOpen symbol"); driver_close = reinterpret_cast<decltype(driver_close)>(dlsym(cf_lib, "cuFileDriverClose")); CUDF_EXPECTS(driver_close != nullptr, "could not find cuFile cuFileDriverClose symbol"); handle_register = reinterpret_cast<decltype(handle_register)>(dlsym(cf_lib, "cuFileHandleRegister")); CUDF_EXPECTS(handle_register != nullptr, "could not find cuFile cuFileHandleRegister symbol"); handle_deregister = reinterpret_cast<decltype(handle_deregister)>(dlsym(cf_lib, "cuFileHandleDeregister")); CUDF_EXPECTS(handle_deregister != nullptr, "could not find cuFile cuFileHandleDeregister symbol"); read = reinterpret_cast<decltype(read)>(dlsym(cf_lib, "cuFileRead")); CUDF_EXPECTS(read != nullptr, "could not find cuFile cuFileRead symbol"); write = reinterpret_cast<decltype(write)>(dlsym(cf_lib, "cuFileWrite")); CUDF_EXPECTS(write != nullptr, 
"could not find cuFile cuFileWrite symbol"); } cufile_shim::cufile_shim() { try { modify_cufile_json(); load_cufile_lib(); CUDF_EXPECTS(driver_open().err == CU_FILE_SUCCESS, "Failed to initialize cuFile driver"); } catch (cudf::logic_error const& err) { init_error = std::make_unique<cudf::logic_error>(err); } } cufile_shim const* cufile_shim::instance() { static cufile_shim _instance; // Defer throwing to avoid repeated attempts to load the library if (!_instance.is_valid()) CUDF_FAIL("" + std::string(_instance.init_error->what())); return &_instance; } void cufile_registered_file::register_handle() { CUfileDescr_t cufile_desc{}; cufile_desc.handle.fd = _file.desc(); cufile_desc.type = CU_FILE_HANDLE_TYPE_OPAQUE_FD; CUDF_EXPECTS(shim->handle_register(&cf_handle, &cufile_desc).err == CU_FILE_SUCCESS, "Cannot register file handle with cuFile"); } cufile_registered_file::~cufile_registered_file() { shim->handle_deregister(cf_handle); } cufile_input_impl::cufile_input_impl(std::string const& filepath) : shim{cufile_shim::instance()}, cf_file(shim, filepath, O_RDONLY | O_DIRECT), // The benefit from multithreaded read plateaus around 16 threads pool(getenv_or("LIBCUDF_CUFILE_THREAD_COUNT", 16)) { pool.sleep_duration = 10; } namespace { template <typename DataT, typename F, typename ResultT = std::invoke_result_t<F, DataT*, size_t, size_t>> std::vector<std::future<ResultT>> make_sliced_tasks( F function, DataT* ptr, size_t offset, size_t size, cudf::detail::thread_pool& pool) { constexpr size_t default_max_slice_size = 4 * 1024 * 1024; static auto const max_slice_size = getenv_or("LIBCUDF_CUFILE_SLICE_SIZE", default_max_slice_size); auto const slices = make_file_io_slices(size, max_slice_size); std::vector<std::future<ResultT>> slice_tasks; std::transform(slices.cbegin(), slices.cend(), std::back_inserter(slice_tasks), [&](auto& slice) { return pool.submit(function, ptr + slice.offset, slice.size, offset + slice.offset); }); return slice_tasks; } } // namespace 
std::future<size_t> cufile_input_impl::read_async(size_t offset, size_t size, uint8_t* dst, rmm::cuda_stream_view stream) { int device; CUDF_CUDA_TRY(cudaGetDevice(&device)); auto read_slice = [device, gds_read = shim->read, file_handle = cf_file.handle()]( void* dst, size_t size, size_t offset) -> ssize_t { CUDF_CUDA_TRY(cudaSetDevice(device)); auto read_size = gds_read(file_handle, dst, size, offset, 0); CUDF_EXPECTS(read_size != -1, "cuFile error reading from a file"); return read_size; }; auto slice_tasks = make_sliced_tasks(read_slice, dst, offset, size, pool); auto waiter = [](auto slice_tasks) -> size_t { return std::accumulate(slice_tasks.begin(), slice_tasks.end(), 0, [](auto sum, auto& task) { return sum + task.get(); }); }; // The future returned from this function is deferred, not async because we want to avoid creating // threads for each read_async call. This overhead is significant in case of multiple small reads. return std::async(std::launch::deferred, waiter, std::move(slice_tasks)); } cufile_output_impl::cufile_output_impl(std::string const& filepath) : shim{cufile_shim::instance()}, cf_file(shim, filepath, O_CREAT | O_RDWR | O_DIRECT, 0664), pool(getenv_or("LIBCUDF_CUFILE_THREAD_COUNT", 16)) { } std::future<void> cufile_output_impl::write_async(void const* data, size_t offset, size_t size) { int device; CUDF_CUDA_TRY(cudaGetDevice(&device)); auto write_slice = [device, gds_write = shim->write, file_handle = cf_file.handle()]( void const* src, size_t size, size_t offset) -> void { CUDF_CUDA_TRY(cudaSetDevice(device)); auto write_size = gds_write(file_handle, src, size, offset, 0); CUDF_EXPECTS(write_size != -1 and write_size == static_cast<decltype(write_size)>(size), "cuFile error writing to a file"); }; auto source = static_cast<uint8_t const*>(data); auto slice_tasks = make_sliced_tasks(write_slice, source, offset, size, pool); auto waiter = [](auto slice_tasks) -> void { for (auto const& task : slice_tasks) { task.wait(); } }; // The future 
returned from this function is deferred, not async because we want to avoid creating // threads for each write_async call. This overhead is significant in case of multiple small // writes. return std::async(std::launch::deferred, waiter, std::move(slice_tasks)); } #else cufile_input_impl::cufile_input_impl(std::string const& filepath) { CUDF_FAIL("Cannot create cuFile source, current build was compiled without cuFile headers"); } cufile_output_impl::cufile_output_impl(std::string const& filepath) { CUDF_FAIL("Cannot create cuFile sink, current build was compiled without cuFile headers"); } #endif std::unique_ptr<cufile_input_impl> make_cufile_input(std::string const& filepath) { if (cufile_integration::is_gds_enabled()) { try { auto cufile_in = std::make_unique<cufile_input_impl>(filepath); CUDF_LOG_INFO("File successfully opened for reading with GDS."); return cufile_in; } catch (...) { if (cufile_integration::is_always_enabled()) { CUDF_LOG_ERROR( "Failed to open file for reading with GDS. Enable bounce buffer fallback to read this " "file."); throw; } CUDF_LOG_INFO( "Failed to open file for reading with GDS. Data will be read from the file using a bounce " "buffer (possible performance impact)."); } } return {}; } std::unique_ptr<cufile_output_impl> make_cufile_output(std::string const& filepath) { if (cufile_integration::is_gds_enabled()) { try { auto cufile_out = std::make_unique<cufile_output_impl>(filepath); CUDF_LOG_INFO("File successfully opened for writing with GDS."); return cufile_out; } catch (...) { if (cufile_integration::is_always_enabled()) { CUDF_LOG_ERROR( "Failed to open file for writing with GDS. Enable bounce buffer fallback to write to " "this file."); throw; } CUDF_LOG_INFO( "Failed to open file for writing with GDS. 
Data will be written to the file using a bounce " "buffer (possible performance impact)."); } } return {}; } std::vector<file_io_slice> make_file_io_slices(size_t size, size_t max_slice_size) { max_slice_size = std::max(1024ul, max_slice_size); auto const n_slices = util::div_rounding_up_safe(size, max_slice_size); std::vector<file_io_slice> slices; slices.reserve(n_slices); std::generate_n(std::back_inserter(slices), n_slices, [&, idx = 0]() mutable { auto const slice_offset = idx++ * max_slice_size; auto const slice_size = std::min(size - slice_offset, max_slice_size); return file_io_slice{slice_offset, slice_size}; }); return slices; } } // namespace detail } // namespace io } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/hostdevice_span.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf::detail { template <typename T> class hostdevice_span { public: using value_type = T; hostdevice_span() = default; ~hostdevice_span() = default; hostdevice_span(hostdevice_span const&) = default; ///< Copy constructor hostdevice_span(hostdevice_span&&) = default; ///< Move constructor hostdevice_span(T* cpu_data, T* gpu_data, size_t size) : _size(size), _host_data(cpu_data), _device_data(gpu_data) { } /** * @brief Copy assignment operator. * * @return Reference to this hostdevice_span. */ constexpr hostdevice_span& operator=(hostdevice_span const&) noexcept = default; /** * @brief Converts a hostdevice view into a device span. * * @tparam T The device span type. * @return A typed device span of the hostdevice view's data. */ [[nodiscard]] operator cudf::device_span<T>() const { return cudf::device_span(_device_data, size()); } /** * @brief Returns the underlying device data. * * @tparam T The type to cast to * @return T const* Typed pointer to underlying data */ [[nodiscard]] T* device_ptr(size_t offset = 0) const noexcept { return _device_data + offset; } /** * @brief Return first element in device data. 
* * @tparam T The desired type * @return T const* Pointer to the first element */ [[nodiscard]] T* device_begin() const noexcept { return device_ptr(); } /** * @brief Return one past the last element in device_data. * * @tparam T The desired type * @return T const* Pointer to one past the last element */ [[nodiscard]] T* device_end() const noexcept { return device_begin() + size(); } /** * @brief Converts a hostdevice_span into a host span. * * @tparam T The host span type. * @return A typed host span of the hostdevice_span's data. */ [[nodiscard]] operator cudf::host_span<T>() const noexcept { return cudf::host_span<T>(_host_data, size()); } /** * @brief Returns the underlying host data. * * @tparam T The type to cast to * @return T* Typed pointer to underlying data */ [[nodiscard]] T* host_ptr(size_t offset = 0) const noexcept { return _host_data + offset; } /** * @brief Return first element in host data. * * @tparam T The desired type * @return T const* Pointer to the first element */ [[nodiscard]] T* host_begin() const noexcept { return host_ptr(); } /** * @brief Return one past the last elementin host data. 
* * @tparam T The desired type * @return T const* Pointer to one past the last element */ [[nodiscard]] T* host_end() const noexcept { return host_begin() + size(); } /** * @brief Returns the number of elements in the view * * @return The number of elements in the view */ [[nodiscard]] std::size_t size() const noexcept { return _size; } /** * @brief Returns true if `size()` returns zero, or false otherwise * * @return True if `size()` returns zero, or false otherwise */ [[nodiscard]] bool is_empty() const noexcept { return size() == 0; } [[nodiscard]] size_t size_bytes() const noexcept { return sizeof(T) * size(); } [[nodiscard]] T& operator[](size_t i) { return _host_data[i]; } [[nodiscard]] T const& operator[](size_t i) const { return _host_data[i]; } /** * @brief Obtains a hostdevice_span that is a view over the `count` elements of this * hostdevice_span starting at offset * * @param offset The offset of the first element in the subspan * @param count The number of elements in the subspan * @return A subspan of the sequence, of requested count and offset */ constexpr hostdevice_span<T> subspan(size_t offset, size_t count) const noexcept { return hostdevice_span<T>(_host_data + offset, _device_data + offset, count); } void host_to_device_async(rmm::cuda_stream_view stream) { CUDF_CUDA_TRY( cudaMemcpyAsync(device_ptr(), host_ptr(), size_bytes(), cudaMemcpyDefault, stream.value())); } void host_to_device_sync(rmm::cuda_stream_view stream) { host_to_device_async(stream); stream.synchronize(); } void device_to_host_async(rmm::cuda_stream_view stream) { CUDF_CUDA_TRY( cudaMemcpyAsync(host_ptr(), device_ptr(), size_bytes(), cudaMemcpyDefault, stream.value())); } void device_to_host_sync(rmm::cuda_stream_view stream) { device_to_host_async(stream); stream.synchronize(); } private: size_t _size{}; ///< Number of elements T* _device_data{}; ///< Pointer to device memory containing elements T* _host_data{}; ///< Pointer to host memory containing elements }; } // namespace 
cudf::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/trie.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @brief Serialized trie implementation for C++/CUDA * @file trie.cu */ #include "trie.cuh" #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/utilities/span.hpp> #include <cuda_runtime.h> #include <deque> #include <string> #include <vector> namespace cudf { namespace detail { rmm::device_uvector<serial_trie_node> create_serialized_trie(std::vector<std::string> const& keys, rmm::cuda_stream_view stream) { if (keys.empty()) { return rmm::device_uvector<serial_trie_node>{0, stream}; } static constexpr int alphabet_size = std::numeric_limits<char>::max() + 1; struct TreeTrieNode { using TrieNodePtr = std::unique_ptr<TreeTrieNode>; std::array<TrieNodePtr, alphabet_size> children; bool is_end_of_word = false; }; // Construct a tree-structured trie // The trie takes a lot of memory, but the lookup is fast: // allows direct addressing of children nodes TreeTrieNode tree_trie; for (auto const& key : keys) { auto* current_node = &tree_trie; for (char const character : key) { if (current_node->children[character] == nullptr) current_node->children[character] = std::make_unique<TreeTrieNode>(); current_node = current_node->children[character].get(); } current_node->is_end_of_word = true; } struct IndexedTrieNode { TreeTrieNode const* const pnode; int16_t const idx; IndexedTrieNode(TreeTrieNode const* const node, int16_t index) : pnode(node), idx(index) {} }; // Serialize 
the tree trie std::deque<IndexedTrieNode> to_visit; std::vector<serial_trie_node> nodes; // If the Tree trie matches empty strings, the root node is marked as 'end of word'. // The first node in the serialized trie is also used to match empty strings, so we're // initializing it using the `is_end_of_word` value from the root node. nodes.push_back(serial_trie_node(trie_terminating_character, tree_trie.is_end_of_word)); // Add root node to queue. this node is not included to the serialized trie to_visit.emplace_back(&tree_trie, -1); while (!to_visit.empty()) { auto const node_and_idx = to_visit.front(); auto const node = node_and_idx.pnode; auto const idx = node_and_idx.idx; to_visit.pop_front(); bool has_children = false; for (size_t i = 0; i < node->children.size(); ++i) { if (node->children[i] != nullptr) { // Update the children offset of the parent node, unless at the root if (idx >= 0 && nodes[idx].children_offset < 0) { nodes[idx].children_offset = static_cast<uint16_t>(nodes.size() - idx); } // Add node to the trie nodes.push_back(serial_trie_node(static_cast<char>(i), node->children[i]->is_end_of_word)); // Add to the queue, with the index within the new trie to_visit.emplace_back(node->children[i].get(), static_cast<uint16_t>(nodes.size()) - 1); has_children = true; } } // Only add the terminating character if any nodes were added if (has_children) { nodes.push_back(serial_trie_node(trie_terminating_character)); } } return cudf::detail::make_device_uvector_sync( nodes, stream, rmm::mr::get_current_device_resource()); } } // namespace detail } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/column_type_histogram.hpp
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/types.hpp> namespace cudf { namespace io { /** * @brief Per-column histogram struct containing detected occurrences of each dtype */ struct column_type_histogram { cudf::size_type null_count{}; cudf::size_type float_count{}; cudf::size_type datetime_count{}; cudf::size_type string_count{}; cudf::size_type negative_small_int_count{}; cudf::size_type positive_small_int_count{}; cudf::size_type big_int_count{}; cudf::size_type bool_count{}; auto total_count() const { return null_count + float_count + datetime_count + string_count + negative_small_int_count + positive_small_int_count + big_int_count + bool_count; } }; } // namespace io } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/data_sink.cpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fstream> #include "file_io_utilities.hpp" #include <cudf/io/data_sink.hpp> #include <cudf/utilities/error.hpp> #include <io/utilities/config_utils.hpp> #include <kvikio/file_handle.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { /** * @brief Implementation class for storing data into a local file. */ class file_sink : public data_sink { public: explicit file_sink(std::string const& filepath) { _output_stream.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc); CUDF_EXPECTS(_output_stream.is_open(), "Cannot open output file"); if (detail::cufile_integration::is_kvikio_enabled()) { _kvikio_file = kvikio::FileHandle(filepath, "w"); CUDF_LOG_INFO("Writing a file using kvikIO, with compatibility mode {}.", _kvikio_file.is_compat_mode_on() ? 
"on" : "off"); } else { _cufile_out = detail::make_cufile_output(filepath); } } virtual ~file_sink() { flush(); } void host_write(void const* data, size_t size) override { _output_stream.seekp(_bytes_written); _output_stream.write(static_cast<char const*>(data), size); _bytes_written += size; } void flush() override { _output_stream.flush(); } size_t bytes_written() override { return _bytes_written; } [[nodiscard]] bool supports_device_write() const override { return !_kvikio_file.closed() || _cufile_out != nullptr; } [[nodiscard]] bool is_device_write_preferred(size_t size) const override { if (size < _gds_write_preferred_threshold) { return false; } return supports_device_write(); } std::future<void> device_write_async(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override { if (!supports_device_write()) CUDF_FAIL("Device writes are not supported for this file."); size_t offset = _bytes_written; _bytes_written += size; if (!_kvikio_file.closed()) { // KvikIO's `pwrite()` returns a `std::future<size_t>` so we convert it // to `std::future<void>` return std::async(std::launch::deferred, [this, gpu_data, size, offset] { _kvikio_file.pwrite(gpu_data, size, offset).get(); }); } return _cufile_out->write_async(gpu_data, offset, size); } void device_write(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override { return device_write_async(gpu_data, size, stream).get(); } private: std::ofstream _output_stream; size_t _bytes_written = 0; std::unique_ptr<detail::cufile_output_impl> _cufile_out; kvikio::FileHandle _kvikio_file; // The write size above which GDS is faster then d2h-copy + posix-write static constexpr size_t _gds_write_preferred_threshold = 128 << 10; // 128KB }; /** * @brief Implementation class for storing data into a std::vector. 
*/ class host_buffer_sink : public data_sink { public: explicit host_buffer_sink(std::vector<char>* buffer) : buffer_(buffer) {} virtual ~host_buffer_sink() { flush(); } void host_write(void const* data, size_t size) override { auto char_array = static_cast<char const*>(data); buffer_->insert(buffer_->end(), char_array, char_array + size); } void flush() override {} size_t bytes_written() override { return buffer_->size(); } private: std::vector<char>* buffer_; }; /** * @brief Implementation class for voiding data (no io performed) */ class void_sink : public data_sink { public: explicit void_sink() {} virtual ~void_sink() {} void host_write(void const* data, size_t size) override { _bytes_written += size; } [[nodiscard]] bool supports_device_write() const override { return true; } void device_write(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override { _bytes_written += size; } std::future<void> device_write_async(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override { _bytes_written += size; return std::async(std::launch::deferred, [] {}); } void flush() override {} size_t bytes_written() override { return _bytes_written; } private: size_t _bytes_written; }; class user_sink_wrapper : public data_sink { public: explicit user_sink_wrapper(cudf::io::data_sink* const user_sink_) : user_sink(user_sink_) {} virtual ~user_sink_wrapper() {} void host_write(void const* data, size_t size) override { user_sink->host_write(data, size); } [[nodiscard]] bool supports_device_write() const override { return user_sink->supports_device_write(); } void device_write(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override { CUDF_EXPECTS(user_sink->supports_device_write(), "device_write() was called on a data_sink that doesn't support it"); user_sink->device_write(gpu_data, size, stream); } std::future<void> device_write_async(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override { 
CUDF_EXPECTS(user_sink->supports_device_write(), "device_write_async() was called on a data_sink that doesn't support it"); return user_sink->device_write_async(gpu_data, size, stream); } void flush() override { user_sink->flush(); } size_t bytes_written() override { return user_sink->bytes_written(); } private: cudf::io::data_sink* const user_sink; }; std::unique_ptr<data_sink> data_sink::create(std::string const& filepath) { return std::make_unique<file_sink>(filepath); } std::unique_ptr<data_sink> data_sink::create(std::vector<char>* buffer) { return std::make_unique<host_buffer_sink>(buffer); } std::unique_ptr<data_sink> data_sink::create() { return std::make_unique<void_sink>(); } std::unique_ptr<data_sink> data_sink::create(cudf::io::data_sink* const user_sink) { return std::make_unique<user_sink_wrapper>(user_sink); } } // namespace io } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/column_utils.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <io/statistics/statistics.cuh> #include <cudf/column/column_device_view.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/types.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> namespace cudf { namespace io { /** * @brief Create column_device_view pointers from leaf columns * * A device_uvector is created to store the leaves of parent columns. The * column descriptor array is updated to point to these leaf columns. 
* * @tparam ColumnDescriptor Struct describing properties of columns with * pointers to leaf and parent columns * * @param col_desc Column description array * @param parent_table_device_view Table device view containing parent columns * @param stream CUDA stream to use * * @return Device array containing leaf column device views */ template <typename ColumnDescriptor> rmm::device_uvector<column_device_view> create_leaf_column_device_views( typename cudf::device_span<ColumnDescriptor> col_desc, table_device_view const& parent_table_device_view, rmm::cuda_stream_view stream) { rmm::device_uvector<column_device_view> leaf_column_views(parent_table_device_view.num_columns(), stream); auto leaf_columns = cudf::device_span<column_device_view>{leaf_column_views}; auto iter = thrust::make_counting_iterator<size_type>(0); thrust::for_each( rmm::exec_policy(stream), iter, iter + parent_table_device_view.num_columns(), [col_desc, parent_col_view = parent_table_device_view, leaf_columns] __device__( size_type index) { col_desc[index].parent_column = parent_col_view.begin() + index; column_device_view col = parent_col_view.column(index); // traverse till leaf column while (col.type().id() == type_id::LIST || col.type().id() == type_id::STRUCT) { auto const child = (col.type().id() == type_id::LIST) ? col.child(lists_column_view::child_column_index) : col.child(0); // stop early if writing a byte array if (col_desc[index].stats_dtype == dtype_byte_array && child.type().id() == type_id::UINT8) { break; } col = child; } // Store leaf_column to device storage column_device_view* leaf_col_ptr = leaf_columns.begin() + index; *leaf_col_ptr = col; col_desc[index].leaf_column = leaf_col_ptr; }); return leaf_column_views; } } // namespace io } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/datasource.cpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "file_io_utilities.hpp"
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/arrow_io_source.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <io/utilities/config_utils.hpp>

#include <kvikio/file_handle.hpp>
#include <rmm/device_buffer.hpp>

#include <arrow/io/memory.h>

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <unordered_map>

namespace cudf {
namespace io {
namespace {

/**
 * @brief Base class for file input. Only implements direct device reads.
 *
 * Exactly one device-read backend is initialized in the constructor: kvikIO when
 * enabled via cufile_integration, otherwise a cuFile input (which may be null when
 * cuFile is unavailable). Host reads are left to derived classes.
 */
class file_source : public datasource {
 public:
  explicit file_source(char const* filepath) : _file(filepath, O_RDONLY)
  {
    if (detail::cufile_integration::is_kvikio_enabled()) {
      _kvikio_file = kvikio::FileHandle(filepath);
      CUDF_LOG_INFO("Reading a file using kvikIO, with compatibility mode {}.",
                    _kvikio_file.is_compat_mode_on() ? "on" : "off");
    } else {
      _cufile_in = detail::make_cufile_input(filepath);
    }
  }

  virtual ~file_source() = default;

  // Device reads are possible if either backend was successfully initialized.
  [[nodiscard]] bool supports_device_read() const override
  {
    return !_kvikio_file.closed() || _cufile_in != nullptr;
  }

  // Small reads go through the host path; GDS only pays off above the threshold.
  [[nodiscard]] bool is_device_read_preferred(size_t size) const override
  {
    if (size < _gds_read_preferred_threshold) { return false; }
    return supports_device_read();
  }

  /**
   * @brief Asynchronously reads up to `size` bytes at `offset` directly into device memory.
   *
   * The read length is clamped to the remaining file size.
   */
  std::future<size_t> device_read_async(size_t offset,
                                        size_t size,
                                        uint8_t* dst,
                                        rmm::cuda_stream_view stream) override
  {
    CUDF_EXPECTS(supports_device_read(), "Device reads are not supported for this file.");

    auto const read_size = std::min(size, _file.size() - offset);
    if (!_kvikio_file.closed()) { return _kvikio_file.pread(dst, read_size, offset); }
    return _cufile_in->read_async(offset, read_size, dst, stream);
  }

  // Synchronous wrapper around device_read_async.
  size_t device_read(size_t offset,
                     size_t size,
                     uint8_t* dst,
                     rmm::cuda_stream_view stream) override
  {
    return device_read_async(offset, size, dst, stream).get();
  }

  // Allocates a device buffer, reads into it, and shrinks it to the actual read size.
  std::unique_ptr<datasource::buffer> device_read(size_t offset,
                                                  size_t size,
                                                  rmm::cuda_stream_view stream) override
  {
    rmm::device_buffer out_data(size, stream);
    size_t read = device_read(offset, size, reinterpret_cast<uint8_t*>(out_data.data()), stream);
    out_data.resize(read, stream);
    return datasource::buffer::create(std::move(out_data));
  }

  [[nodiscard]] size_t size() const override { return _file.size(); }

 protected:
  detail::file_wrapper _file;  // owns the POSIX file descriptor (RAII)

 private:
  std::unique_ptr<detail::cufile_input_impl> _cufile_in;
  kvikio::FileHandle _kvikio_file;
  // The read size above which GDS is faster than posix-read + h2d-copy
  static constexpr size_t _gds_read_preferred_threshold = 128 << 10;  // 128KB
};

/**
 * @brief Memoized pageableMemoryAccessUsesHostPageTables device property.
 */
[[nodiscard]] bool pageableMemoryAccessUsesHostPageTables()
{
  // Cached per device so cudaGetDeviceProperties is queried at most once per device.
  static std::unordered_map<int, bool> result_cache{};

  int deviceId{};
  CUDF_CUDA_TRY(cudaGetDevice(&deviceId));

  if (result_cache.find(deviceId) == result_cache.end()) {
    cudaDeviceProp props{};
    CUDF_CUDA_TRY(cudaGetDeviceProperties(&props, deviceId));
    result_cache[deviceId] = (props.pageableMemoryAccessUsesHostPageTables == 1);
    CUDF_LOG_INFO(
      "Device {} pageableMemoryAccessUsesHostPageTables: {}", deviceId, result_cache[deviceId]);
  }

  return result_cache[deviceId];
}

/**
 * @brief Implementation class for reading from a file using memory mapped access.
 *
 * Unlike Arrow's memory mapped IO class, this implementation allows memory mapping a subset of the
 * file where the starting offset may not be zero.
 */
class memory_mapped_source : public file_source {
 public:
  explicit memory_mapped_source(char const* filepath, size_t offset, size_t size)
    : file_source(filepath)
  {
    // Empty files are left unmapped; host_read on them would trip the offset check.
    if (_file.size() != 0) {
      map(_file.desc(), offset, size);
      register_mmap_buffer();
    }
  }

  ~memory_mapped_source() override
  {
    if (_map_addr != nullptr) {
      munmap(_map_addr, _map_size);
      unregister_mmap_buffer();
    }
  }

  // Returns a non-owning view into the mapped region; valid only while this object lives.
  std::unique_ptr<buffer> host_read(size_t offset, size_t size) override
  {
    // NOTE(review): only the lower bound is validated; an `offset` past the end of the
    // mapping would make `_map_size - (offset - _map_offset)` wrap — confirm callers
    // always pass offsets inside the mapped range, or add an upper-bound CUDF_EXPECTS.
    CUDF_EXPECTS(offset >= _map_offset, "Requested offset is outside mapping");

    // Clamp length to available data in the mapped region
    auto const read_size = std::min(size, _map_size - (offset - _map_offset));
    return std::make_unique<non_owning_buffer>(
      static_cast<uint8_t*>(_map_addr) + (offset - _map_offset), read_size);
  }

  // Copying variant of host_read; returns the number of bytes actually copied.
  size_t host_read(size_t offset, size_t size, uint8_t* dst) override
  {
    // NOTE(review): same missing upper-bound check as the overload above.
    CUDF_EXPECTS(offset >= _map_offset, "Requested offset is outside mapping");

    // Clamp length to available data in the mapped region
    auto const read_size = std::min(size, _map_size - (offset - _map_offset));
    auto const src       = static_cast<uint8_t*>(_map_addr) + (offset - _map_offset);
    std::memcpy(dst, src, read_size);
    return read_size;
  }

 private:
  /**
   * @brief Page-locks (registers) the memory range of the mapped file.
   *
   * Only done on systems where pageable memory access uses host page tables,
   * where an unregistered mapping can misbehave with CUDA (fixes nvbugs/4215160).
   * Registration failure is logged, not fatal.
   */
  void register_mmap_buffer()
  {
    if (_map_addr == nullptr or _map_size == 0 or not pageableMemoryAccessUsesHostPageTables()) {
      return;
    }

    auto const result = cudaHostRegister(_map_addr, _map_size, cudaHostRegisterDefault);
    if (result == cudaSuccess) {
      _is_map_registered = true;
    } else {
      CUDF_LOG_WARN("cudaHostRegister failed with {} ({})", result, cudaGetErrorString(result));
    }
  }

  /**
   * @brief Unregisters the memory range of the mapped file.
   */
  void unregister_mmap_buffer()
  {
    if (not _is_map_registered) { return; }

    auto const result = cudaHostUnregister(_map_addr);
    if (result != cudaSuccess) {
      CUDF_LOG_WARN("cudaHostUnregister failed with {} ({})", result, cudaGetErrorString(result));
    }
  }

  /**
   * @brief Memory-maps [offset, offset + size) of the file (clamped to the file size).
   *
   * @param fd Open file descriptor to map
   * @param offset Requested start offset (need not be page aligned)
   * @param size Requested length; 0 means "to end of file"
   */
  void map(int fd, size_t offset, size_t size)
  {
    CUDF_EXPECTS(offset < _file.size(), "Offset is past end of file");

    // Offset for `mmap()` must be page aligned
    _map_offset = offset & ~(sysconf(_SC_PAGESIZE) - 1);

    // A zero size, or a size extending past EOF, means "map to the end of the file"
    if (size == 0 || (offset + size) > _file.size()) { size = _file.size() - offset; }

    // Size for `mmap()` needs to include the page padding
    _map_size = size + (offset - _map_offset);

    // Read-only, private mapping of the page-aligned range
    _map_addr = mmap(nullptr, _map_size, PROT_READ, MAP_PRIVATE, fd, _map_offset);
    CUDF_EXPECTS(_map_addr != MAP_FAILED, "Cannot create memory mapping");
  }

 private:
  size_t _map_size        = 0;        // length of the mapping, including page padding
  size_t _map_offset      = 0;        // page-aligned start offset of the mapping
  void* _map_addr         = nullptr;  // base address returned by mmap
  bool _is_map_registered = false;    // whether cudaHostRegister succeeded

};

/**
 * @brief Implementation class for reading from a file using `read` calls
 *
 * Potentially faster than `memory_mapped_source` when only a small portion of the file is read
 * through the host.
*/ class direct_read_source : public file_source { public: explicit direct_read_source(char const* filepath) : file_source(filepath) {} std::unique_ptr<buffer> host_read(size_t offset, size_t size) override { lseek(_file.desc(), offset, SEEK_SET); // Clamp length to available data ssize_t const read_size = std::min(size, _file.size() - offset); std::vector<uint8_t> v(read_size); CUDF_EXPECTS(read(_file.desc(), v.data(), read_size) == read_size, "read failed"); return buffer::create(std::move(v)); } size_t host_read(size_t offset, size_t size, uint8_t* dst) override { lseek(_file.desc(), offset, SEEK_SET); // Clamp length to available data auto const read_size = std::min(size, _file.size() - offset); CUDF_EXPECTS(read(_file.desc(), dst, read_size) == static_cast<ssize_t>(read_size), "read failed"); return read_size; } }; /** * @brief Implementation class for reading from a device buffer source */ class device_buffer_source final : public datasource { public: explicit device_buffer_source(cudf::device_span<std::byte const> d_buffer) : _d_buffer{d_buffer} { } size_t host_read(size_t offset, size_t size, uint8_t* dst) override { auto const count = std::min(size, this->size() - offset); auto const stream = cudf::get_default_stream(); CUDF_CUDA_TRY( cudaMemcpyAsync(dst, _d_buffer.data() + offset, count, cudaMemcpyDefault, stream.value())); stream.synchronize(); return count; } std::unique_ptr<buffer> host_read(size_t offset, size_t size) override { auto const count = std::min(size, this->size() - offset); auto const stream = cudf::get_default_stream(); auto h_data = cudf::detail::make_std_vector_async( cudf::device_span<std::byte const>{_d_buffer.data() + offset, count}, stream); stream.synchronize(); return std::make_unique<owning_buffer<std::vector<std::byte>>>(std::move(h_data)); } [[nodiscard]] bool supports_device_read() const override { return true; } std::future<size_t> device_read_async(size_t offset, size_t size, uint8_t* dst, rmm::cuda_stream_view stream) 
override { auto const count = std::min(size, this->size() - offset); CUDF_CUDA_TRY( cudaMemcpyAsync(dst, _d_buffer.data() + offset, count, cudaMemcpyDefault, stream.value())); return std::async(std::launch::deferred, [count] { return count; }); } size_t device_read(size_t offset, size_t size, uint8_t* dst, rmm::cuda_stream_view stream) override { return device_read_async(offset, size, dst, stream).get(); } std::unique_ptr<buffer> device_read(size_t offset, size_t size, rmm::cuda_stream_view stream) override { return std::make_unique<non_owning_buffer>( reinterpret_cast<uint8_t const*>(_d_buffer.data() + offset), size); } [[nodiscard]] size_t size() const override { return _d_buffer.size(); } private: cudf::device_span<std::byte const> _d_buffer; ///< A non-owning view of the existing device data }; /** * @brief Wrapper class for user implemented data sources * * Holds the user-implemented object with a non-owning pointer; The user object is not deleted * when the wrapper object is destroyed. * All API calls are forwarded to the user datasource object. 
 */
class user_datasource_wrapper : public datasource {
 public:
  explicit user_datasource_wrapper(datasource* const source) : source(source) {}

  size_t host_read(size_t offset, size_t size, uint8_t* dst) override
  {
    return source->host_read(offset, size, dst);
  }

  std::unique_ptr<buffer> host_read(size_t offset, size_t size) override
  {
    return source->host_read(offset, size);
  }

  [[nodiscard]] bool supports_device_read() const override
  {
    return source->supports_device_read();
  }

  [[nodiscard]] bool is_device_read_preferred(size_t size) const override
  {
    return source->is_device_read_preferred(size);
  }

  size_t device_read(size_t offset,
                     size_t size,
                     uint8_t* dst,
                     rmm::cuda_stream_view stream) override
  {
    return source->device_read(offset, size, dst, stream);
  }

  std::unique_ptr<buffer> device_read(size_t offset,
                                      size_t size,
                                      rmm::cuda_stream_view stream) override
  {
    return source->device_read(offset, size, stream);
  }

  std::future<size_t> device_read_async(size_t offset,
                                        size_t size,
                                        uint8_t* dst,
                                        rmm::cuda_stream_view stream) override
  {
    return source->device_read_async(offset, size, dst, stream);
  }

  [[nodiscard]] size_t size() const override { return source->size(); }

  [[nodiscard]] bool is_empty() const override { return source->is_empty(); }

 private:
  datasource* const source;  ///< A non-owning pointer to the user-implemented datasource
};

}  // namespace

// Factory: file-based datasource. Chooses a direct-read implementation when cuFile/GDS
// is forced on at build+run time, otherwise a memory-mapped implementation.
std::unique_ptr<datasource> datasource::create(std::string const& filepath,
                                               size_t offset,
                                               size_t size)
{
#ifdef CUFILE_FOUND
  if (detail::cufile_integration::is_always_enabled()) {
    // avoid mmap as GDS is expected to be used for most reads
    return std::make_unique<direct_read_source>(filepath.c_str());
  }
#endif
  // Use our own memory mapping implementation for direct file reads
  return std::make_unique<memory_mapped_source>(filepath.c_str(), offset, size);
}

// Factory: legacy host_buffer overload; forwards to the host_span overload.
std::unique_ptr<datasource> datasource::create(host_buffer const& buffer)
{
  return create(
    cudf::host_span<std::byte const>{reinterpret_cast<std::byte const*>(buffer.data),
                                     buffer.size});
}

// Factory: non-owning host memory datasource.
std::unique_ptr<datasource> datasource::create(cudf::host_span<std::byte const> buffer)
{
  // Use Arrow IO buffer class for zero-copy reads of host memory
  return std::make_unique<arrow_io_source>(std::make_shared<arrow::io::BufferReader>(
    reinterpret_cast<uint8_t const*>(buffer.data()), buffer.size()));
}

// Factory: non-owning device memory datasource.
std::unique_ptr<datasource> datasource::create(cudf::device_span<std::byte const> buffer)
{
  return std::make_unique<device_buffer_source>(buffer);
}

// Factory: wraps a user-provided datasource without taking ownership.
std::unique_ptr<datasource> datasource::create(datasource* source)
{
  // instantiate a wrapper that forwards the calls to the user implementation
  return std::make_unique<user_datasource_wrapper>(source);
}

}  // namespace io
}  // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/row_selection.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/types.hpp> #include <cstdint> #include <optional> #include <utility> namespace cudf::io::detail { /** * @brief Adjusts the input skip_rows and num_rows options to the actual number of rows to * skip/read, based on the number of rows in the ORC file(s). * * @param skip_rows skip_rows as passed by the user * @param num_rows Optional num_rows as passed by the user * @param num_source_rows number of rows in the ORC file(s) * @return A std::pair containing the number of rows to skip and the number of rows to read * * @throw std::overflow_exception The requested number of rows exceeds the column size limit */ std::pair<uint64_t, size_type> skip_rows_num_rows_from_options( uint64_t skip_rows, std::optional<size_type> const& num_rows, uint64_t num_source_rows); } // namespace cudf::io::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/string_parsing.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <io/utilities/parsing_utils.cuh>

#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>

#include <rmm/cuda_stream_view.hpp>

#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

namespace cudf::io {
namespace detail {

/**
 * @brief Infers data type for a given JSON string input `data`.
 *
 * @throw cudf::logic_error if input size is 0
 * @throw cudf::logic_error if date time is not inferred as string
 * @throw cudf::logic_error if data type inference failed
 *
 * @param options View of inference options
 * @param data JSON string input
 * @param offset_length_begin The beginning of an offset-length tuple sequence
 * @param size Size of the string input
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return The inferred data type
 */
cudf::data_type infer_data_type(
  cudf::io::json_inference_options_view const& options,
  device_span<char const> data,
  thrust::zip_iterator<thrust::tuple<const size_type*, const size_type*>> offset_length_begin,
  std::size_t const size,
  rmm::cuda_stream_view stream);
}  // namespace detail

namespace json::detail {

/**
 * @brief Parses the data from an iterator of string views, casting it to the given target data type
 *
 * @param data string input base pointer
 * @param offset_length_begin The beginning of an offset-length tuple sequence
 * @param col_size The total number of items of this column
 * @param col_type The column's target data type
 * @param null_mask A null mask that renders certain items from the input invalid
 * @param null_count The number of null entries described by `null_mask`
 * @param options Settings for controlling the processing behavior
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr The resource to be used for device memory allocation
 * @return The column that contains the parsed data
 */
std::unique_ptr<column> parse_data(
  const char* data,
  thrust::zip_iterator<thrust::tuple<const size_type*, const size_type*>> offset_length_begin,
  size_type col_size,
  data_type col_type,
  rmm::device_buffer&& null_mask,
  size_type null_count,
  cudf::io::parse_options_view const& options,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr);
}  // namespace json::detail
}  // namespace cudf::io
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/thread_pool.hpp
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

/**
 * Modified from https://github.com/bshoshany/thread-pool
 * @copyright Copyright (c) 2021 Barak Shoshany. Licensed under the MIT license.
 * See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
 */

#include <atomic>       // std::atomic
#include <chrono>       // std::chrono
#include <cstdint>      // std::int_fast64_t, std::uint_fast32_t
#include <functional>   // std::function
#include <future>       // std::future, std::promise
#include <memory>       // std::shared_ptr, std::unique_ptr
#include <mutex>        // std::mutex, std::scoped_lock
#include <queue>        // std::queue
#include <thread>       // std::this_thread, std::thread
#include <type_traits>  // std::decay_t, std::enable_if_t, std::is_void_v, std::invoke_result_t
#include <utility>      // std::move, std::swap

namespace cudf {
namespace detail {

/**
 * @brief A C++17 thread pool class. The user submits tasks to be executed into a queue. Whenever a
 * thread becomes available, it pops a task from the queue and executes it. Each task is
 * automatically assigned a future, which can be used to wait for the task to finish executing
 * and/or obtain its eventual return value.
 */
class thread_pool {
  // NOTE(review): despite the name, this alias is a plain (signed) int, not uint_fast32_t.
  using ui32 = int;

 public:
  /**
   * @brief Construct a new thread pool.
   *
   * @param _thread_count The number of threads to use. The default value is the total number of
   * hardware threads available, as reported by the implementation. With a hyperthreaded CPU, this
   * will be twice the number of CPU cores. If the argument is zero, the default value will be used
   * instead.
   */
  thread_pool(ui32 const& _thread_count = std::thread::hardware_concurrency())
    : thread_count(_thread_count ? _thread_count : std::thread::hardware_concurrency()),
      threads(new std::thread[_thread_count ? _thread_count : std::thread::hardware_concurrency()])
  {
    create_threads();
  }

  /**
   * @brief Destruct the thread pool. Waits for all tasks to complete, then destroys all threads.
   * Note that if the variable paused is set to true, then any tasks still in the queue will never
   * be executed.
   */
  ~thread_pool()
  {
    wait_for_tasks();
    running = false;
    destroy_threads();
  }

  /**
   * @brief Get the number of tasks currently waiting in the queue to be executed by the threads.
   *
   * @return The number of queued tasks.
   */
  [[nodiscard]] size_t get_tasks_queued() const
  {
    std::scoped_lock const lock(queue_mutex);
    return tasks.size();
  }

  /**
   * @brief Get the number of tasks currently being executed by the threads.
   *
   * NOTE(review): tasks_total and the queue size are read without a common lock, so this
   * value is approximate while tasks are being pushed/popped concurrently.
   *
   * @return The number of running tasks.
   */
  [[nodiscard]] ui32 get_tasks_running() const { return tasks_total - (ui32)get_tasks_queued(); }

  /**
   * @brief Get the total number of unfinished tasks - either still in the queue, or running in a
   * thread.
   *
   * @return The total number of tasks.
   */
  [[nodiscard]] ui32 get_tasks_total() const { return tasks_total; }

  /**
   * @brief Get the number of threads in the pool.
   *
   * @return The number of threads.
   */
  [[nodiscard]] ui32 get_thread_count() const { return thread_count; }

  /**
   * @brief Parallelize a loop by splitting it into blocks, submitting each block separately to the
   * thread pool, and waiting for all blocks to finish executing. The loop will be equivalent to:
   * for (T i = first_index; i <= last_index; i++) loop(i);
   *
   * NOTE(review): this member busy-waits (sleep/yield loop) until all blocks finish.
   *
   * @tparam T The type of the loop index. Should be a signed or unsigned integer.
   * @tparam F The type of the function to loop through.
   * @param first_index The first index in the loop (inclusive).
   * @param last_index The last index in the loop (inclusive).
   * @param loop The function to loop through. Should take exactly one argument, the loop index.
   * @param num_tasks The maximum number of tasks to split the loop into. The default is to use the
   * number of threads in the pool.
   */
  template <typename T, typename F>
  void parallelize_loop(T first_index, T last_index, F const& loop, ui32 num_tasks = 0)
  {
    if (num_tasks == 0) num_tasks = thread_count;
    if (last_index < first_index) std::swap(last_index, first_index);
    size_t total_size = last_index - first_index + 1;
    size_t block_size = total_size / num_tasks;
    if (block_size == 0) {
      // Fewer iterations than tasks: one iteration per task.
      block_size = 1;
      num_tasks  = (ui32)total_size > 1 ? (ui32)total_size : 1;
    }
    std::atomic<ui32> blocks_running = 0;
    for (ui32 t = 0; t < num_tasks; t++) {
      T start = (T)(t * block_size + first_index);
      // The last task absorbs any remainder iterations.
      T end   = (t == num_tasks - 1) ? last_index : (T)((t + 1) * block_size + first_index - 1);
      blocks_running++;
      push_task([start, end, &loop, &blocks_running] {
        for (T i = start; i <= end; i++)
          loop(i);
        blocks_running--;
      });
    }
    while (blocks_running != 0) {
      sleep_or_yield();
    }
  }

  /**
   * @brief Push a function with no arguments or return value into the task queue.
   *
   * @tparam F The type of the function.
   * @param task The function to push.
   */
  template <typename F>
  void push_task(F const& task)
  {
    // Incremented before queueing so get_tasks_total counts the task immediately.
    tasks_total++;
    {
      std::scoped_lock const lock(queue_mutex);
      tasks.push(std::function<void()>(task));
    }
  }

  /**
   * @brief Push a function with arguments, but no return value, into the task queue.
   * @details The function is wrapped inside a lambda in order to hide the arguments, as the tasks
   * in the queue must be of type std::function<void()>, so they cannot have any arguments or return
   * value. If no arguments are provided, the other overload will be used, in order to avoid the
   * (slight) overhead of using a lambda.
   *
   * @tparam F The type of the function.
   * @tparam A The types of the arguments.
   * @param task The function to push.
   * @param args The arguments to pass to the function.
   */
  template <typename F, typename... A>
  void push_task(F const& task, A const&... args)
  {
    push_task([task, args...] { task(args...); });
  }

  /**
   * @brief Reset the number of threads in the pool. Waits for all currently running tasks to be
   * completed, then destroys all threads in the pool and creates a new thread pool with the new
   * number of threads. Any tasks that were waiting in the queue before the pool was reset will then
   * be executed by the new threads. If the pool was paused before resetting it, the new pool will
   * be paused as well.
   *
   * @param _thread_count The number of threads to use. The default value is the total number of
   * hardware threads available, as reported by the implementation. With a hyperthreaded CPU, this
   * will be twice the number of CPU cores. If the argument is zero, the default value will be used
   * instead.
   */
  void reset(ui32 const& _thread_count = std::thread::hardware_concurrency())
  {
    bool was_paused = paused;
    paused          = true;
    wait_for_tasks();
    running = false;
    destroy_threads();
    thread_count = _thread_count ? _thread_count : std::thread::hardware_concurrency();
    threads.reset(new std::thread[thread_count]);
    paused = was_paused;
    create_threads();
    running = true;
  }

  /**
   * @brief Submit a function with zero or more arguments and a return value into the task queue,
   * and get a future for its eventual returned value.
   *
   * @tparam F The type of the function.
   * @tparam A The types of the zero or more arguments to pass to the function.
   * @tparam R The return type of the function.
   * @param task The function to submit.
   * @param args The zero or more arguments to pass to the function.
   * @return A future to be used later to obtain the function's returned value, waiting for it to
   * finish its execution if needed.
   */
  template <typename F,
            typename... A,
            typename R = std::invoke_result_t<std::decay_t<F>, std::decay_t<A>...>>
  std::future<R> submit(F const& task, A const&... args)
  {
    std::shared_ptr<std::promise<R>> promise(new std::promise<R>);
    std::future<R> future = promise->get_future();
    push_task([task, args..., promise] {
      try {
        if constexpr (std::is_void_v<R>) {
          task(args...);
          promise->set_value();
        } else {
          promise->set_value(task(args...));
        }
      } catch (...) {
        // Any exception thrown by the task is delivered through the future.
        promise->set_exception(std::current_exception());
      };
    });
    return future;
  }

  /**
   * @brief Wait for tasks to be completed. Normally, this function waits for all tasks, both those
   * that are currently running in the threads and those that are still waiting in the queue.
   * However, if the variable paused is set to true, this function only waits for the currently
   * running tasks (otherwise it would wait forever). To wait for a specific task, use submit()
   * instead, and call the wait() member function of the generated future.
   */
  void wait_for_tasks()
  {
    while (true) {
      if (!paused) {
        if (tasks_total == 0) break;
      } else {
        if (get_tasks_running() == 0) break;
      }
      sleep_or_yield();
    }
  }

  /**
   * @brief An atomic variable indicating to the workers to pause. When set to true, the workers
   * temporarily stop popping new tasks out of the queue, although any tasks already executed will
   * keep running until they are done. Set to false again to resume popping tasks.
   */
  std::atomic<bool> paused = false;

  /**
   * @brief The duration, in microseconds, that the worker function should sleep for when it cannot
   * find any tasks in the queue. If set to 0, then instead of sleeping, the worker function will
   * execute std::this_thread::yield() if there are no tasks in the queue. The default value is
   * 1000.
   */
  ui32 sleep_duration = 1000;

 private:
  /**
   * @brief Create the threads in the pool and assign a worker to each thread.
   */
  void create_threads()
  {
    for (ui32 i = 0; i < thread_count; i++) {
      threads[i] = std::thread(&thread_pool::worker, this);
    }
  }

  /**
   * @brief Destroy the threads in the pool by joining them.
   */
  void destroy_threads()
  {
    for (ui32 i = 0; i < thread_count; i++) {
      threads[i].join();
    }
  }

  /**
   * @brief Try to pop a new task out of the queue.
   *
   * @param task A reference to the task. Will be populated with a function if the queue is not
   * empty.
   * @return true if a task was found, false if the queue is empty.
   */
  bool pop_task(std::function<void()>& task)
  {
    std::scoped_lock const lock(queue_mutex);
    if (tasks.empty())
      return false;
    else {
      task = std::move(tasks.front());
      tasks.pop();
      return true;
    }
  }

  /**
   * @brief Sleep for sleep_duration microseconds. If that variable is set to zero, yield instead.
   *
   */
  void sleep_or_yield()
  {
    if (sleep_duration)
      std::this_thread::sleep_for(std::chrono::microseconds(sleep_duration));
    else
      std::this_thread::yield();
  }

  /**
   * @brief A worker function to be assigned to each thread in the pool. Continuously pops tasks out
   * of the queue and executes them, as long as the atomic variable running is set to true.
   */
  void worker()
  {
    while (running) {
      std::function<void()> task;
      if (!paused && pop_task(task)) {
        task();
        // Decremented only after the task body returns, so wait_for_tasks sees it as
        // in-flight until completion.
        tasks_total--;
      } else {
        sleep_or_yield();
      }
    }
  }

  /**
   * @brief A mutex to synchronize access to the task queue by different threads.
   */
  mutable std::mutex queue_mutex;

  /**
   * @brief An atomic variable indicating to the workers to keep running. When set to false, the
   * workers permanently stop working.
   */
  std::atomic<bool> running = true;

  /**
   * @brief A queue of tasks to be executed by the threads.
   */
  std::queue<std::function<void()>> tasks;

  /**
   * @brief The number of threads in the pool.
   */
  ui32 thread_count;

  /**
   * @brief A smart pointer to manage the memory allocated for the threads.
   */
  std::unique_ptr<std::thread[]> threads;

  /**
   * @brief An atomic variable to keep track of the total number of unfinished tasks - either still
   * in the queue, or running in a thread.
   */
  std::atomic<ui32> tasks_total = 0;
};

}  // namespace detail
}  // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/arrow_io_source.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/io/arrow_io_source.hpp>

#include <arrow/buffer.h>
#include <arrow/filesystem/filesystem.h>
#include <arrow/result.h>

#include <memory>
#include <string>
#include <utility>

namespace cudf::io {

/**
 * @brief Implementation for an owning buffer where `arrow::Buffer` holds the data.
 */
class arrow_io_buffer : public datasource::buffer {
  std::shared_ptr<arrow::Buffer> arrow_buffer;

 public:
  // Takes the shared_ptr by value and moves it into the member; the previous
  // copy-initialization performed an unnecessary extra atomic refcount increment.
  explicit arrow_io_buffer(std::shared_ptr<arrow::Buffer> arrow_buffer)
    : arrow_buffer(std::move(arrow_buffer))
  {
  }
  [[nodiscard]] size_t size() const override { return arrow_buffer->size(); }
  [[nodiscard]] uint8_t const* data() const override { return arrow_buffer->data(); }
};

/**
 * @brief Constructs the source from an Arrow filesystem URI.
 *
 * The filesystem is resolved from the URI scheme; the path is the substring
 * between the "//" scheme separator and the optional "?" query separator.
 *
 * @throws cudf::logic_error if the filesystem cannot be created or the file cannot be opened
 */
arrow_io_source::arrow_io_source(std::string const& arrow_uri)
{
  std::string const uri_start_delimiter = "//";
  std::string const uri_end_delimiter   = "?";

  auto const result = arrow::fs::FileSystemFromUri(arrow_uri);
  CUDF_EXPECTS(result.ok(), "Failed to generate Arrow Filesystem instance from URI.");
  filesystem = result.ValueOrDie();

  // Parse the path from the URI
  auto const start = [&]() {
    auto const delim_start = arrow_uri.find(uri_start_delimiter);
    return delim_start == std::string::npos ? 0 : delim_start + uri_start_delimiter.size();
  }();
  // Explicit npos handling: without a "?" query separator, the path extends to the end
  // of the URI (previously relied on unsigned wrap-around being clamped by substr).
  auto const delim_end = arrow_uri.find(uri_end_delimiter);
  auto const end       = delim_end == std::string::npos ? std::string::npos : delim_end - start;
  auto const path      = arrow_uri.substr(start, end);

  auto const in_stream = filesystem->OpenInputFile(path);
  CUDF_EXPECTS(in_stream.ok(), "Failed to open Arrow RandomAccessFile");
  arrow_file = in_stream.ValueOrDie();
}

/**
 * @brief Reads `size` bytes at `offset` into an Arrow-owned buffer (zero extra copy).
 */
std::unique_ptr<datasource::buffer> arrow_io_source::host_read(size_t offset, size_t size)
{
  auto const result = arrow_file->ReadAt(offset, size);
  CUDF_EXPECTS(result.ok(), "Cannot read file data");
  return std::make_unique<arrow_io_buffer>(result.ValueOrDie());
}

/**
 * @brief Reads `size` bytes at `offset` into `dst`; returns the number of bytes read.
 */
size_t arrow_io_source::host_read(size_t offset, size_t size, uint8_t* dst)
{
  auto const result = arrow_file->ReadAt(offset, size, dst);
  CUDF_EXPECTS(result.ok(), "Cannot read file data");
  return result.ValueOrDie();
}

/**
 * @brief Returns the size of the underlying file as reported by Arrow.
 */
[[nodiscard]] size_t arrow_io_source::size() const
{
  auto const result = arrow_file->GetSize();
  CUDF_EXPECTS(result.ok(), "Cannot get file size");
  return result.ValueOrDie();
}

}  // namespace cudf::io
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/column_buffer.cpp
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file column_buffer.cpp
 * @brief cuDF-IO column_buffer class implementation
 */

#include "column_buffer.hpp"

#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

namespace cudf::io::detail {

// Allocates the zero-initialized device vector of string_index_pair entries
// that backs a STRING column in the "gather" policy. Only valid to call on a
// buffer whose type is STRING.
void gather_column_buffer::allocate_strings_data(rmm::cuda_stream_view stream)
{
  CUDF_EXPECTS(type.id() == type_id::STRING, "allocate_strings_data called for non-string column");
  // The contents of _strings will never be directly returned to the user.
  // Due to the fact that make_strings_column copies the input data to
  // produce its outputs, _strings is actually a temporary. As a result, we
  // do not pass the provided mr to the call to
  // make_zeroed_device_uvector_async here and instead let it use the
  // default rmm memory resource.
  _strings = std::make_unique<rmm::device_uvector<string_index_pair>>(
    cudf::detail::make_zeroed_device_uvector_async<string_index_pair>(
      size, stream, rmm::mr::get_current_device_resource()));
}

// Materializes a strings column from the temporary _strings vector.
std::unique_ptr<column> gather_column_buffer::make_string_column_impl(rmm::cuda_stream_view stream)
{
  // make_strings_column allocates new memory, it does not simply move
  // from the inputs, so we need to pass it the memory resource given to
  // the buffer on construction so that the memory is allocated using the
  // resource that the calling code expected.
  return make_strings_column(*_strings, stream, _mr);
}

// For the "inline" policy a STRING column's _data buffer holds the offsets;
// the character data buffer (_string_data) is allocated separately later via
// create_string_data once its size is known.
void cudf::io::detail::inline_column_buffer::allocate_strings_data(rmm::cuda_stream_view stream)
{
  CUDF_EXPECTS(type.id() == type_id::STRING, "allocate_strings_data called for non-string column");
  // size + 1 for final offset. _string_data will be initialized later.
  _data = create_data(data_type{type_id::INT32}, size + 1, stream, _mr);
}

// Allocates the character-data buffer of a STRING column using the buffer's
// memory resource.
void cudf::io::detail::inline_column_buffer::create_string_data(size_t num_bytes,
                                                                rmm::cuda_stream_view stream)
{
  _string_data = rmm::device_buffer(num_bytes, stream, _mr);
}

// Builds the final strings column by transferring ownership of the offsets
// (_data) and character (_string_data) buffers into child columns — no copies.
std::unique_ptr<column> cudf::io::detail::inline_column_buffer::make_string_column_impl(
  rmm::cuda_stream_view stream)
{
  // no need for copies, just transfer ownership of the data_buffers to the columns
  auto const state = mask_state::UNALLOCATED;
  auto str_col =
    _string_data.is_empty()
      ? make_empty_column(data_type{type_id::INT8})
      : std::make_unique<column>(data_type{type_id::INT8},
                                 string_size(),
                                 std::move(_string_data),
                                 cudf::detail::create_null_mask(size, state, stream, _mr),
                                 state_null_count(state, size),
                                 std::vector<std::unique_ptr<column>>{});
  auto offsets_col =
    std::make_unique<column>(data_type{type_to_id<size_type>()},
                             size + 1,
                             std::move(_data),
                             cudf::detail::create_null_mask(size + 1, state, stream, _mr),
                             state_null_count(state, size + 1),
                             std::vector<std::unique_ptr<column>>{});
  return make_strings_column(
    size, std::move(offsets_col), std::move(str_col), null_count(), std::move(_null_mask));
}

namespace {

/**
 * @brief Recursively copy `name` and `user_data` fields of one buffer to another.
 *
 * @param buff The old output buffer
 * @param new_buff The new output buffer
 */
template <class string_policy>
void copy_buffer_data(string_policy const& buff, string_policy& new_buff)
{
  new_buff.name      = buff.name;
  new_buff.user_data = buff.user_data;
  for (auto const& child : buff.children) {
    auto& new_child = new_buff.children.emplace_back(string_policy(child.type, child.is_nullable));
    copy_buffer_data(child, new_child);
  }
}

}  // namespace

// Allocates device storage for this buffer according to its type id:
// STRING defers to the policy class, LIST allocates offsets, STRUCT owns no
// data, and all other types allocate a plain data buffer. An ALL_NULL mask is
// allocated when the buffer is nullable.
template <class string_policy>
void column_buffer_base<string_policy>::create(size_type _size,
                                               rmm::cuda_stream_view stream,
                                               rmm::mr::device_memory_resource* mr)
{
  size = _size;
  _mr  = mr;

  switch (type.id()) {
    case type_id::STRING: static_cast<string_policy*>(this)->allocate_strings_data(stream); break;

    // list columns store a buffer of int32's as offsets to represent
    // their individual rows
    case type_id::LIST: _data = create_data(data_type{type_id::INT32}, size, stream, _mr); break;

    // struct columns store no data themselves. just validity and children.
    case type_id::STRUCT: break;

    default: _data = create_data(type, size, stream, _mr); break;
  }
  if (is_nullable) {
    _null_mask = cudf::detail::create_null_mask(
      size, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), _mr);
  }
}

// Creates an empty buffer with the same type/nullability tree as `input`,
// copying only the name/user_data metadata (no device allocations).
template <class string_policy>
string_policy column_buffer_base<string_policy>::empty_like(string_policy const& input)
{
  auto new_buff = string_policy(input.type, input.is_nullable);
  copy_buffer_data(input, new_buff);
  return new_buff;
}

/**
 * @brief Recursively converts a column_buffer (and its children) into a cudf column,
 * transferring ownership of the device buffers and optionally recording column
 * names into `schema_info`.
 */
template <class string_policy>
std::unique_ptr<column> make_column(column_buffer_base<string_policy>& buffer,
                                    column_name_info* schema_info,
                                    std::optional<reader_column_schema> const& schema,
                                    rmm::cuda_stream_view stream)
{
  if (schema_info != nullptr) {
    schema_info->name        = buffer.name;
    schema_info->is_nullable = buffer.is_nullable;
  }

  switch (buffer.type.id()) {
    case type_id::STRING:
      if (schema.value_or(reader_column_schema{}).is_enabled_convert_binary_to_strings()) {
        if (schema_info != nullptr) {
          schema_info->children.push_back(column_name_info{"offsets"});
          schema_info->children.push_back(column_name_info{"chars"});
        }

        // make_strings_column allocates new memory, it does not simply move
        // from the inputs, so we need to pass it the memory resource given to
        // the buffer on construction so that the memory is allocated using the
        // resource that the calling code expected.
        return buffer.make_string_column(stream);
      } else {
        // convert to binary
        auto const string_col = buffer.make_string_column(stream);
        auto const num_rows   = string_col->size();
        auto const null_count = string_col->null_count();
        auto col_content      = string_col->release();

        // convert to uint8 column, strings are currently stored as int8
        auto contents =
          col_content.children[strings_column_view::chars_column_index].release()->release();
        auto data = contents.data.release();

        auto uint8_col = std::make_unique<column>(
          data_type{type_id::UINT8}, data->size(), std::move(*data), rmm::device_buffer{}, 0);

        if (schema_info != nullptr) {
          schema_info->children.push_back(column_name_info{"offsets"});
          schema_info->children.push_back(column_name_info{"binary"});
        }

        // Binary output: LIST<UINT8> reusing the string offsets and null mask.
        return make_lists_column(
          num_rows,
          std::move(col_content.children[strings_column_view::offsets_column_index]),
          std::move(uint8_col),
          null_count,
          std::move(*col_content.null_mask));
      }

    case type_id::LIST: {
      // make offsets column
      auto offsets = std::make_unique<column>(
        data_type{type_id::INT32}, buffer.size, std::move(buffer._data), rmm::device_buffer{}, 0);

      column_name_info* child_info = nullptr;
      if (schema_info != nullptr) {
        schema_info->children.push_back(column_name_info{"offsets"});
        schema_info->children.push_back(column_name_info{""});
        child_info = &schema_info->children.back();
      }

      CUDF_EXPECTS(not schema.has_value() or schema->get_num_children() > 0,
                   "Invalid schema provided for read, expected child data for list!");
      auto const child_schema = schema.has_value()
                                  ? std::make_optional<reader_column_schema>(schema->child(0))
                                  : std::nullopt;

      // make child column
      CUDF_EXPECTS(buffer.children.size() > 0, "Encountered malformed column_buffer");
      auto child = make_column<string_policy>(buffer.children[0], child_info, child_schema, stream);

      // make the final list column (note : size is the # of offsets, so our actual # of rows is 1
      // less)
      return make_lists_column(buffer.size - 1,
                               std::move(offsets),
                               std::move(child),
                               buffer._null_count,
                               std::move(buffer._null_mask),
                               stream,
                               buffer._mr);
    } break;

    case type_id::STRUCT: {
      std::vector<std::unique_ptr<cudf::column>> output_children;
      output_children.reserve(buffer.children.size());
      for (size_t i = 0; i < buffer.children.size(); ++i) {
        column_name_info* child_info = nullptr;
        if (schema_info != nullptr) {
          schema_info->children.push_back(column_name_info{""});
          child_info = &schema_info->children.back();
        }

        CUDF_EXPECTS(not schema.has_value() or schema->get_num_children() > i,
                     "Invalid schema provided for read, expected more child data for struct!");
        auto const child_schema = schema.has_value()
                                    ? std::make_optional<reader_column_schema>(schema->child(i))
                                    : std::nullopt;

        output_children.emplace_back(
          make_column<string_policy>(buffer.children[i], child_info, child_schema, stream));
      }

      return make_structs_column(buffer.size,
                                 std::move(output_children),
                                 buffer._null_count,
                                 std::move(buffer._null_mask),
                                 stream,
                                 buffer._mr);
    } break;

    default: {
      // Fixed-width types: the buffer's data and null mask become the column's.
      return std::make_unique<column>(buffer.type,
                                      buffer.size,
                                      std::move(buffer._data),
                                      std::move(buffer._null_mask),
                                      buffer._null_count);
    }
  }
}

/**
 * @copydoc cudf::io::detail::empty_like
 */
template <class string_policy>
std::unique_ptr<column> empty_like(column_buffer_base<string_policy>& buffer,
                                   column_name_info* schema_info,
                                   rmm::cuda_stream_view stream,
                                   rmm::mr::device_memory_resource* mr)
{
  if (schema_info != nullptr) { schema_info->name = buffer.name; }

  switch (buffer.type.id()) {
    case type_id::LIST: {
      // make offsets column
      auto offsets = cudf::make_empty_column(type_id::INT32);

      column_name_info* child_info = nullptr;
      if (schema_info != nullptr) {
        schema_info->children.push_back(column_name_info{"offsets"});
        schema_info->children.push_back(column_name_info{""});
        child_info = &schema_info->children.back();
      }

      // make child column
      CUDF_EXPECTS(buffer.children.size() > 0, "Encountered malformed column_buffer");
      auto child =
        cudf::io::detail::empty_like<string_policy>(buffer.children[0], child_info, stream, mr);

      // make the final list column
      return make_lists_column(
        0, std::move(offsets), std::move(child), 0, rmm::device_buffer{0, stream, mr}, stream, mr);
    } break;

    case type_id::STRUCT: {
      std::vector<std::unique_ptr<cudf::column>> output_children;
      output_children.reserve(buffer.children.size());
      std::transform(buffer.children.begin(),
                     buffer.children.end(),
                     std::back_inserter(output_children),
                     [&](auto& col) {
                       column_name_info* child_info = nullptr;
                       if (schema_info != nullptr) {
                         schema_info->children.push_back(column_name_info{""});
                         child_info = &schema_info->children.back();
                       }
                       return cudf::io::detail::empty_like<string_policy>(
                         col, child_info, stream, mr);
                     });

      return make_structs_column(
        0, std::move(output_children), 0, rmm::device_buffer{0, stream, mr}, stream, mr);
    } break;

    default: return cudf::make_empty_column(buffer.type);
  }
}

// Explicit instantiations for the two supported string policies so the
// templates can live in this translation unit.
using pointer_type = gather_column_buffer;
using string_type  = cudf::io::detail::inline_column_buffer;

using pointer_column_buffer = column_buffer_base<pointer_type>;
using string_column_buffer  = column_buffer_base<string_type>;

template std::unique_ptr<column> make_column<string_type>(
  string_column_buffer& buffer,
  column_name_info* schema_info,
  std::optional<reader_column_schema> const& schema,
  rmm::cuda_stream_view stream);

template std::unique_ptr<column> make_column<pointer_type>(
  pointer_column_buffer& buffer,
  column_name_info* schema_info,
  std::optional<reader_column_schema> const& schema,
  rmm::cuda_stream_view stream);

template std::unique_ptr<column> empty_like<string_type>(string_column_buffer& buffer,
                                                         column_name_info* schema_info,
                                                         rmm::cuda_stream_view stream,
                                                         rmm::mr::device_memory_resource* mr);

template std::unique_ptr<column> empty_like<pointer_type>(pointer_column_buffer& buffer,
                                                          column_name_info* schema_info,
                                                          rmm::cuda_stream_view stream,
                                                          rmm::mr::device_memory_resource* mr);

template class column_buffer_base<pointer_type>;
template class column_buffer_base<string_type>;

}  // namespace cudf::io::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/trie.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @brief Serialized trie implementation for C++/CUDA * @file trie.cuh */ #pragma once #include <cudf/utilities/span.hpp> #include <optional> namespace cudf { namespace detail { static constexpr char trie_terminating_character = '\n'; /** * @brief Node in the serialized trie. * * A serialized trie is an array of nodes. Each node represents a matching character, except for the * last child node, which denotes the end of the children list. Children of a node are stored * contiguously. The `children_offset` member is the offset between the node and its first child. * Matching is successful if all characters are matched and the final node is the last character of * a word (i.e. `is_leaf` is true). * */ struct serial_trie_node { int16_t children_offset{-1}; char character{trie_terminating_character}; bool is_leaf{false}; explicit serial_trie_node(char c, bool leaf = false) noexcept : character(c), is_leaf(leaf) {} }; using trie = rmm::device_uvector<serial_trie_node>; using optional_trie = std::optional<trie>; using trie_view = device_span<serial_trie_node const>; inline trie_view make_trie_view(optional_trie const& t) { if (!t) return {}; return trie_view{t->data(), t->size()}; } /** * @brief Creates a serialized trie for cache-friendly string search. 
* * The resulting trie is a compact array - children array size is equal to the * actual number of children nodes, not the size of the alphabet. * * @param keys Array of strings to insert into the trie * @param stream CUDA stream used for device memory operations and kernel launches. * * @return A host vector of nodes representing the serialized trie */ trie create_serialized_trie(std::vector<std::string> const& keys, rmm::cuda_stream_view stream); /* * @brief Searches for a string in a serialized trie. * * Can be executed on host or device, as long as the data is available * * @param trie Pointer to the array of nodes that make up the trie * @param key Pointer to the start of the string to find * @param key_len Length of the string to find * * @return Boolean value; true if string is found, false otherwise */ __host__ __device__ inline bool serialized_trie_contains(device_span<serial_trie_node const> trie, device_span<char const> key) { if (trie.empty()) { return false; } if (key.empty()) { return trie.front().is_leaf; } auto curr_node = trie.begin() + 1; for (auto curr_key = key.begin(); curr_key < key.end(); ++curr_key) { // Don't jump away from root node if (curr_key != key.begin()) { curr_node += curr_node->children_offset; } // Search for the next character in the array of children nodes // Nodes are sorted - terminate search if the node is larger or equal while (curr_node->character != trie_terminating_character && curr_node->character < *curr_key) { ++curr_node; } // Could not find the next character, done with the search if (curr_node->character != *curr_key) { return false; } } // Even if the node is present, return true only if that node is at the end of a word return curr_node->is_leaf; } } // namespace detail } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/type_inference.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <io/utilities/column_type_histogram.hpp>
#include <io/utilities/string_parsing.hpp>
#include <io/utilities/trie.cuh>

#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/utilities/error.hpp>

#include <rmm/device_scalar.hpp>

#include <cub/block/block_reduce.cuh>

#include <cstddef>

namespace cudf::io::detail {
/**
 * @brief Custom column_type_histogram sum reduction callable
 */
struct custom_sum {
  __device__ inline cudf::io::column_type_histogram operator()(
    cudf::io::column_type_histogram const& lhs, cudf::io::column_type_histogram const& rhs)
  {
    // Element-wise sum of all eight counters.
    return {lhs.null_count + rhs.null_count,
            lhs.float_count + rhs.float_count,
            lhs.datetime_count + rhs.datetime_count,
            lhs.string_count + rhs.string_count,
            lhs.negative_small_int_count + rhs.negative_small_int_count,
            lhs.positive_small_int_count + rhs.positive_small_int_count,
            lhs.big_int_count + rhs.big_int_count,
            lhs.bool_count + rhs.bool_count};
  }
};

/**
 * @brief Returns true if the input character is a valid digit.
 * Supports both decimal and hexadecimal digits (uppercase and lowercase).
 *
 * @param c Character to check
 * @param is_hex Whether to check as a hexadecimal
 *
 * @return `true` if it is digit-like, `false` otherwise
 */
__device__ __inline__ bool is_digit(char const c, bool const is_hex = false)
{
  if (c >= '0' && c <= '9') return true;

  if (is_hex) {
    if (c >= 'A' && c <= 'F') return true;
    if (c >= 'a' && c <= 'f') return true;
  }

  return false;
}

/**
 * @brief Returns true if the counters indicate a potentially valid float.
 * False positives are possible because positions are not taken into account.
 * For example, field "e.123-" would match the pattern.
 */
__device__ __inline__ bool is_like_float(std::size_t len,
                                         uint32_t digit_cnt,
                                         uint32_t decimal_cnt,
                                         uint32_t dash_cnt,
                                         uint32_t exponent_cnt)
{
  // Can't have more than one exponent and one decimal point
  if (decimal_cnt > 1) return false;
  if (exponent_cnt > 1) return false;

  // Without the exponent or a decimal point, this is an integer, not a float
  if (decimal_cnt == 0 && exponent_cnt == 0) return false;

  // Can only have one '-' per component
  if (dash_cnt > 1 + exponent_cnt) return false;

  // If anything other than these characters is present, it's not a float
  if (digit_cnt + decimal_cnt + dash_cnt + exponent_cnt != len) return false;

  // Needs at least 1 digit, 2 if exponent is present
  if (digit_cnt < 1 + exponent_cnt) return false;

  return true;
}

/**
 * @brief Constructs column type histogram for a given column string input `data`.
 *
 * @tparam BlockSize Number of threads in each block
 * @tparam OptionsView Type of inference options view
 * @tparam ColumnStringIter Iterator type whose `value_type` is a
 * `thrust::tuple<offset_t, length_t>`, where `offset_t` and `length_t` are of integral type and
 * `offset_t` needs to be convertible to `std::size_t`.
 *
 * @param[in] options View of inference options
 * @param[in] data JSON string input
 * @param[in] offset_length_begin The beginning of an offset-length tuple sequence
 * @param[in] size Size of the string input
 * @param[out] column_info Histogram of column type counters
 */
template <int BlockSize, typename OptionsView, typename ColumnStringIter>
__global__ void infer_column_type_kernel(OptionsView options,
                                         device_span<char const> data,
                                         ColumnStringIter offset_length_begin,
                                         std::size_t size,
                                         cudf::io::column_type_histogram* column_info)
{
  // Per-thread histogram; merged into column_info at the end of the kernel.
  auto thread_type_histogram = cudf::io::column_type_histogram{};

  for (auto idx = threadIdx.x + blockDim.x * blockIdx.x; idx < size;
       idx += gridDim.x * blockDim.x) {
    auto const field_offset = thrust::get<0>(*(offset_length_begin + idx));
    auto const field_len    = thrust::get<1>(*(offset_length_begin + idx));
    auto const field_begin  = data.begin() + field_offset;

    // Fields matching the configured null representations are counted as nulls.
    if (cudf::detail::serialized_trie_contains(
          options.trie_na, {field_begin, static_cast<std::size_t>(field_len)})) {
      ++thread_type_histogram.null_count;
      continue;
    }

    // Handling strings
    if (field_len >= 2 and *field_begin == options.quote_char and
        field_begin[field_len - 1] == options.quote_char) {
      ++thread_type_histogram.string_count;
      continue;
    }

    // Character-class counters used below to discriminate int/float/string.
    uint32_t digit_count    = 0;
    uint32_t decimal_count  = 0;
    uint32_t slash_count    = 0;
    uint32_t dash_count     = 0;
    uint32_t plus_count     = 0;
    uint32_t colon_count    = 0;
    uint32_t exponent_count = 0;
    uint32_t other_count    = 0;

    // "0x"/"-0x" prefix: treat hex digits a-f/A-F as digits in the scan below.
    auto const maybe_hex =
      (field_len > 2 && field_begin[0] == '0' && field_begin[1] == 'x') ||
      (field_len > 3 && field_begin[0] == '-' && field_begin[1] == '0' && field_begin[2] == 'x');
    auto const field_end = field_begin + field_len;

    for (auto pos = field_begin; pos < field_end; ++pos) {
      if (is_digit(*pos, maybe_hex)) {
        digit_count++;
        continue;
      }
      // Looking for unique characters that will help identify column types
      switch (*pos) {
        case '.': decimal_count++; break;
        case '-': dash_count++; break;
        case '+': plus_count++; break;
        case '/': slash_count++; break;
        case ':': colon_count++; break;
        case 'e':
        case 'E':
          // 'e'/'E' only counts as an exponent marker away from either end of the field
          if (!maybe_hex && pos > field_begin && pos < field_end - 1) exponent_count++;
          break;
        default: other_count++; break;
      }
    }

    // All characters must be digits in an integer, except for the starting sign and 'x' in the
    // hexadecimal prefix
    auto const int_req_number_cnt =
      static_cast<uint32_t>(field_len) -
      ((*field_begin == '-' || *field_begin == '+') && field_len > 1) - maybe_hex;
    if (cudf::detail::serialized_trie_contains(
          options.trie_true, {field_begin, static_cast<std::size_t>(field_len)}) ||
        cudf::detail::serialized_trie_contains(
          options.trie_false, {field_begin, static_cast<std::size_t>(field_len)})) {
      ++thread_type_histogram.bool_count;
    } else if (digit_count == int_req_number_cnt) {
      auto const is_negative = (*field_begin == '-');
      char const* data_begin = field_begin + (is_negative || (*field_begin == '+'));
      // infer_integral_field_counter returns a pointer to the appropriate
      // counter within thread_type_histogram, which is then incremented.
      cudf::size_type* ptr   = cudf::io::gpu::infer_integral_field_counter(
        data_begin, data_begin + digit_count, is_negative, thread_type_histogram);
      ++*ptr;
    } else if (is_like_float(
                 field_len, digit_count, decimal_count, dash_count + plus_count, exponent_count)) {
      ++thread_type_histogram.float_count;
    }
    // All invalid JSON values are treated as string
    else {
      ++thread_type_histogram.string_count;
    }
  }  // grid-stride for loop

  // Reduce the per-thread histograms within the block, then have lane 0
  // accumulate the block total into the global histogram atomically.
  using BlockReduce = cub::BlockReduce<cudf::io::column_type_histogram, BlockSize>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  auto const block_type_histogram =
    BlockReduce(temp_storage).Reduce(thread_type_histogram, custom_sum{});
  if (threadIdx.x == 0) {
    atomicAdd(&column_info->null_count, block_type_histogram.null_count);
    atomicAdd(&column_info->float_count, block_type_histogram.float_count);
    atomicAdd(&column_info->datetime_count, block_type_histogram.datetime_count);
    atomicAdd(&column_info->string_count, block_type_histogram.string_count);
    atomicAdd(&column_info->negative_small_int_count,
              block_type_histogram.negative_small_int_count);
    atomicAdd(&column_info->positive_small_int_count,
              block_type_histogram.positive_small_int_count);
    atomicAdd(&column_info->big_int_count, block_type_histogram.big_int_count);
    atomicAdd(&column_info->bool_count, block_type_histogram.bool_count);
  }
}

/**
 * @brief Constructs column type histogram for a given column string input `data`.
 *
 * @tparam OptionsView Type of inference options view
 * @tparam ColumnStringIter Iterator type whose `value_type` is a
 * `thrust::tuple<offset_t, length_t>`, where `offset_t` and `length_t` are of integral type and
 * `offset_t` needs to be convertible to `std::size_t`.
 *
 * @param options View of inference options
 * @param data JSON string input
 * @param offset_length_begin The beginning of an offset-length tuple sequence
 * @param size Size of the string input
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return A histogram containing column-specific type counters
 */
template <typename OptionsView, typename ColumnStringIter>
cudf::io::column_type_histogram infer_column_type(OptionsView const& options,
                                                  cudf::device_span<char const> data,
                                                  ColumnStringIter offset_length_begin,
                                                  std::size_t const size,
                                                  rmm::cuda_stream_view stream)
{
  constexpr int block_size = 128;

  auto const grid_size = (size + block_size - 1) / block_size;
  auto d_column_info   = rmm::device_scalar<cudf::io::column_type_histogram>(stream);
  // Zero the device histogram before the kernel accumulates into it.
  CUDF_CUDA_TRY(cudaMemsetAsync(
    d_column_info.data(), 0, sizeof(cudf::io::column_type_histogram), stream.value()));

  infer_column_type_kernel<block_size><<<grid_size, block_size, 0, stream.value()>>>(
    options, data, offset_length_begin, size, d_column_info.data());

  // Synchronizing copy of the histogram back to the host.
  return d_column_info.value(stream);
}

// Maps the histogram produced by infer_column_type to a concrete cudf data type.
// Throws if `size` is zero or if no counter pattern matches.
cudf::data_type infer_data_type(
  cudf::io::json_inference_options_view const& options,
  device_span<char const> data,
  thrust::zip_iterator<thrust::tuple<const size_type*, const size_type*>> offset_length_begin,
  std::size_t const size,
  rmm::cuda_stream_view stream)
{
  CUDF_FUNC_RANGE();
  CUDF_EXPECTS(size != 0, "No data available for data type inference.\n");

  auto const h_column_info = infer_column_type(options, data, offset_length_begin, size, stream);

  auto get_type_id = [&](auto const& cinfo) {
    auto int_count_total =
      cinfo.big_int_count + cinfo.negative_small_int_count + cinfo.positive_small_int_count;
    if (cinfo.null_count == static_cast<cudf::size_type>(size)) {
      // Entire column is NULL; allocate the smallest amount of memory
      return type_id::INT8;
    } else if (cinfo.string_count > 0) {
      return type_id::STRING;
    } else if (cinfo.datetime_count > 0) {
      CUDF_FAIL("Date time is inferred as string.\n");
    } else if (cinfo.float_count > 0) {
      return type_id::FLOAT64;
    } else if (cinfo.big_int_count == 0 && int_count_total != 0) {
      return type_id::INT64;
    } else if (cinfo.big_int_count != 0 && cinfo.negative_small_int_count != 0) {
      // Mixed large-positive and negative integers cannot fit a single 64-bit
      // type; fall back to string.
      return type_id::STRING;
    } else if (cinfo.big_int_count != 0) {
      return type_id::UINT64;
    } else if (cinfo.bool_count > 0) {
      return type_id::BOOL8;
    }
    CUDF_FAIL("Data type inference failed.\n");
  };
  return cudf::data_type{get_type_id(h_column_info)};
}
}  // namespace cudf::io::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/time_utils.cuh
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf/wrappers/timestamps.hpp> namespace cudf { namespace io { /** * @brief Lookup table to compute power of ten */ static const __device__ __constant__ int32_t powers_of_ten[10] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000}; struct get_period { template <typename T> constexpr int32_t operator()() { if constexpr (is_chrono<T>()) { return T::period::den; } CUDF_FAIL("Invalid, non chrono type"); } }; /** * @brief Function that translates cuDF time unit to clock frequency */ constexpr int32_t to_clockrate(type_id timestamp_type_id) { return timestamp_type_id == type_id::EMPTY ? 0 : type_dispatcher(data_type{timestamp_type_id}, get_period{}); } } // namespace io } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/output_builder.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <thrust/copy.h> #include <iterator> namespace cudf { template <typename T> class split_device_span_iterator; /** * @brief A device span consisting of two separate device_spans acting as if they were part of a * single span. The first head.size() entries are served from the first span, the remaining * tail.size() entries are served from the second span. * * @tparam T The type of elements in the span. */ template <typename T> class split_device_span { public: using element_type = T; using value_type = std::remove_cv<T>; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using pointer = T*; using iterator = split_device_span_iterator<T>; using const_pointer = T const*; using reference = T&; using const_reference = T const&; split_device_span() = default; explicit constexpr split_device_span(device_span<T> head, device_span<T> tail = {}) : _head{head}, _tail{tail} { } [[nodiscard]] constexpr reference operator[](size_type i) const { return i < _head.size() ? 
      _head[i] : _tail[i - _head.size()];
  }

  /// Total number of elements across both underlying spans.
  [[nodiscard]] constexpr size_type size() const { return _head.size() + _tail.size(); }

  /// The first contiguous span.
  [[nodiscard]] constexpr device_span<T> head() const { return _head; }

  /// The second contiguous span (may be empty).
  [[nodiscard]] constexpr device_span<T> tail() const { return _tail; }

  [[nodiscard]] constexpr iterator begin() const;
  [[nodiscard]] constexpr iterator end() const;

 private:
  device_span<T> _head;
  device_span<T> _tail;
};

/**
 * @brief A random access iterator indexing into a split_device_span.
 *
 * @tparam T The type of elements in the underlying span.
 */
template <typename T>
class split_device_span_iterator {
  using it = split_device_span_iterator;

 public:
  using size_type         = std::size_t;
  using difference_type   = std::ptrdiff_t;
  using value_type        = T;
  using pointer           = value_type*;
  using reference         = value_type&;
  using iterator_category = std::random_access_iterator_tag;

  split_device_span_iterator() = default;

  constexpr split_device_span_iterator(split_device_span<T> span, size_type offset)
    : _span{span}, _offset{offset}
  {
  }

  [[nodiscard]] constexpr reference operator*() const { return _span[_offset]; }

  [[nodiscard]] constexpr reference operator[](size_type i) const { return _span[_offset + i]; }

  // NOTE: comparisons consider only the offset; comparing iterators obtained from
  // different split_device_spans is not meaningful.
  [[nodiscard]] constexpr friend bool operator==(it const& lhs, it const& rhs)
  {
    return lhs._offset == rhs._offset;
  }

  [[nodiscard]] constexpr friend bool operator!=(it const& lhs, it const& rhs)
  {
    return !(lhs == rhs);
  }

  [[nodiscard]] constexpr friend bool operator<(it const& lhs, it const& rhs)
  {
    return lhs._offset < rhs._offset;
  }

  [[nodiscard]] constexpr friend bool operator>=(it const& lhs, it const& rhs)
  {
    return !(lhs < rhs);
  }

  [[nodiscard]] constexpr friend bool operator>(it const& lhs, it const& rhs) { return rhs < lhs; }

  [[nodiscard]] constexpr friend bool operator<=(it const& lhs, it const& rhs)
  {
    return !(lhs > rhs);
  }

  [[nodiscard]] constexpr friend difference_type operator-(it const& lhs, it const& rhs)
  {
    return lhs._offset - rhs._offset;
  }

  [[nodiscard]] constexpr friend it operator+(it lhs, difference_type i) { return lhs += i; }

  constexpr it& operator+=(difference_type i)
  {
    _offset += i;
    return *this;
  }

  constexpr it& operator-=(difference_type i) { return *this += -i; }

  constexpr it& operator++() { return *this += 1; }

  constexpr it& operator--() { return *this -= 1; }

  constexpr it operator++(int)
  {
    auto result = *this;
    ++*this;
    return result;
  }

  constexpr it operator--(int)
  {
    auto result = *this;
    --*this;
    return result;
  }

 private:
  split_device_span<T> _span;
  size_type _offset;
};

template <typename T>
[[nodiscard]] constexpr split_device_span_iterator<T> split_device_span<T>::begin() const
{
  return {*this, 0};
}

template <typename T>
[[nodiscard]] constexpr split_device_span_iterator<T> split_device_span<T>::end() const
{
  return {*this, size()};
}

/**
 * @brief A chunked storage class that provides preallocated memory for algorithms with known
 * worst-case output size. It provides functionality to retrieve the next chunk to write to, for
 * reporting how much memory was actually written and for gathering all previously written outputs
 * into a single contiguous vector.
 *
 * @tparam T The output element type.
 */
template <typename T>
class output_builder {
 public:
  using size_type = typename rmm::device_uvector<T>::size_type;

  /**
   * @brief Initializes an output builder with given worst-case output size and stream.
   *
   * @param max_write_size the maximum number of elements that will be written into a
   *                       split_device_span returned from `next_output`.
   * @param max_growth the growth factor cap: a newly allocated chunk holds at most
   *                   `max_growth * max_write_size` elements.
   * @param stream the stream used to allocate the first chunk of memory.
   * @param mr optional, the memory resource to use for allocation.
   */
  output_builder(size_type max_write_size,
                 size_type max_growth,
                 rmm::cuda_stream_view stream,
                 rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
    : _size{0}, _max_write_size{max_write_size}, _max_growth{max_growth}
  {
    CUDF_EXPECTS(max_write_size > 0, "Internal error");
    // First chunk is created empty and given capacity for two full writes.
    _chunks.emplace_back(0, stream, mr);
    _chunks.back().reserve(max_write_size * 2, stream);
  }

  // Non-copyable and non-movable: `split_device_span`s handed out by `next_output`
  // point into the chunk storage.
  output_builder(output_builder&&)                 = delete;
  output_builder(output_builder const&)            = delete;
  output_builder& operator=(output_builder&&)      = delete;
  output_builder& operator=(output_builder const&) = delete;

  /**
   * @brief Returns the next free chunk of `max_write_size` elements from the underlying storage.
   * Must be followed by a call to `advance_output` after the memory has been written to.
   *
   * @param stream The stream to allocate a new chunk of memory with, if necessary.
   *               This should be the stream that will write to the `split_device_span`.
   * @return A `split_device_span` starting directly after the last output and providing at least
   *         `max_write_size` entries of storage.
   */
  [[nodiscard]] split_device_span<T> next_output(rmm::cuda_stream_view stream)
  {
    // If the last chunk is still empty, the write head is the second-to-last chunk.
    auto head_it   = _chunks.end() - (_chunks.size() > 1 and _chunks.back().is_empty() ? 2 : 1);
    auto head_span = get_free_span(*head_it);
    if (head_span.size() >= _max_write_size) { return split_device_span<T>{head_span}; }
    if (head_it == _chunks.end() - 1) {
      // insert a new device_uvector of double size
      auto const next_chunk_size =
        std::min(_max_growth * _max_write_size, 2 * _chunks.back().capacity());
      _chunks.emplace_back(0, stream, _chunks.back().memory_resource());
      _chunks.back().reserve(next_chunk_size, stream);
    }
    auto tail_span = get_free_span(_chunks.back());
    CUDF_EXPECTS(head_span.size() + tail_span.size() >= _max_write_size, "Internal error");
    return split_device_span<T>{head_span, tail_span};
  }

  /**
   * @brief Advances the output sizes after a `split_device_span` returned from `next_output` was
   * written to.
   *
   * @param actual_size The number of elements that were written to the result of the previous
   *                    `next_output` call.
   * @param stream The stream on which to resize the vectors. Since this function will not
   *               reallocate, this only changes the stream of the internally stored vectors,
   *               impacting their subsequent copy and destruction behavior.
   */
  void advance_output(size_type actual_size, rmm::cuda_stream_view stream)
  {
    CUDF_EXPECTS(actual_size <= _max_write_size, "Internal error");
    if (_chunks.size() < 2) {
      auto const new_size = _chunks.back().size() + actual_size;
      inplace_resize(_chunks.back(), new_size, stream);
    } else {
      // The write may have spanned the end of the second-to-last chunk into the last chunk;
      // distribute `actual_size` across both.
      auto& tail              = _chunks.back();
      auto& prev              = _chunks.rbegin()[1];
      auto const prev_advance = std::min(actual_size, prev.capacity() - prev.size());
      auto const tail_advance = actual_size - prev_advance;
      inplace_resize(prev, prev.size() + prev_advance, stream);
      inplace_resize(tail, tail.size() + tail_advance, stream);
    }
    _size += actual_size;
  }

  /**
   * @brief Returns the first element that was written to the output.
   *        Requires a previous call to `next_output` and `advance_output` and `size() > 0`.
   * @param stream The stream used to access the element.
   * @return The first element that was written to the output.
   */
  [[nodiscard]] T front_element(rmm::cuda_stream_view stream) const
  {
    return _chunks.front().front_element(stream);
  }

  /**
   * @brief Returns the last element that was written to the output.
   *        Requires a previous call to `next_output` and `advance_output` and `size() > 0`.
   * @param stream The stream used to access the element.
   * @return The last element that was written to the output.
   */
  [[nodiscard]] T back_element(rmm::cuda_stream_view stream) const
  {
    auto const& last_nonempty_chunk =
      _chunks.size() > 1 and _chunks.back().is_empty() ? _chunks.rbegin()[1] : _chunks.back();
    return last_nonempty_chunk.back_element(stream);
  }

  /// Total number of elements written so far (sum of all `advance_output` calls).
  [[nodiscard]] size_type size() const { return _size; }

  /**
   * @brief Gathers all previously written outputs into a single contiguous vector.
   *
   * @param stream The stream used to allocate and gather the output vector. All previous write
   *               operations to the output buffer must have finished or happened on this stream.
   * @param mr The memory resource used to allocate the output vector.
   * @return The output vector.
   */
  rmm::device_uvector<T> gather(rmm::cuda_stream_view stream,
                                rmm::mr::device_memory_resource* mr) const
  {
    rmm::device_uvector<T> output{size(), stream, mr};
    auto output_it = output.begin();
    for (auto const& chunk : _chunks) {
      output_it = thrust::copy(
        rmm::exec_policy_nosync(stream), chunk.begin(), chunk.begin() + chunk.size(), output_it);
    }
    return output;
  }

 private:
  /**
   * @brief Resizes a vector without reallocating
   *
   * @param vector The vector
   * @param new_size The new size. Must be smaller than the vector's capacity
   * @param stream The stream on which to resize the vector. Since this function will not
   *               reallocate, this only changes the stream of `vector`, impacting its subsequent
   *               copy and destruction behavior.
   */
  static void inplace_resize(rmm::device_uvector<T>& vector,
                             size_type new_size,
                             rmm::cuda_stream_view stream)
  {
    CUDF_EXPECTS(new_size <= vector.capacity(), "Internal error");
    vector.resize(new_size, stream);
  }

  /**
   * @brief Returns the span consisting of all currently unused elements in the vector
   * (`i >= size() and i < capacity()`).
   *
   * @param vector The vector.
   * @return The span of unused elements.
   */
  static device_span<T> get_free_span(rmm::device_uvector<T>& vector)
  {
    return device_span<T>{vector.data() + vector.size(), vector.capacity() - vector.size()};
  }

  size_type _size;
  size_type _max_write_size;
  size_type _max_growth;
  std::vector<rmm::device_uvector<T>> _chunks;
};

}  // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/config_utils.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/utilities/logger.hpp> #include <sstream> #include <string> namespace cudf::io::detail { /** * @brief Returns the value of the environment variable, or a default value if the variable is not * present. */ template <typename T> T getenv_or(std::string_view env_var_name, T default_val) { auto const env_val = std::getenv(env_var_name.data()); if (env_val != nullptr) { CUDF_LOG_INFO("Environment variable {} read as {}", env_var_name, env_val); } else { CUDF_LOG_INFO( "Environment variable {} is not set, using default value {}", env_var_name, default_val); } if (env_val == nullptr) { return default_val; } std::stringstream sstream(env_val); T converted_val; sstream >> converted_val; return converted_val; } namespace cufile_integration { /** * @brief Returns true if cuFile and its compatibility mode are enabled. */ bool is_always_enabled(); /** * @brief Returns true if only direct IO through cuFile is enabled (compatibility mode is disabled). */ bool is_gds_enabled(); /** * @brief Returns true if KvikIO is enabled. */ bool is_kvikio_enabled(); } // namespace cufile_integration namespace nvcomp_integration { /** * @brief Returns true if all nvCOMP uses are enabled. */ bool is_all_enabled(); /** * @brief Returns true if stable nvCOMP use is enabled. */ bool is_stable_enabled(); } // namespace nvcomp_integration } // namespace cudf::io::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/column_buffer.hpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file column_buffer.hpp
 * @brief cuDF-IO Column-backing buffer utilities
 */

#pragma once

#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/io/types.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/traits.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>

#include <thrust/pair.h>

namespace cudf {
namespace io {
namespace detail {

/**
 * @brief Creates a `device_buffer` for holding `column` data.
 *
 * @param type The intended data type to populate
 * @param size The number of elements to be represented by the mask
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned device_buffer
 *
 * @return `rmm::device_buffer` Device buffer allocation
 */
inline rmm::device_buffer create_data(data_type type,
                                      size_type size,
                                      rmm::cuda_stream_view stream,
                                      rmm::mr::device_memory_resource* mr)
{
  std::size_t data_size = size_of(type) * size;

  rmm::device_buffer data(data_size, stream, mr);
  // Zero-initialize the buffer asynchronously on the given stream.
  CUDF_CUDA_TRY(cudaMemsetAsync(data.data(), 0, data_size, stream.value()));

  return data;
}

// (pointer, length) pair referencing string data that still lives in the decoded pages;
// used as input to a gather step when building a strings column.
using string_index_pair = thrust::pair<char const*, size_type>;

// forward declare friend functions
template <typename string_policy>
class column_buffer_base;

/**
 * @brief Creates a column from an existing set of device memory buffers.
 *
 * @throws std::bad_alloc if device memory allocation fails
 *
 * @param buffer Column buffer descriptors
 * @param schema_info Schema information for the column to write optionally.
 * @param schema Optional schema used to control string to binary conversions.
 * @param stream CUDA stream used for device memory operations and kernel launches.
 *
 * @return `std::unique_ptr<cudf::column>` Column from the existing device data
 */
template <class string_policy>
std::unique_ptr<column> make_column(column_buffer_base<string_policy>& buffer,
                                    column_name_info* schema_info,
                                    std::optional<reader_column_schema> const& schema,
                                    rmm::cuda_stream_view stream);

/**
 * @brief CRTP base class for column-backing buffers; `string_policy` is the derived class
 * and supplies the strings-data storage strategy via the `*_impl` members.
 */
template <typename string_policy>
class column_buffer_base {
 public:
  column_buffer_base() = default;

  // construct without a known size. call create() later to actually allocate memory
  column_buffer_base(data_type _type, bool _is_nullable) : type(_type), is_nullable(_is_nullable) {}

  // NOTE: _size, stream and mr are intentionally unused here; derived classes call
  // create() with them after delegating to the two-argument constructor.
  column_buffer_base(data_type _type,
                     size_type _size,
                     bool _is_nullable,
                     rmm::cuda_stream_view stream,
                     rmm::mr::device_memory_resource* mr)
    : column_buffer_base(_type, _is_nullable)
  {
  }

  // move constructor
  column_buffer_base(column_buffer_base&& col)            = default;
  column_buffer_base& operator=(column_buffer_base&& col) = default;

  // copy constructor
  column_buffer_base(column_buffer_base const& col)            = delete;
  column_buffer_base& operator=(column_buffer_base const& col) = delete;

  // instantiate a column of known type with a specified size.  Allows deferred creation for
  // preprocessing steps such as in the Parquet reader
  void create(size_type _size, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);

  // Create a new column_buffer that has empty data but with the same basic information as the
  // input column, including same type, nullability, name, and user_data.
  static string_policy empty_like(string_policy const& input);

  // Takes ownership of the provided null mask buffer.
  void set_null_mask(rmm::device_buffer&& mask) { _null_mask = std::move(mask); }

  template <typename T = uint32_t>
  auto null_mask()
  {
    return static_cast<T*>(_null_mask.data());
  }
  auto null_mask_size() { return _null_mask.size(); }
  auto& null_count() { return _null_count; }

  // Data accessors dispatch to the derived class (CRTP).
  auto data() { return static_cast<string_policy*>(this)->data_impl(); }
  auto data() const { return static_cast<string_policy const*>(this)->data_impl(); }
  auto data_size() const { return static_cast<string_policy const*>(this)->data_size_impl(); }

  std::unique_ptr<column> make_string_column(rmm::cuda_stream_view stream)
  {
    return static_cast<string_policy*>(this)->make_string_column_impl(stream);
  }

 protected:
  rmm::device_buffer _data{};       // fixed-width (or offsets) device data
  rmm::device_buffer _null_mask{};  // validity bitmask
  size_type _null_count{0};
  rmm::mr::device_memory_resource* _mr;

 public:
  data_type type{type_id::EMPTY};
  bool is_nullable{false};
  size_type size{0};
  uint32_t user_data{0};  // arbitrary user data
  std::string name;

  std::vector<string_policy> children;

  friend std::unique_ptr<column> make_column<string_policy>(
    column_buffer_base& buffer,
    column_name_info* schema_info,
    std::optional<reader_column_schema> const& schema,
    rmm::cuda_stream_view stream);
};

// column buffer that uses a string_index_pair for strings data, requiring a gather step when
// creating a string column
class gather_column_buffer : public column_buffer_base<gather_column_buffer> {
 public:
  gather_column_buffer() = default;

  // construct without a known size. call create() later to actually allocate memory
  gather_column_buffer(data_type _type, bool _is_nullable)
    : column_buffer_base<gather_column_buffer>(_type, _is_nullable)
  {
  }

  gather_column_buffer(data_type _type,
                       size_type _size,
                       bool _is_nullable,
                       rmm::cuda_stream_view stream,
                       rmm::mr::device_memory_resource* mr)
    : column_buffer_base<gather_column_buffer>(_type, _size, _is_nullable, stream, mr)
  {
    create(_size, stream, mr);
  }

  void allocate_strings_data(rmm::cuda_stream_view stream);

  // For string columns the data lives in _strings; otherwise in the inherited _data buffer.
  void* data_impl() { return _strings ? _strings->data() : _data.data(); }
  void const* data_impl() const { return _strings ? _strings->data() : _data.data(); }
  size_t data_size_impl() const { return _strings ? _strings->size() : _data.size(); }

  std::unique_ptr<column> make_string_column_impl(rmm::cuda_stream_view stream);

 public:
  std::unique_ptr<rmm::device_uvector<string_index_pair>> _strings;
};

// column buffer that stores string data internally which can be passed directly when
// creating a string column
class inline_column_buffer : public column_buffer_base<inline_column_buffer> {
 public:
  inline_column_buffer() = default;

  // construct without a known size. call create() later to actually allocate memory
  inline_column_buffer(data_type _type, bool _is_nullable)
    : column_buffer_base<inline_column_buffer>(_type, _is_nullable)
  {
  }

  inline_column_buffer(data_type _type,
                       size_type _size,
                       bool _is_nullable,
                       rmm::cuda_stream_view stream,
                       rmm::mr::device_memory_resource* mr)
    : column_buffer_base<inline_column_buffer>(_type, _size, _is_nullable, stream, mr)
  {
    create(_size, stream, mr);
  }

  void allocate_strings_data(rmm::cuda_stream_view stream);

  void* data_impl() { return _data.data(); }
  void const* data_impl() const { return _data.data(); }
  size_t data_size_impl() const { return _data.size(); }
  std::unique_ptr<column> make_string_column_impl(rmm::cuda_stream_view stream);

  // Character storage for string columns (offsets live in the inherited _data buffer).
  void create_string_data(size_t num_bytes, rmm::cuda_stream_view stream);
  void* string_data() { return _string_data.data(); }
  void const* string_data() const { return _string_data.data(); }
  size_t string_size() const { return _string_data.size(); }

 private:
  rmm::device_buffer _string_data{};
};

// Default policy used by readers that gather string data.
using column_buffer = gather_column_buffer;

/**
 * @brief Creates an equivalent empty column from an existing set of device memory buffers.
 *
 * This function preserves nested column type information by producing complete/identical
 * column hierarchies.
 *
 * @throws std::bad_alloc if device memory allocation fails
 *
 * @param buffer Column buffer descriptors
 * @param schema_info Schema information for the column to write optionally.
 * @param stream CUDA stream used for device memory operations and kernel launches.
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @return `std::unique_ptr<cudf::column>` Column from the existing device data
 */
template <class string_policy>
std::unique_ptr<column> empty_like(column_buffer_base<string_policy>& buffer,
                                   column_name_info* schema_info,
                                   rmm::cuda_stream_view stream,
                                   rmm::mr::device_memory_resource* mr);

}  // namespace detail
}  // namespace io
}  // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/parsing_utils.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/types.hpp>
#include <cudf/utilities/error.hpp>

#include <thrust/pair.h>

#include <rmm/device_buffer.hpp>

#include <algorithm>

namespace cudf {
namespace io {

namespace {

// When processing the input in chunks, this is the maximum size of each chunk.
// Only one chunk is loaded on the GPU at a time, so this value is chosen to
// be small enough to fit on the GPU in most cases.
constexpr size_t max_chunk_bytes = 256 * 1024 * 1024;  // 256MB

// Number of input bytes scanned by each thread of count_and_set_positions.
constexpr int bytes_per_find_thread = 64;

using pos_key_pair = thrust::pair<uint64_t, char>;

template <typename T>
constexpr T divCeil(T dividend, T divisor) noexcept
{
  return (dividend + divisor - 1) / divisor;
}

/**
 * @brief Sets the specified element of the array to the passed value
 */
template <class T, class V>
__device__ __forceinline__ void setElement(T* array, cudf::size_type idx, T const& t, V const&)
{
  array[idx] = t;
}

/**
 * @brief Sets the specified element of the array of pairs using the two passed
 * parameters.
 */
template <class T, class V>
__device__ __forceinline__ void setElement(thrust::pair<T, V>* array,
                                           cudf::size_type idx,
                                           T const& t,
                                           V const& v)
{
  array[idx] = {t, v};
}

/**
 * @brief Overloads the setElement() functions for void* arrays.
 * Does not do anything, indexing is not allowed with void* arrays.
 */
template <class T, class V>
__device__ __forceinline__ void setElement(void*, cudf::size_type, T const&, V const&)
{
}

/**
 * @brief CUDA kernel that finds all occurrences of a character in the given
 * character array. If the 'positions' parameter is not void*,
 * positions of all occurrences are stored in the output array.
 *
 * Each thread scans a contiguous window of `bytes_per_find_thread` bytes.
 *
 * @param[in] data Pointer to the input character array
 * @param[in] size Number of bytes in the input array
 * @param[in] offset Offset to add to the output positions
 * @param[in] key Character to find in the array
 * @param[in,out] count Pointer to the number of found occurrences
 * @param[out] positions Array containing the output positions
 */
template <class T>
__global__ void count_and_set_positions(char const* data,
                                        uint64_t size,
                                        uint64_t offset,
                                        char const key,
                                        cudf::size_type* count,
                                        T* positions)
{
  // thread IDs range per block, so also need the block id
  auto const tid = cudf::detail::grid_1d::global_thread_id();
  auto const did = tid * bytes_per_find_thread;

  char const* raw = (data + did);

  // Clamp the last thread's window to the end of the input.
  long const byteToProcess =
    ((did + bytes_per_find_thread) < size) ? bytes_per_find_thread : (size - did);

  // Process the data
  for (long i = 0; i < byteToProcess; i++) {
    if (raw[i] == key) {
      auto const idx = atomicAdd(count, (cudf::size_type)1);
      setElement(positions, idx, did + offset + i, key);
    }
  }
}

}  // namespace

template <class T>
cudf::size_type find_all_from_set(device_span<char const> data,
                                  std::vector<char> const& keys,
                                  uint64_t result_offset,
                                  T* positions,
                                  rmm::cuda_stream_view stream)
{
  int block_size    = 0;  // suggested thread count to use
  int min_grid_size = 0;  // minimum block count required
  CUDF_CUDA_TRY(
    cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, count_and_set_positions<T>));

  // Each thread processes bytes_per_find_thread bytes, so size the grid by the
  // number of threads, not the number of bytes (matches the host-span overload).
  auto const num_threads = divCeil(data.size(), static_cast<size_t>(bytes_per_find_thread));
  int const grid_size    = divCeil(num_threads, static_cast<size_t>(block_size));

  auto d_count = cudf::detail::make_zeroed_device_uvector_async<cudf::size_type>(
    1, stream, rmm::mr::get_current_device_resource());
  for (char key : keys) {
    count_and_set_positions<T><<<grid_size, block_size, 0, stream.value()>>>(
      data.data(), data.size(), result_offset, key, d_count.data(), positions);
  }

  return cudf::detail::make_std_vector_sync(d_count, stream)[0];
}

template <class T>
cudf::size_type find_all_from_set(host_span<char const> data,
                                  std::vector<char> const& keys,
                                  uint64_t result_offset,
                                  T* positions,
                                  rmm::cuda_stream_view stream)
{
  rmm::device_buffer d_chunk(std::min(max_chunk_bytes, data.size()), stream);
  auto d_count = cudf::detail::make_zeroed_device_uvector_async<cudf::size_type>(
    1, stream, rmm::mr::get_current_device_resource());

  int block_size    = 0;  // suggested thread count to use
  int min_grid_size = 0;  // minimum block count required
  CUDF_CUDA_TRY(
    cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, count_and_set_positions<T>));

  // Process the input in chunks so that only max_chunk_bytes reside on the GPU at a time.
  size_t const chunk_count = divCeil(data.size(), max_chunk_bytes);
  for (size_t ci = 0; ci < chunk_count; ++ci) {
    auto const chunk_offset = ci * max_chunk_bytes;
    auto const h_chunk      = data.data() + chunk_offset;
    int const chunk_bytes =
      std::min(static_cast<size_t>(data.size() - ci * max_chunk_bytes), max_chunk_bytes);
    auto const num_threads = divCeil(chunk_bytes, bytes_per_find_thread);
    int const grid_size    = divCeil(num_threads, block_size);

    // Copy chunk to device
    CUDF_CUDA_TRY(
      cudaMemcpyAsync(d_chunk.data(), h_chunk, chunk_bytes, cudaMemcpyDefault, stream.value()));

    for (char key : keys) {
      count_and_set_positions<T>
        <<<grid_size, block_size, 0, stream.value()>>>(static_cast<char*>(d_chunk.data()),
                                                       chunk_bytes,
                                                       chunk_offset + result_offset,
                                                       key,
                                                       d_count.data(),
                                                       positions);
    }
  }

  return cudf::detail::make_std_vector_sync(d_count, stream)[0];
}

template cudf::size_type find_all_from_set<uint64_t>(device_span<char const> data,
                                                     std::vector<char> const& keys,
                                                     uint64_t result_offset,
                                                     uint64_t* positions,
                                                     rmm::cuda_stream_view stream);

template cudf::size_type find_all_from_set<pos_key_pair>(device_span<char const> data,
                                                         std::vector<char> const& keys,
                                                         uint64_t result_offset,
                                                         pos_key_pair* positions,
                                                         rmm::cuda_stream_view stream);

template cudf::size_type find_all_from_set<uint64_t>(host_span<char const> data,
                                                     std::vector<char> const& keys,
                                                     uint64_t result_offset,
                                                     uint64_t* positions,
                                                     rmm::cuda_stream_view stream);

template cudf::size_type find_all_from_set<pos_key_pair>(host_span<char const> data,
                                                         std::vector<char> const& keys,
                                                         uint64_t result_offset,
                                                         pos_key_pair* positions,
                                                         rmm::cuda_stream_view stream);

cudf::size_type count_all_from_set(device_span<char const> data,
                                   std::vector<char> const& keys,
                                   rmm::cuda_stream_view stream)
{
  return find_all_from_set<void>(data, keys, 0, nullptr, stream);
}

cudf::size_type count_all_from_set(host_span<char const> data,
                                   std::vector<char> const& keys,
                                   rmm::cuda_stream_view stream)
{
  return find_all_from_set<void>(data, keys, 0, nullptr, stream);
}

}  // namespace io
}  // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/hostdevice_vector.hpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "config_utils.hpp"
#include "hostdevice_span.hpp"

#include <cudf/detail/utilities/pinned_host_vector.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>

#include <thrust/host_vector.h>

#include <variant>

namespace cudf::detail {

// Controlled by the LIBCUDF_IO_PREFER_PAGEABLE_TMP_MEMORY environment variable;
// when nonzero, the host side uses a pageable buffer instead of pinned memory.
inline bool hostdevice_vector_uses_pageable_buffer()
{
  static bool const use_pageable =
    cudf::io::detail::getenv_or("LIBCUDF_IO_PREFER_PAGEABLE_TMP_MEMORY", 0);
  return use_pageable;
}

/**
 * @brief A helper class that wraps fixed-length device memory for the GPU, and
 * a mirror host pinned memory for the CPU.
 *
 * This abstraction allocates a specified fixed chunk of device memory that can
 * initialized upfront, or gradually initialized as required.
 * The host-side memory can be used to manipulate data on the CPU before and
 * after operating on the same data on the GPU.
 */
template <typename T>
class hostdevice_vector {
 public:
  using value_type = T;

  hostdevice_vector() : hostdevice_vector(0, cudf::get_default_stream()) {}

  explicit hostdevice_vector(size_t size, rmm::cuda_stream_view stream)
    : hostdevice_vector(size, size, stream)
  {
  }

  explicit hostdevice_vector(size_t initial_size, size_t max_size, rmm::cuda_stream_view stream)
    : d_data(0, stream)
  {
    CUDF_EXPECTS(initial_size <= max_size, "initial_size cannot be larger than max_size");

    // Host storage is either pageable or pinned depending on the environment setting.
    if (hostdevice_vector_uses_pageable_buffer()) {
      h_data_owner = thrust::host_vector<T>();
    } else {
      h_data_owner = cudf::detail::pinned_host_vector<T>();
    }

    // Reserve the full capacity up front so host_data stays valid across push_back calls.
    std::visit(
      [&](auto&& v) {
        v.reserve(max_size);
        v.resize(initial_size);
        host_data = v.data();
      },
      h_data_owner);

    current_size = initial_size;
    d_data.resize(max_size, stream);
  }

  // Appends to the host buffer only; call host_to_device_* to mirror to the device.
  void push_back(T const& data)
  {
    CUDF_EXPECTS(size() < capacity(),
                 "Cannot insert data into hostdevice_vector because capacity has been exceeded.");
    host_data[current_size++] = data;
  }

  [[nodiscard]] size_t capacity() const noexcept { return d_data.size(); }
  [[nodiscard]] size_t size() const noexcept { return current_size; }
  [[nodiscard]] size_t size_bytes() const noexcept { return sizeof(T) * size(); }
  [[nodiscard]] bool empty() const noexcept { return size() == 0; }

  // Element access and iterators operate on the host-side buffer.
  [[nodiscard]] T& operator[](size_t i) { return host_data[i]; }
  [[nodiscard]] T const& operator[](size_t i) const { return host_data[i]; }

  [[nodiscard]] T* host_ptr(size_t offset = 0) { return host_data + offset; }
  [[nodiscard]] T const* host_ptr(size_t offset = 0) const { return host_data + offset; }

  [[nodiscard]] T* begin() { return host_ptr(); }
  [[nodiscard]] T const* begin() const { return host_ptr(); }

  [[nodiscard]] T* end() { return host_ptr(size()); }
  [[nodiscard]] T const* end() const { return host_ptr(size()); }

  [[nodiscard]] T* device_ptr(size_t offset = 0) { return d_data.data() + offset; }
  [[nodiscard]] T const* device_ptr(size_t offset = 0) const { return d_data.data() + offset; }

  [[nodiscard]] T* d_begin() { return device_ptr(); }
  [[nodiscard]] T const* d_begin() const { return device_ptr(); }

  [[nodiscard]] T* d_end() { return device_ptr(size()); }
  [[nodiscard]] T const* d_end() const { return device_ptr(size()); }

  /**
   * @brief Returns the specified element from device memory
   *
   * @note This function incurs a device to host memcpy and should be used sparingly.
   * @note This function synchronizes `stream`.
   *
   * @throws rmm::out_of_range exception if `element_index >= size()`
   *
   * @param element_index Index of the desired element
   * @param stream The stream on which to perform the copy
   * @return The value of the specified element
   */
  [[nodiscard]] T element(std::size_t element_index, rmm::cuda_stream_view stream) const
  {
    return d_data.element(element_index, stream);
  }

  operator cudf::host_span<T>() { return {host_ptr(), size()}; }
  operator cudf::host_span<T const>() const { return {host_ptr(), size()}; }

  operator cudf::device_span<T>() { return {device_ptr(), size()}; }
  operator cudf::device_span<T const>() const { return {device_ptr(), size()}; }

  void host_to_device_async(rmm::cuda_stream_view stream)
  {
    CUDF_CUDA_TRY(
      cudaMemcpyAsync(device_ptr(), host_ptr(), size_bytes(), cudaMemcpyDefault, stream.value()));
  }

  void host_to_device_sync(rmm::cuda_stream_view stream)
  {
    host_to_device_async(stream);
    stream.synchronize();
  }

  void device_to_host_async(rmm::cuda_stream_view stream)
  {
    CUDF_CUDA_TRY(
      cudaMemcpyAsync(host_ptr(), device_ptr(), size_bytes(), cudaMemcpyDefault, stream.value()));
  }

  void device_to_host_sync(rmm::cuda_stream_view stream)
  {
    device_to_host_async(stream);
    stream.synchronize();
  }

  /**
   * @brief Converts a hostdevice_vector into a hostdevice_span.
   *
   * @return A typed hostdevice_span of the hostdevice_vector's data
   */
  [[nodiscard]] operator hostdevice_span<T>()
  {
    return hostdevice_span<T>{host_data, d_data.data(), size()};
  }

  /**
   * @brief Converts a part of a hostdevice_vector into a hostdevice_span.
   *
   * @param offset The offset of the first element in the subspan
   * @param count The number of elements in the subspan
   * @return A typed hostdevice_span of the hostdevice_vector's data
   */
  [[nodiscard]] hostdevice_span<T> subspan(size_t offset, size_t count)
  {
    CUDF_EXPECTS(offset < d_data.size(), "Offset is out of bounds.");
    CUDF_EXPECTS(count <= d_data.size() - offset,
                 "The span with given offset and count is out of bounds.");
    return hostdevice_span<T>{host_data + offset, d_data.data() + offset, count};
  }

 private:
  // Owns the host allocation; host_data caches its data() pointer, which remains
  // valid because capacity is fully reserved at construction.
  std::variant<thrust::host_vector<T>, cudf::detail::pinned_host_vector<T>> h_data_owner;
  T* host_data        = nullptr;
  size_t current_size = 0;
  rmm::device_uvector<T> d_data;
};

/**
 * @brief Wrapper around hostdevice_vector to enable two-dimensional indexing.
 *
 * Does not incur additional allocations.
 */
template <typename T>
class hostdevice_2dvector {
 public:
  hostdevice_2dvector(size_t rows, size_t columns, rmm::cuda_stream_view stream)
    : _size{rows, columns}, _data{rows * columns, stream}
  {
  }

  operator device_2dspan<T>() { return {_data.device_ptr(), _size}; }
  operator device_2dspan<T const>() const { return {_data.device_ptr(), _size}; }

  device_2dspan<T> device_view() { return static_cast<device_2dspan<T>>(*this); }
  // NOTE(review): const overload's declared return type (device_2dspan<T>) does not match the
  // cast target (device_2dspan<T const>); likely only valid if never instantiated — verify.
  device_2dspan<T> device_view() const { return static_cast<device_2dspan<T const>>(*this); }

  operator host_2dspan<T>() { return {_data.host_ptr(), _size}; }
  operator host_2dspan<T const>() const { return {_data.host_ptr(), _size}; }

  host_2dspan<T> host_view() { return static_cast<host_2dspan<T>>(*this); }
  // NOTE(review): same return-type/cast mismatch as device_view() const — verify.
  host_2dspan<T> host_view() const { return static_cast<host_2dspan<T const>>(*this); }

  // Row access returns a host-side span over one row.
  host_span<T> operator[](size_t row)
  {
    return {_data.host_ptr() + host_2dspan<T>::flatten_index(row, 0, _size), _size.second};
  }

  host_span<T const> operator[](size_t row) const
  {
    return {_data.host_ptr() + host_2dspan<T>::flatten_index(row, 0, _size), _size.second};
  }

  auto size() const noexcept { return _size; }
  auto count() const noexcept { return _size.first * _size.second; }
  auto is_empty() const noexcept { return count() == 0; }

  T* base_host_ptr(size_t offset = 0) { return _data.host_ptr(offset); }
  T* base_device_ptr(size_t offset = 0) { return _data.device_ptr(offset); }

  T const* base_host_ptr(size_t offset = 0) const { return _data.host_ptr(offset); }

  T const* base_device_ptr(size_t offset = 0) const { return _data.device_ptr(offset); }

  size_t size_bytes() const noexcept { return _data.size_bytes(); }

  void host_to_device_async(rmm::cuda_stream_view stream) { _data.host_to_device_async(stream); }
  void host_to_device_sync(rmm::cuda_stream_view stream) { _data.host_to_device_sync(stream); }

  void device_to_host_async(rmm::cuda_stream_view stream) { _data.device_to_host_async(stream); }
  void device_to_host_sync(rmm::cuda_stream_view stream) { _data.device_to_host_sync(stream); }

 private:
  hostdevice_vector<T> _data;
  typename host_2dspan<T>::size_type _size;  // (rows, columns)
};

}  // namespace cudf::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/utilities/config_utils.cpp
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "config_utils.hpp"

#include <cudf/utilities/error.hpp>

#include <cstdlib>
#include <string>

namespace cudf::io::detail {

namespace cufile_integration {

namespace {
/**
 * @brief Defines which cuFile usage to enable.
 */
enum class usage_policy : uint8_t { OFF, GDS, ALWAYS, KVIKIO };

/**
 * @brief Get the current usage policy.
 *
 * Reads LIBCUDF_CUFILE_POLICY once (cached in a function-local static) and maps the
 * string onto a usage_policy value; any unrecognized value is an error.
 */
usage_policy get_env_policy()
{
  static auto const env_val = getenv_or<std::string>("LIBCUDF_CUFILE_POLICY", "KVIKIO");
  if (env_val == "KVIKIO") { return usage_policy::KVIKIO; }
  if (env_val == "ALWAYS") { return usage_policy::ALWAYS; }
  if (env_val == "GDS") { return usage_policy::GDS; }
  if (env_val == "OFF") { return usage_policy::OFF; }
  CUDF_FAIL("Invalid LIBCUDF_CUFILE_POLICY value: " + env_val);
}
}  // namespace

bool is_always_enabled() { return get_env_policy() == usage_policy::ALWAYS; }

bool is_gds_enabled()
{
  auto const policy = get_env_policy();
  return policy == usage_policy::ALWAYS or policy == usage_policy::GDS;
}

bool is_kvikio_enabled() { return get_env_policy() == usage_policy::KVIKIO; }

}  // namespace cufile_integration

namespace nvcomp_integration {

namespace {
/**
 * @brief Defines which nvCOMP usage to enable.
 */
enum class usage_policy : uint8_t { OFF, STABLE, ALWAYS };

/**
 * @brief Get the current usage policy.
 *
 * Reads LIBCUDF_NVCOMP_POLICY once (cached in a function-local static); any
 * unrecognized value is an error.
 */
usage_policy get_env_policy()
{
  static auto const env_val = getenv_or<std::string>("LIBCUDF_NVCOMP_POLICY", "STABLE");
  if (env_val == "ALWAYS") { return usage_policy::ALWAYS; }
  if (env_val == "STABLE") { return usage_policy::STABLE; }
  if (env_val == "OFF") { return usage_policy::OFF; }
  CUDF_FAIL("Invalid LIBCUDF_NVCOMP_POLICY value: " + env_val);
}
}  // namespace

bool is_all_enabled() { return get_env_policy() == usage_policy::ALWAYS; }

bool is_stable_enabled()
{
  auto const policy = get_env_policy();
  return policy == usage_policy::ALWAYS or policy == usage_policy::STABLE;
}

}  // namespace nvcomp_integration

}  // namespace cudf::io::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/text/data_chunk_source_factories.cpp
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "io/text/device_data_chunks.hpp"

#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/pinned_host_vector.hpp>
#include <cudf/io/text/data_chunk_source_factories.hpp>

#include <rmm/device_buffer.hpp>

#include <thrust/host_vector.h>

#include <fstream>

namespace cudf::io::text {

namespace {

// A pinned host staging buffer plus the CUDA event that tracks the last
// async copy issued out of it; the event must be waited on before the
// buffer may be reused.
struct host_ticket {
  cudaEvent_t event;
  cudf::detail::pinned_host_vector<char> buffer;
};

/**
 * @brief A reader which produces owning chunks of device memory which contain a copy of the data
 * from an istream.
 *
 * Uses two host_tickets to double-buffer: while one pinned buffer's
 * host-to-device copy is in flight, the other can be filled.
 */
class datasource_chunk_reader : public data_chunk_reader {
  constexpr static int num_tickets = 2;

 public:
  // @param source Non-owning pointer; must outlive this reader.
  datasource_chunk_reader(datasource* source) : _source(source)
  {
    // create an event to track the completion of the last device-to-host copy.
    for (auto& ticket : _tickets) {
      CUDF_CUDA_TRY(cudaEventCreate(&(ticket.event)));
    }
  }

  // NOTE(review): CUDF_CUDA_TRY in a destructor can throw on CUDA failure —
  // presumably acceptable here since event destruction only fails on broader
  // CUDA context errors; confirm with project error-handling policy.
  ~datasource_chunk_reader() override
  {
    for (auto& ticket : _tickets) {
      CUDF_CUDA_TRY(cudaEventDestroy(ticket.event));
    }
  }

  // Advances the read offset, clamped so it never passes the end of the source.
  void skip_bytes(std::size_t size) override { _offset += std::min(_source->size() - _offset, size); };

  // Returns an owning device buffer with up to `read_size` bytes; the actual
  // size may be smaller near the end of the source.
  std::unique_ptr<device_data_chunk> get_next_chunk(std::size_t read_size,
                                                    rmm::cuda_stream_view stream) override
  {
    CUDF_FUNC_RANGE();

    read_size = std::min(_source->size() - _offset, read_size);

    // get a device buffer containing read data on the device.
    auto chunk = rmm::device_uvector<char>(read_size, stream);

    if (_source->supports_device_read() && _source->is_device_read_preferred(read_size)) {
      // direct device read (e.g. GDS) — no host staging needed
      _source->device_read(_offset, read_size, reinterpret_cast<uint8_t*>(chunk.data()), stream);
    } else {
      auto& h_ticket = _tickets[_next_ticket_idx];

      _next_ticket_idx = (_next_ticket_idx + 1) % num_tickets;

      // synchronize on the last host-to-device copy, so we don't clobber the host buffer.
      CUDF_CUDA_TRY(cudaEventSynchronize(h_ticket.event));

      // resize the host buffer as necessary to contain the requested number of bytes
      if (h_ticket.buffer.size() < read_size) { h_ticket.buffer.resize(read_size); }

      _source->host_read(_offset, read_size, reinterpret_cast<uint8_t*>(h_ticket.buffer.data()));

      // copy the host-pinned data on to device
      CUDF_CUDA_TRY(cudaMemcpyAsync(
        chunk.data(), h_ticket.buffer.data(), read_size, cudaMemcpyDefault, stream.value()));

      // record the host-to-device copy.
      CUDF_CUDA_TRY(cudaEventRecord(h_ticket.event, stream.value()));
    }

    _offset += read_size;

    // return the device buffer so it can be processed.
    return std::make_unique<device_uvector_data_chunk>(std::move(chunk));
  }

 private:
  std::size_t _offset          = 0;
  std::size_t _next_ticket_idx = 0;
  std::array<host_ticket, num_tickets> _tickets{};
  datasource* _source;  // non-owning
};

/**
 * @brief A reader which produces owning chunks of device memory which contain a copy of the data
 * from an istream.
 *
 * Same double-buffered pinned-staging scheme as datasource_chunk_reader, but
 * reads from an owned std::istream.
 */
class istream_data_chunk_reader : public data_chunk_reader {
  constexpr static int num_tickets = 2;

 public:
  istream_data_chunk_reader(std::unique_ptr<std::istream> datastream)
    : _datastream(std::move(datastream))
  {
    // create an event to track the completion of the last device-to-host copy.
    for (auto& ticket : _tickets) {
      CUDF_CUDA_TRY(cudaEventCreate(&(ticket.event)));
    }
  }

  ~istream_data_chunk_reader() override
  {
    for (auto& ticket : _tickets) {
      CUDF_CUDA_TRY(cudaEventDestroy(ticket.event));
    }
  }

  // istream::ignore performs the clamping to end-of-stream for us.
  void skip_bytes(std::size_t size) override { _datastream->ignore(size); };

  std::unique_ptr<device_data_chunk> get_next_chunk(std::size_t read_size,
                                                    rmm::cuda_stream_view stream) override
  {
    CUDF_FUNC_RANGE();

    auto& h_ticket = _tickets[_next_ticket_idx];

    _next_ticket_idx = (_next_ticket_idx + 1) % num_tickets;

    // synchronize on the last host-to-device copy, so we don't clobber the host buffer.
    CUDF_CUDA_TRY(cudaEventSynchronize(h_ticket.event));

    // resize the host buffer as necessary to contain the requested number of bytes
    if (h_ticket.buffer.size() < read_size) { h_ticket.buffer.resize(read_size); }

    // read data from the host istream in to the pinned host memory buffer
    _datastream->read(h_ticket.buffer.data(), read_size);

    // adjust the read size to reflect how many bytes were actually read from the data stream
    read_size = _datastream->gcount();

    // get a device buffer containing read data on the device.
    auto chunk = rmm::device_uvector<char>(read_size, stream);

    // copy the host-pinned data on to device
    CUDF_CUDA_TRY(cudaMemcpyAsync(
      chunk.data(), h_ticket.buffer.data(), read_size, cudaMemcpyDefault, stream.value()));

    // record the host-to-device copy.
    CUDF_CUDA_TRY(cudaEventRecord(h_ticket.event, stream.value()));

    // return the device buffer so it can be processed.
    return std::make_unique<device_uvector_data_chunk>(std::move(chunk));
  }

 private:
  std::size_t _next_ticket_idx = 0;
  std::array<host_ticket, num_tickets> _tickets{};
  std::unique_ptr<std::istream> _datastream;
};

/**
 * @brief A reader which produces owning chunks of device memory which contain a copy of the data
 * from a host span.
 */
class host_span_data_chunk_reader : public data_chunk_reader {
 public:
  host_span_data_chunk_reader(cudf::host_span<char const> data) : _data(data) {}

  void skip_bytes(std::size_t read_size) override
  {
    // clamp so _position never passes the end of the span
    _position += std::min(read_size, _data.size() - _position);
  }

  std::unique_ptr<device_data_chunk> get_next_chunk(std::size_t read_size,
                                                    rmm::cuda_stream_view stream) override
  {
    CUDF_FUNC_RANGE();

    read_size = std::min(read_size, _data.size() - _position);

    // get a device buffer containing read data on the device.
    auto chunk = rmm::device_uvector<char>(read_size, stream);

    // copy the host data to device
    // NOTE(review): the source span is not guaranteed pinned, so this async
    // copy may be effectively synchronous — presumably acceptable; confirm.
    CUDF_CUDA_TRY(cudaMemcpyAsync(  //
      chunk.data(),
      _data.data() + _position,
      read_size,
      cudaMemcpyDefault,
      stream.value()));

    _position += read_size;

    // return the device buffer so it can be processed.
    return std::make_unique<device_uvector_data_chunk>(std::move(chunk));
  }

 private:
  std::size_t _position = 0;
  cudf::host_span<char const> _data;  // non-owning; caller keeps it alive
};

/**
 * @brief A reader which produces view of device memory which represent a subset of the input device
 * span.
 *
 * Zero-copy: chunks are subspans of the input, no allocation is performed.
 */
class device_span_data_chunk_reader : public data_chunk_reader {
 public:
  device_span_data_chunk_reader(device_span<char const> data) : _data(data) {}

  void skip_bytes(std::size_t read_size) override
  {
    _position += std::min(read_size, _data.size() - _position);
  }

  std::unique_ptr<device_data_chunk> get_next_chunk(std::size_t read_size,
                                                    rmm::cuda_stream_view stream) override
  {
    // limit the read size to the number of bytes remaining in the device_span.
    read_size = std::min(read_size, _data.size() - _position);

    // create a view over the device span
    auto chunk_span = _data.subspan(_position, read_size);

    // increment position
    _position += read_size;

    // return the view over device memory so it can be processed.
    return std::make_unique<device_span_data_chunk>(chunk_span);
  }

 private:
  device_span<char const> _data;  // non-owning
  uint64_t _position = 0;
};

/**
 * @brief A datasource-based data chunk source which creates a datasource_chunk_reader.
 */
class datasource_chunk_source : public data_chunk_source {
 public:
  datasource_chunk_source(datasource& source) : _source(&source) {}

  [[nodiscard]] std::unique_ptr<data_chunk_reader> create_reader() const override
  {
    return std::make_unique<datasource_chunk_reader>(_source);
  }

 private:
  datasource* _source;  // non-owning; caller keeps the datasource alive
};

/**
 * @brief A file data source which creates an istream_data_chunk_reader.
 */
class file_data_chunk_source : public data_chunk_source {
 public:
  file_data_chunk_source(std::string_view filename) : _filename(filename) {}

  [[nodiscard]] std::unique_ptr<data_chunk_reader> create_reader() const override
  {
    // each reader owns its own ifstream, so multiple readers are independent
    return std::make_unique<istream_data_chunk_reader>(
      std::make_unique<std::ifstream>(_filename, std::ifstream::in));
  }

 private:
  std::string _filename;
};

/**
 * @brief A host string data source which creates an host_span_data_chunk_reader.
 */
class host_span_data_chunk_source : public data_chunk_source {
 public:
  host_span_data_chunk_source(host_span<char const> data) : _data(data) {}

  [[nodiscard]] std::unique_ptr<data_chunk_reader> create_reader() const override
  {
    return std::make_unique<host_span_data_chunk_reader>(_data);
  }

 private:
  host_span<char const> _data;  // non-owning
};

/**
 * @brief A device span data source which creates an istream_data_chunk_reader.
 */
class device_span_data_chunk_source : public data_chunk_source {
 public:
  device_span_data_chunk_source(device_span<char const> data) : _data(data) {}

  [[nodiscard]] std::unique_ptr<data_chunk_reader> create_reader() const override
  {
    return std::make_unique<device_span_data_chunk_reader>(_data);
  }

 private:
  device_span<char const> _data;  // non-owning
};

}  // namespace

// Factory functions: each wraps its input in the matching data_chunk_source.
// The source does not take ownership of the referenced data; callers must keep
// it alive for the lifetime of the source and any readers created from it.

std::unique_ptr<data_chunk_source> make_source(datasource& data)
{
  return std::make_unique<datasource_chunk_source>(data);
}

std::unique_ptr<data_chunk_source> make_source(host_span<char const> data)
{
  return std::make_unique<host_span_data_chunk_source>(data);
}

std::unique_ptr<data_chunk_source> make_source_from_file(std::string_view filename)
{
  return std::make_unique<file_data_chunk_source>(filename);
}

std::unique_ptr<data_chunk_source> make_source(cudf::string_scalar& data)
{
  auto data_span = device_span<char const>(data.data(), data.size());
  return std::make_unique<device_span_data_chunk_source>(data_span);
}

}  // namespace cudf::io::text
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/text/byte_range_info.cpp
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/io/text/byte_range_info.hpp>

#include <limits>

namespace cudf {
namespace io {
namespace text {

// Returns a range covering "everything": offset 0, maximum representable size.
byte_range_info create_byte_range_info_max() { return {0, std::numeric_limits<int64_t>::max()}; }

/**
 * @brief Splits `total_bytes` into `range_count` consecutive byte ranges.
 *
 * Each range has size ceil(total_bytes / range_count), except the last, which
 * is clamped so the ranges never extend past `total_bytes`.
 *
 * @param total_bytes Total number of bytes to cover
 * @param range_count Number of ranges to produce
 * @return Vector of `range_count` consecutive, non-overlapping ranges
 */
std::vector<byte_range_info> create_byte_range_infos_consecutive(int64_t total_bytes,
                                                                 int64_t range_count)
{
  auto range_size = util::div_rounding_up_safe(total_bytes, range_count);
  auto ranges     = std::vector<byte_range_info>();

  // BUG FIX: previously reserved `range_size` (bytes per range) instead of
  // `range_count` (number of elements), which could hugely over-allocate for
  // large inputs or under-reserve and force reallocation.
  ranges.reserve(range_count);

  for (int64_t i = 0; i < range_count; i++) {
    auto offset = i * range_size;
    // the final range may be smaller than range_size
    auto size = std::min(range_size, total_bytes - offset);
    ranges.emplace_back(offset, size);
  }

  return ranges;
}

}  // namespace text
}  // namespace io
}  // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/text/bgzip_utils.cpp
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Helpers for reading and writing BGZIP (BGZF) blocks: gzip members carrying a
// mandatory "BC" extra subfield that records the compressed block size.

#include <zlib.h>

#include <cudf/io/text/detail/bgzip_utils.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>

#include <algorithm>
#include <array>
#include <fstream>
#include <limits>

namespace cudf::io::text::detail::bgzip {
namespace {

// Reads a little-endian integer of type IntType from raw bytes.
template <typename IntType>
IntType read_int(char* data)
{
  IntType result{};
  // we assume little-endian
  std::memcpy(&result, &data[0], sizeof(result));
  return result;
}

// Writes `val` to the stream as little-endian raw bytes.
template <typename T>
void write_int(std::ostream& output_stream, T val)
{
  std::array<char, sizeof(T)> bytes;
  // we assume little-endian
  std::memcpy(&bytes[0], &val, sizeof(T));
  output_stream.write(bytes.data(), bytes.size());
}

}  // namespace

// "BC" subfield header: identifiers 66 ('B'), 67 ('C'), then uint16 length 2.
std::array<char, 4> constexpr extra_blocklen_field_header{{66, 67, 2, 0}};

// Parses a BGZIP block header, locating the mandatory block-size ("BC") extra
// subfield. Throws if the fixed gzip header is malformed or the subfield is
// missing. Leaves the stream positioned at the start of the compressed data.
header read_header(std::istream& input_stream)
{
  std::array<char, 12> buffer{};
  input_stream.read(buffer.data(), sizeof(buffer));
  // fixed gzip header: magic 31,139; method 8 (deflate); flags 4 (FEXTRA set)
  std::array<uint8_t, 4> constexpr expected_header{{31, 139, 8, 4}};
  CUDF_EXPECTS(
    std::equal(
      expected_header.begin(), expected_header.end(), reinterpret_cast<uint8_t*>(buffer.data())),
    "malformed BGZIP header");
  // we ignore the remaining bytes of the fixed header, since they don't matter to us
  auto const extra_length = read_int<uint16_t>(&buffer[10]);
  uint16_t extra_offset{};
  // read all the extra subfields
  while (extra_offset < extra_length) {
    auto const remaining_size = extra_length - extra_offset;
    CUDF_EXPECTS(remaining_size >= 4, "invalid extra field length");
    // a subfield consists of 2 identifier bytes and a uint16 length
    // 66/67 identifies a BGZIP block size field, we skip all other fields
    input_stream.read(buffer.data(), 4);
    extra_offset += 4;
    auto const subfield_size = read_int<uint16_t>(&buffer[2]);
    if (buffer[0] == extra_blocklen_field_header[0] &&
        buffer[1] == extra_blocklen_field_header[1]) {
      // the block size subfield contains a single uint16 value, which is block_size - 1
      CUDF_EXPECTS(
        buffer[2] == extra_blocklen_field_header[2] && buffer[3] == extra_blocklen_field_header[3],
        "malformed BGZIP extra subfield");
      input_stream.read(buffer.data(), sizeof(uint16_t));
      // skip any subfields that follow the BC field (remaining minus the 6
      // bytes just consumed: 4 subfield header + 2 value)
      input_stream.seekg(remaining_size - 6, std::ios_base::cur);
      auto const block_size_minus_one = read_int<uint16_t>(&buffer[0]);
      return {block_size_minus_one + 1, extra_length};
    } else {
      input_stream.seekg(subfield_size, std::ios_base::cur);
      extra_offset += subfield_size;
    }
  }
  CUDF_FAIL("missing BGZIP size extra subfield");
}

// Reads the 8-byte gzip trailer: CRC32 then decompressed size, both uint32.
footer read_footer(std::istream& input_stream)
{
  std::array<char, 8> buffer{};
  input_stream.read(buffer.data(), sizeof(buffer));
  return {read_int<uint32_t>(&buffer[0]), read_int<uint32_t>(&buffer[4])};
}

// Writes the gzip trailer for `data` (CRC32 and uncompressed size).
void write_footer(std::ostream& output_stream, host_span<char const> data)
{
  // compute crc32 with zlib, this allows checking the generated files with external tools
  write_int<uint32_t>(output_stream, crc32(0, (unsigned char*)data.data(), data.size()));
  write_int<uint32_t>(output_stream, data.size());
}

// Writes a BGZIP block header, including the BC block-size subfield and any
// caller-provided subfields placed before/after it. `compressed_size` is the
// size of the deflate payload that will follow.
void write_header(std::ostream& output_stream,
                  uint16_t compressed_size,
                  host_span<char const> pre_size_subfield,
                  host_span<char const> post_size_subfield)
{
  std::array<uint8_t, 10> constexpr header_data{{
    31,   // magic number
    139,  // magic number
    8,    // compression type: deflate
    4,    // flags: extra header
    0,    // mtime
    0,    // mtime
    0,    // mtime
    0,    // mtime: irrelevant
    4,    // xfl: irrelevant
    3     // OS: irrelevant
  }};
  output_stream.write(reinterpret_cast<char const*>(header_data.data()), header_data.size());
  auto const extra_size = pre_size_subfield.size() + extra_blocklen_field_header.size() +
                          sizeof(uint16_t) + post_size_subfield.size();
  // total block size: fixed header + xlen field + extra fields + payload + 8-byte footer
  auto const block_size =
    header_data.size() + sizeof(uint16_t) + extra_size + compressed_size + 2 * sizeof(uint32_t);
  write_int<uint16_t>(output_stream, extra_size);
  output_stream.write(pre_size_subfield.data(), pre_size_subfield.size());
  output_stream.write(extra_blocklen_field_header.data(), extra_blocklen_field_header.size());
  CUDF_EXPECTS(block_size - 1 <= std::numeric_limits<uint16_t>::max(), "block size overflow");
  // BC subfield stores block_size - 1, mirroring read_header above
  write_int<uint16_t>(output_stream, block_size - 1);
  output_stream.write(post_size_subfield.data(), post_size_subfield.size());
}

// Writes `data` as a single BGZIP block using a "stored" (uncompressed)
// deflate block. The extra 5 bytes in the header's size are the stored-block
// header written below (1 type byte + uint16 len + uint16 ~len).
void write_uncompressed_block(std::ostream& output_stream,
                              host_span<char const> data,
                              host_span<char const> pre_size_subfields,
                              host_span<char const> post_size_subfields)
{
  CUDF_EXPECTS(data.size() <= std::numeric_limits<uint16_t>::max(), "data size overflow");
  write_header(output_stream, data.size() + 5, pre_size_subfields, post_size_subfields);
  write_int<uint8_t>(output_stream, 1);
  write_int<uint16_t>(output_stream, data.size());
  write_int<uint16_t>(output_stream, ~static_cast<uint16_t>(data.size()));
  output_stream.write(data.data(), data.size());
  write_footer(output_stream, data);
}

// Writes `data` as a single BGZIP block, deflate-compressed with zlib in raw
// mode (no zlib header/footer, as required by gzip framing).
void write_compressed_block(std::ostream& output_stream,
                            host_span<char const> data,
                            host_span<char const> pre_size_subfields,
                            host_span<char const> post_size_subfields)
{
  CUDF_EXPECTS(data.size() <= std::numeric_limits<uint16_t>::max(), "data size overflow");
  z_stream deflate_stream{};
  // let's make sure we have enough space to store the data
  std::vector<char> compressed_out(data.size() * 2 + 256);
  deflate_stream.next_in   = reinterpret_cast<unsigned char*>(const_cast<char*>(data.data()));
  deflate_stream.avail_in  = data.size();
  deflate_stream.next_out  = reinterpret_cast<unsigned char*>(compressed_out.data());
  deflate_stream.avail_out = compressed_out.size();
  CUDF_EXPECTS(
    deflateInit2(&deflate_stream,        // stream
                 Z_DEFAULT_COMPRESSION,  // compression level
                 Z_DEFLATED,             // method
                 -15,  // log2 of window size (negative value means no ZLIB header/footer)
                 9,    // mem level: best performance/most memory usage for compression
                 Z_DEFAULT_STRATEGY  // strategy
                 ) == Z_OK,
    "deflateInit failed");
  CUDF_EXPECTS(deflate(&deflate_stream, Z_FINISH) == Z_STREAM_END, "deflate failed");
  CUDF_EXPECTS(deflateEnd(&deflate_stream) == Z_OK, "deflateEnd failed");
  write_header(output_stream, deflate_stream.total_out, pre_size_subfields, post_size_subfields);
  output_stream.write(compressed_out.data(), deflate_stream.total_out);
  write_footer(output_stream, data);
}

}  // namespace cudf::io::text::detail::bgzip
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/text/device_data_chunks.hpp
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/text/data_chunk_source.hpp> namespace cudf::io::text { class device_span_data_chunk : public device_data_chunk { public: device_span_data_chunk(device_span<char const> data) : _data(data) {} [[nodiscard]] char const* data() const override { return _data.data(); } [[nodiscard]] std::size_t size() const override { return _data.size(); } operator device_span<char const>() const override { return _data; } private: device_span<char const> _data; }; class device_uvector_data_chunk : public device_data_chunk { public: device_uvector_data_chunk(rmm::device_uvector<char>&& data) : _data(std::move(data)) {} [[nodiscard]] char const* data() const override { return _data.data(); } [[nodiscard]] std::size_t size() const override { return _data.size(); } operator device_span<char const>() const override { return _data; } private: rmm::device_uvector<char> _data; }; } // namespace cudf::io::text
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/text/bgzip_data_chunk_source.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "io/text/device_data_chunks.hpp"

#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/config_utils.hpp>

#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/pinned_host_vector.hpp>
#include <cudf/io/text/data_chunk_source_factories.hpp>
#include <cudf/io/text/detail/bgzip_utils.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/exec_policy.hpp>

#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>

#include <fstream>
#include <limits>

namespace cudf::io::text {

namespace {

/**
 * @brief Transforms offset tuples of the form [compressed_begin, compressed_end,
 * decompressed_begin, decompressed_end] into span tuples of the form [compressed_device_span,
 * decompressed_device_span] based on the provided pointers.
 */
struct bgzip_nvcomp_transform_functor {
  uint8_t const* compressed_ptr;
  uint8_t* decompressed_ptr;

  __device__ thrust::tuple<device_span<uint8_t const>, device_span<uint8_t>> operator()(
    thrust::tuple<std::size_t, std::size_t, std::size_t, std::size_t> t)
  {
    auto const compressed_begin   = thrust::get<0>(t);
    auto const compressed_end     = thrust::get<1>(t);
    auto const decompressed_begin = thrust::get<2>(t);
    auto const decompressed_end   = thrust::get<3>(t);
    return thrust::make_tuple(device_span<uint8_t const>{compressed_ptr + compressed_begin,
                                                         compressed_end - compressed_begin},
                              device_span<uint8_t>{decompressed_ptr + decompressed_begin,
                                                   decompressed_end - decompressed_begin});
  }
};

/**
 * @brief Reads chunks of decompressed data out of a BGZIP-compressed stream.
 *
 * Maintains two batches of compressed blocks (`_prev_blocks` / `_curr_blocks`)
 * so a chunk request that straddles a batch boundary can be served from both.
 * The byte range is addressed with BGZF-style virtual offsets: the upper 48
 * bits are the compressed file offset, the lower 16 bits are the offset within
 * the decompressed block (see the `>> 16` / `& 0xFFFFu` splits below).
 */
class bgzip_data_chunk_reader : public data_chunk_reader {
 private:
  // Copies a pinned host vector to a device vector, padding the device buffer
  // as required by the decompression kernel.
  template <typename T>
  static void copy_to_device(cudf::detail::pinned_host_vector<T> const& host,
                             rmm::device_uvector<T>& device,
                             rmm::cuda_stream_view stream)
  {
    // Buffer needs to be padded.
    // Required by `inflate_kernel`.
    device.resize(cudf::util::round_up_safe(host.size(), BUFFER_PADDING_MULTIPLE), stream);
    CUDF_CUDA_TRY(cudaMemcpyAsync(
      device.data(), host.data(), host.size() * sizeof(T), cudaMemcpyDefault, stream.value()));
  }

  // One batch of compressed BGZIP blocks plus the device buffers holding their
  // decompressed output. Offset vectors have num_blocks() + 1 entries
  // (prefix-sum style), hence the initial push_back(0) in the constructor.
  struct decompression_blocks {
    static constexpr std::size_t default_buffer_alloc =
      1 << 24;  // 16MB buffer allocation, resized on demand
    static constexpr std::size_t default_offset_alloc =
      1 << 16;  // 64k offset allocation, resized on demand

    cudaEvent_t event;
    cudf::detail::pinned_host_vector<char> h_compressed_blocks;
    cudf::detail::pinned_host_vector<std::size_t> h_compressed_offsets;
    cudf::detail::pinned_host_vector<std::size_t> h_decompressed_offsets;
    rmm::device_uvector<char> d_compressed_blocks;
    rmm::device_uvector<char> d_decompressed_blocks;
    rmm::device_uvector<std::size_t> d_compressed_offsets;
    rmm::device_uvector<std::size_t> d_decompressed_offsets;
    rmm::device_uvector<device_span<uint8_t const>> d_compressed_spans;
    rmm::device_uvector<device_span<uint8_t>> d_decompressed_spans;
    rmm::device_uvector<compression_result> d_decompression_results;
    std::size_t compressed_size_with_headers{};
    std::size_t max_decompressed_size{};
    // this is usually equal to decompressed_size()
    // unless we are in the last chunk, where it's limited by _local_end
    std::size_t available_decompressed_size{};
    std::size_t read_pos{};
    bool is_decompressed{};

    decompression_blocks(rmm::cuda_stream_view init_stream)
      : d_compressed_blocks(0, init_stream),
        d_decompressed_blocks(0, init_stream),
        d_compressed_offsets(0, init_stream),
        d_decompressed_offsets(0, init_stream),
        d_compressed_spans(0, init_stream),
        d_decompressed_spans(0, init_stream),
        d_decompression_results(0, init_stream)
    {
      CUDF_CUDA_TRY(cudaEventCreate(&event));
      h_compressed_blocks.reserve(default_buffer_alloc);
      h_compressed_offsets.reserve(default_offset_alloc);
      h_compressed_offsets.push_back(0);
      h_decompressed_offsets.reserve(default_offset_alloc);
      h_decompressed_offsets.push_back(0);
    }

    // Uploads this batch's compressed data and decompresses all blocks on
    // `stream`. Idempotent: no-op if already decompressed.
    void decompress(rmm::cuda_stream_view stream)
    {
      if (is_decompressed) { return; }
      copy_to_device(h_compressed_blocks, d_compressed_blocks, stream);
      copy_to_device(h_compressed_offsets, d_compressed_offsets, stream);
      copy_to_device(h_decompressed_offsets, d_decompressed_offsets, stream);
      d_decompressed_blocks.resize(decompressed_size(), stream);
      d_compressed_spans.resize(num_blocks(), stream);
      d_decompressed_spans.resize(num_blocks(), stream);
      d_decompression_results.resize(num_blocks(), stream);

      // build per-block [compressed, decompressed] span pairs from the offsets
      auto offset_it = thrust::make_zip_iterator(d_compressed_offsets.begin(),
                                                 d_compressed_offsets.begin() + 1,
                                                 d_decompressed_offsets.begin(),
                                                 d_decompressed_offsets.begin() + 1);
      auto span_it =
        thrust::make_zip_iterator(d_compressed_spans.begin(), d_decompressed_spans.begin());
      thrust::transform(
        rmm::exec_policy_nosync(stream),
        offset_it,
        offset_it + num_blocks(),
        span_it,
        bgzip_nvcomp_transform_functor{reinterpret_cast<uint8_t const*>(d_compressed_blocks.data()),
                                       reinterpret_cast<uint8_t*>(d_decompressed_blocks.data())});
      if (decompressed_size() > 0) {
        // fall back to the built-in inflate kernel when nvCOMP DEFLATE is disabled
        if (nvcomp::is_decompression_disabled(nvcomp::compression_type::DEFLATE)) {
          gpuinflate(d_compressed_spans,
                     d_decompressed_spans,
                     d_decompression_results,
                     gzip_header_included::NO,
                     stream);
        } else {
          cudf::io::nvcomp::batched_decompress(cudf::io::nvcomp::compression_type::DEFLATE,
                                               d_compressed_spans,
                                               d_decompressed_spans,
                                               d_decompression_results,
                                               max_decompressed_size,
                                               decompressed_size(),
                                               stream);
        }
      }
      is_decompressed = true;
    }

    // Clears all buffers (keeping capacity) so the batch can be refilled.
    void reset()
    {
      h_compressed_blocks.resize(0);
      h_compressed_offsets.resize(1);
      h_decompressed_offsets.resize(1);
      // shrinking doesn't allocate/free, so we don't need to worry about streams
      auto stream = cudf::get_default_stream();
      d_compressed_blocks.resize(0, stream);
      d_decompressed_blocks.resize(0, stream);
      d_compressed_offsets.resize(0, stream);
      d_decompressed_offsets.resize(0, stream);
      d_compressed_spans.resize(0, stream);
      d_decompressed_spans.resize(0, stream);
      d_decompression_results.resize(0, stream);
      compressed_size_with_headers = 0;
      max_decompressed_size        = 0;
      available_decompressed_size  = 0;
      read_pos                     = 0;
      is_decompressed              = false;
    }

    [[nodiscard]] std::size_t num_blocks() const { return h_compressed_offsets.size() - 1; }

    [[nodiscard]] std::size_t compressed_size() const { return h_compressed_offsets.back(); }

    [[nodiscard]] std::size_t decompressed_size() const { return h_decompressed_offsets.back(); }

    // Decompressed bytes not yet consumed by the reader.
    [[nodiscard]] std::size_t remaining_size() const { return available_decompressed_size - read_pos; }

    // Appends one block's compressed payload from the stream.
    void read_block(detail::bgzip::header header, std::istream& stream)
    {
      h_compressed_blocks.resize(h_compressed_blocks.size() + header.data_size());
      stream.read(h_compressed_blocks.data() + compressed_size(), header.data_size());
    }

    // Records the block's compressed/decompressed end offsets.
    void add_block_offsets(detail::bgzip::header header, detail::bgzip::footer footer)
    {
      max_decompressed_size =
        std::max<std::size_t>(footer.decompressed_size, max_decompressed_size);
      h_compressed_offsets.push_back(compressed_size() + header.data_size());
      h_decompressed_offsets.push_back(decompressed_size() + footer.decompressed_size);
    }

    void consume_bytes(std::size_t size)
    {
      CUDF_EXPECTS(size <= remaining_size(), "out of bounds");
      read_pos += size;
    }
  };

  // Swaps the batches and fills the (new) current batch with at least
  // `requested_size` decompressed bytes, or until EOF / end of byte range.
  void read_next_compressed_chunk(std::size_t requested_size)
  {
    std::swap(_curr_blocks, _prev_blocks);
    if (_curr_blocks.is_decompressed) {
      // synchronize on the last decompression + copy, so we don't clobber any buffers
      CUDF_CUDA_TRY(cudaEventSynchronize(_curr_blocks.event));
    }
    _curr_blocks.reset();
    // read chunks until we have enough decompressed data
    while (_curr_blocks.decompressed_size() < requested_size) {
      // calling peek on an already EOF stream causes it to fail, we need to avoid that
      if (_data_stream->eof()) { break; }
      // peek is necessary if we are already at the end, but didn't try to read another byte
      _data_stream->peek();
      if (_data_stream->eof() || _compressed_pos > _compressed_end) { break; }
      auto header = detail::bgzip::read_header(*_data_stream);
      _curr_blocks.read_block(header, *_data_stream);
      auto footer = detail::bgzip::read_footer(*_data_stream);
      _curr_blocks.add_block_offsets(header, footer);
      // for the last GZIP block, we restrict ourselves to the bytes up to _local_end
      // but only for the reader, not for decompression!
      if (_compressed_pos == _compressed_end) {
        _curr_blocks.available_decompressed_size += _local_end;
        _compressed_pos += header.block_size;
        break;
      } else {
        _curr_blocks.available_decompressed_size += footer.decompressed_size;
        _compressed_pos += header.block_size;
      }
    }
  }

  constexpr static std::size_t chunk_load_size = 1 << 24;  // load 16 MB of data by default

 public:
  /**
   * @brief Constructs a reader over `[virtual_begin, virtual_end)`.
   *
   * Virtual offsets pack `compressed_offset << 16 | local_offset`, where the
   * local offset addresses bytes within a block's decompressed data.
   */
  bgzip_data_chunk_reader(std::unique_ptr<std::istream> input_stream,
                          uint64_t virtual_begin,
                          uint64_t virtual_end)
    : _data_stream(std::move(input_stream)),
      _prev_blocks{cudf::get_default_stream()},  // here we can use the default stream because
      _curr_blocks{cudf::get_default_stream()},  // we only initialize empty device_uvectors
      _local_end{virtual_end & 0xFFFFu},
      _compressed_pos{virtual_begin >> 16},
      _compressed_end{virtual_end >> 16}
  {
    // set failbit to throw on IO failures
    _data_stream->exceptions(std::istream::failbit);
    // seek to the beginning of the provided compressed offset
    _data_stream->seekg(_compressed_pos, std::ios_base::cur);
    // read the first blocks
    read_next_compressed_chunk(chunk_load_size);
    // seek to the beginning of the provided local offset
    auto const local_pos = virtual_begin & 0xFFFFu;
    if (local_pos > 0) {
      CUDF_EXPECTS(_curr_blocks.h_decompressed_offsets.size() > 1 &&
                     local_pos < _curr_blocks.h_decompressed_offsets[1],
                   "local part of virtual offset is out of bounds");
      _curr_blocks.consume_bytes(local_pos);
    }
  }

  // Skips decompressed bytes, loading further compressed batches as needed.
  void skip_bytes(std::size_t read_size) override
  {
    while (read_size > _curr_blocks.remaining_size()) {
      read_size -= _curr_blocks.remaining_size();
      _curr_blocks.consume_bytes(_curr_blocks.remaining_size());
      read_next_compressed_chunk(chunk_load_size);
      // calling peek on an already EOF stream causes it to fail, we need to avoid that
      if (_data_stream->eof()) { break; }
      // peek is necessary if we are already at the end, but didn't try to read another byte
      _data_stream->peek();
      if (_data_stream->eof() || _compressed_pos > _compressed_end) { break; }
    }
    read_size = std::min(read_size, _curr_blocks.remaining_size());
    _curr_blocks.consume_bytes(read_size);
  }

  // Returns up to `read_size` decompressed bytes, possibly assembled from the
  // tail of the previous batch plus the head of a freshly-loaded one.
  std::unique_ptr<device_data_chunk> get_next_chunk(std::size_t read_size,
                                                    rmm::cuda_stream_view stream) override
  {
    CUDF_FUNC_RANGE();
    if (read_size <= _curr_blocks.remaining_size()) {
      // fast path: the request fits entirely within the current batch
      _curr_blocks.decompress(stream);
      rmm::device_uvector<char> data(read_size, stream);
      CUDF_CUDA_TRY(
        cudaMemcpyAsync(data.data(),
                        _curr_blocks.d_decompressed_blocks.data() + _curr_blocks.read_pos,
                        read_size,
                        cudaMemcpyDefault,
                        stream.value()));
      // record the host-to-device copy, decompression and device copy
      CUDF_CUDA_TRY(cudaEventRecord(_curr_blocks.event, stream.value()));
      _curr_blocks.consume_bytes(read_size);
      return std::make_unique<device_uvector_data_chunk>(std::move(data));
    }
    read_next_compressed_chunk(read_size /* - _curr_blocks.remaining_size()*/);
    _prev_blocks.decompress(stream);
    _curr_blocks.decompress(stream);
    read_size = std::min(read_size, _prev_blocks.remaining_size() + _curr_blocks.remaining_size());
    rmm::device_uvector<char> data(read_size, stream);
    CUDF_CUDA_TRY(cudaMemcpyAsync(data.data(),
                                  _prev_blocks.d_decompressed_blocks.data() + _prev_blocks.read_pos,
                                  _prev_blocks.remaining_size(),
                                  cudaMemcpyDefault,
                                  stream.value()));
    CUDF_CUDA_TRY(cudaMemcpyAsync(data.data() + _prev_blocks.remaining_size(),
                                  _curr_blocks.d_decompressed_blocks.data() + _curr_blocks.read_pos,
                                  read_size - _prev_blocks.remaining_size(),
                                  cudaMemcpyDefault,
                                  stream.value()));
    // record the host-to-device copy, decompression and device copy
    CUDF_CUDA_TRY(cudaEventRecord(_curr_blocks.event, stream.value()));
    CUDF_CUDA_TRY(cudaEventRecord(_prev_blocks.event, stream.value()));
    read_size -= _prev_blocks.remaining_size();
    _prev_blocks.consume_bytes(_prev_blocks.remaining_size());
    _curr_blocks.consume_bytes(read_size);
    return std::make_unique<device_uvector_data_chunk>(std::move(data));
  }

 private:
  std::unique_ptr<std::istream> _data_stream;
  decompression_blocks _prev_blocks;
  decompression_blocks _curr_blocks;
  std::size_t _local_end;
  std::size_t _compressed_pos;
  std::size_t _compressed_end;
};

/**
 * @brief A file-backed data chunk source which creates a bgzip_data_chunk_reader
 * for a given virtual offset range.
 */
class bgzip_data_chunk_source : public data_chunk_source {
 public:
  bgzip_data_chunk_source(std::string_view filename, uint64_t virtual_begin, uint64_t virtual_end)
    // BUG FIX: previously initialized as `_filename(unknown)`, which references
    // an undeclared identifier and drops the caller-provided file name; the
    // member must be initialized from the `filename` parameter.
    : _filename(filename), _virtual_begin{virtual_begin}, _virtual_end{virtual_end}
  {
  }

  [[nodiscard]] std::unique_ptr<data_chunk_reader> create_reader() const override
  {
    return std::make_unique<bgzip_data_chunk_reader>(
      std::make_unique<std::ifstream>(_filename, std::ifstream::in), _virtual_begin, _virtual_end);
  }

 private:
  std::string _filename;
  uint64_t _virtual_begin;
  uint64_t _virtual_end;
};

}  // namespace

std::unique_ptr<data_chunk_source> make_source_from_bgzip_file(std::string_view filename,
                                                               uint64_t virtual_begin,
                                                               uint64_t virtual_end)
{
  return std::make_unique<bgzip_data_chunk_source>(filename, virtual_begin, virtual_end);
}

std::unique_ptr<data_chunk_source> make_source_from_bgzip_file(std::string_view filename)
{
  return std::make_unique<bgzip_data_chunk_source>(
    filename, 0, std::numeric_limits<uint64_t>::max());
}

}  // namespace cudf::io::text
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/text/multibyte_split.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <io/utilities/output_builder.cuh> #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/stream_pool.hpp> #include <cudf/io/text/byte_range_info.hpp> #include <cudf/io/text/data_chunk_source.hpp> #include <cudf/io/text/detail/multistate.hpp> #include <cudf/io/text/detail/tile_state.hpp> #include <cudf/io/text/multibyte_split.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/detail/strings_column_factories.cuh> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/copy.h> #include <thrust/find.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform.h> #include <cub/block/block_load.cuh> #include <cub/block/block_scan.cuh> #include <cstdint> #include <limits> #include <memory> #include <numeric> #include <optional> namespace { using cudf::io::text::detail::multistate; int32_t constexpr ITEMS_PER_THREAD = 64; int32_t constexpr THREADS_PER_TILE = 128; int32_t constexpr ITEMS_PER_TILE = ITEMS_PER_THREAD * 
THREADS_PER_TILE;
int32_t constexpr TILES_PER_CHUNK = 4096;
int32_t constexpr ITEMS_PER_CHUNK = ITEMS_PER_TILE * TILES_PER_CHUNK;

// Builds the multistate for the first character a thread sees: always enqueues the
// "no match in progress" state (0, 0), plus one partial match (i, i + 1) for every
// delimiter position that equals `c`.
constexpr multistate transition_init(char c, cudf::device_span<char const> delim)
{
  auto result = multistate();

  result.enqueue(0, 0);

  for (std::size_t i = 0; i < delim.size(); i++) {
    if (delim[i] == c) { result.enqueue(i, i + 1); }
  }

  return result;
}

// Advances every partial match in `state` by one input character `c`: a match whose
// tail expects `c` is extended by one position; others are dropped. The (0, 0) entry
// is always re-enqueued so a fresh match can begin at any character.
constexpr multistate transition(char c, multistate state, cudf::device_span<char const> delim)
{
  auto result = multistate();

  result.enqueue(0, 0);

  for (uint8_t i = 0; i < state.size(); i++) {
    auto const tail = state.get_tail(i);
    if (tail < delim.size() && delim[tail] == c) { result.enqueue(state.get_head(i), tail + 1); }
  }

  return result;
}

// Block-wide exclusive scan of multistates: combines per-thread multistates so each
// thread learns the matcher state at the start of its items, using the tile-state
// callback to chain the scan across tiles.
struct PatternScan {
  using BlockScan         = cub::BlockScan<multistate, THREADS_PER_TILE>;
  using BlockScanCallback = cudf::io::text::detail::scan_tile_state_callback<multistate>;

  struct _TempStorage {
    typename BlockScan::TempStorage scan;
  };

  _TempStorage& _temp_storage;

  using TempStorage = cub::Uninitialized<_TempStorage>;

  __device__ inline PatternScan(TempStorage& temp_storage) : _temp_storage(temp_storage.Alias()) {}

  // Folds this thread's ITEMS_PER_THREAD characters into a single multistate, then
  // performs the block-wide exclusive scan (with cross-tile prefix lookup) so that
  // `thread_multistate` on exit is the state *before* this thread's first item.
  __device__ inline void Scan(cudf::size_type tile_idx,
                              cudf::io::text::detail::scan_tile_state_view<multistate> tile_state,
                              cudf::device_span<char const> delim,
                              char (&thread_data)[ITEMS_PER_THREAD],
                              multistate& thread_multistate)
  {
    thread_multistate = transition_init(thread_data[0], delim);

    for (uint32_t i = 1; i < ITEMS_PER_THREAD; i++) {
      thread_multistate = transition(thread_data[i], thread_multistate, delim);
    }

    auto prefix_callback = BlockScanCallback(tile_state, tile_idx);

    BlockScan(_temp_storage.scan)
      .ExclusiveSum(thread_multistate, thread_multistate, prefix_callback);
  }
};

// type aliases to distinguish between row offsets and character offsets
using output_offset = int64_t;
using byte_offset   = int64_t;

// multibyte_split works by splitting up inputs in to 32 inputs (bytes) per thread, and transforming
// them in to data structures called
// "multistates". these multistates are created by searching a trie, but instead of a traditional
// trie where the search begins at a single node at the beginning, we allow our search to begin
// anywhere within the trie tree. The position within the trie tree is stored as a "partial match
// path", which indicates "we can get from here to there by a set of specific transitions". By
// scanning together multistates, we effectively know "we can get here from the beginning by
// following the inputs". By doing this, each thread knows exactly what state it begins in. From
// there, each thread can then take deterministic action. In this case, the deterministic action is
// counting and outputting delimiter offsets when a delimiter is found.

// Marks the tile-state slots for `num_tiles` tiles starting at `base_tile_idx` with `status`
// (defaults to `invalid`), so a subsequent scan treats them as "not yet produced".
__global__ void multibyte_split_init_kernel(
  cudf::size_type base_tile_idx,
  cudf::size_type num_tiles,
  cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
  cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
  cudf::io::text::detail::scan_tile_status status = cudf::io::text::detail::scan_tile_status::invalid)
{
  auto const thread_idx = cudf::detail::grid_1d::global_thread_id();
  if (thread_idx < num_tiles) {
    auto const tile_idx = base_tile_idx + thread_idx;
    tile_multistates.set_status(tile_idx, status);
    tile_output_offsets.set_status(tile_idx, status);
  }
}

// Finds multi-byte delimiter matches in `chunk_input_chars` and writes the byte offset just past
// each match into `row_offsets`, chaining matcher state and output counts across tiles via the
// two tile-state views.
__global__ __launch_bounds__(THREADS_PER_TILE) void multibyte_split_kernel(
  cudf::size_type base_tile_idx,
  byte_offset base_input_offset,
  output_offset base_output_offset,
  cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
  cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
  cudf::device_span<char const> delim,
  cudf::device_span<char const> chunk_input_chars,
  cudf::split_device_span<byte_offset> row_offsets)
{
  using InputLoad =
    cub::BlockLoad<char, THREADS_PER_TILE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
  using OffsetScan         = cub::BlockScan<output_offset, THREADS_PER_TILE>;
  using OffsetScanCallback = cudf::io::text::detail::scan_tile_state_callback<output_offset>;

  // the three phases never overlap, so their temp storage shares one allocation
  __shared__ union {
    typename InputLoad::TempStorage input_load;
    typename PatternScan::TempStorage pattern_scan;
    typename OffsetScan::TempStorage offset_scan;
  } temp_storage;

  auto const tile_idx          = base_tile_idx + blockIdx.x;
  auto const tile_input_offset = blockIdx.x * ITEMS_PER_TILE;
  auto const thread_input_offset =
    tile_input_offset + cudf::thread_index_type{threadIdx.x} * ITEMS_PER_THREAD;
  auto const thread_input_size =
    std::max<cudf::size_type>(chunk_input_chars.size() - thread_input_offset, 0);

  // STEP 1: Load inputs
  char thread_chars[ITEMS_PER_THREAD];

  InputLoad(temp_storage.input_load)
    .Load(chunk_input_chars.data() + tile_input_offset,
          thread_chars,
          chunk_input_chars.size() - tile_input_offset);

  // STEP 2: Scan inputs to determine absolute thread states
  multistate thread_multistate;

  __syncthreads();  // required before temp_memory re-use
  PatternScan(temp_storage.pattern_scan)
    .Scan(tile_idx, tile_multistates, delim, thread_chars, thread_multistate);

  // STEP 3: Flag matches — a full match is reached when the matcher's max tail equals the
  // delimiter length; matches are recorded in a per-thread bitmask and counted
  output_offset thread_offset{};
  uint32_t thread_match_mask[(ITEMS_PER_THREAD + 31) / 32]{};

  for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
    thread_multistate       = transition(thread_chars[i], thread_multistate, delim);
    auto const thread_state = thread_multistate.max_tail();
    auto const is_match     = i < thread_input_size and thread_state == delim.size();
    thread_match_mask[i / 32] |= uint32_t{is_match} << (i % 32);
    thread_offset += output_offset{is_match};
  }

  // STEP 4: Scan flags to determine absolute thread output offset
  auto prefix_callback = OffsetScanCallback(tile_output_offsets, tile_idx);

  __syncthreads();  // required before temp_memory re-use
  OffsetScan(temp_storage.offset_scan).ExclusiveSum(thread_offset, thread_offset, prefix_callback);

  // Step 5: Assign outputs from each thread using match offsets.
  for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
    auto const is_match = (thread_match_mask[i / 32] >> (i % 32)) & 1u;
    if (is_match) {
      auto const match_end = base_input_offset + thread_input_offset + i + 1;
      row_offsets[thread_offset - base_output_offset] = match_end;
      thread_offset++;
    }
  }
}

// Single-byte specialization of multibyte_split_kernel: no multistate scan is needed, a plain
// character comparison flags each match.
__global__ __launch_bounds__(THREADS_PER_TILE) void byte_split_kernel(
  cudf::size_type base_tile_idx,
  byte_offset base_input_offset,
  output_offset base_output_offset,
  cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
  char delim,
  cudf::device_span<char const> chunk_input_chars,
  cudf::split_device_span<byte_offset> row_offsets)
{
  using InputLoad =
    cub::BlockLoad<char, THREADS_PER_TILE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
  using OffsetScan         = cub::BlockScan<output_offset, THREADS_PER_TILE>;
  using OffsetScanCallback = cudf::io::text::detail::scan_tile_state_callback<output_offset>;

  __shared__ union {
    typename InputLoad::TempStorage input_load;
    typename OffsetScan::TempStorage offset_scan;
  } temp_storage;

  auto const tile_idx          = base_tile_idx + blockIdx.x;
  auto const tile_input_offset = blockIdx.x * ITEMS_PER_TILE;
  auto const thread_input_offset =
    tile_input_offset + cudf::thread_index_type{threadIdx.x} * ITEMS_PER_THREAD;
  auto const thread_input_size =
    std::max<cudf::size_type>(chunk_input_chars.size() - thread_input_offset, 0);

  // STEP 1: Load inputs
  char thread_chars[ITEMS_PER_THREAD];

  InputLoad(temp_storage.input_load)
    .Load(chunk_input_chars.data() + tile_input_offset,
          thread_chars,
          chunk_input_chars.size() - tile_input_offset);

  // STEP 2: Flag matches
  output_offset thread_offset{};
  uint32_t thread_match_mask[(ITEMS_PER_THREAD + 31) / 32]{};

  for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
    auto const is_match = i < thread_input_size and thread_chars[i] == delim;
    thread_match_mask[i / 32] |= uint32_t{is_match} << (i % 32);
    thread_offset += output_offset{is_match};
  }

  // STEP 3: Scan flags to determine absolute thread output offset
  auto prefix_callback = OffsetScanCallback(tile_output_offsets, tile_idx);

  __syncthreads();  // required before temp_memory re-use
  OffsetScan(temp_storage.offset_scan).ExclusiveSum(thread_offset, thread_offset, prefix_callback);

  // Step 4: Assign outputs from each thread using match offsets.
  for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
    auto const is_match = (thread_match_mask[i / 32] >> (i % 32)) & 1u;
    if (is_match) {
      auto const match_end = base_input_offset + thread_input_offset + i + 1;
      row_offsets[thread_offset - base_output_offset] = match_end;
      thread_offset++;
    }
  }
}

}  // namespace

namespace cudf {
namespace io {
namespace text {
namespace detail {

// Splits the source text on `delimiter` and returns a strings column with one row per field,
// restricted to `byte_range` (a row belongs to the range that contains its starting offset).
// Processing is chunked and double-buffered across two forked streams: one stream reads the next
// chunk while the other scans the current chunk for delimiters and gathers characters.
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
                                              std::string const& delimiter,
                                              byte_range_info byte_range,
                                              bool strip_delimiters,
                                              rmm::cuda_stream_view stream,
                                              rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();

  if (byte_range.empty()) { return make_empty_column(type_id::STRING); }

  auto device_delim = cudf::string_scalar(delimiter, true, stream, mr);

  // count the most-repeated byte in the delimiter; the multistate can only track a bounded
  // number of simultaneous partial matches, which repeated bytes can exceed
  auto sorted_delim = delimiter;
  std::sort(sorted_delim.begin(), sorted_delim.end());
  auto [_last_char, _last_char_count, max_duplicate_tokens] = std::accumulate(
    sorted_delim.begin(), sorted_delim.end(), std::make_tuple('\0', 0, 0), [](auto acc, char c) {
      if (std::get<0>(acc) != c) {
        std::get<0>(acc) = c;
        std::get<1>(acc) = 0;
      }
      std::get<1>(acc)++;
      std::get<2>(acc) = std::max(std::get<1>(acc), std::get<2>(acc));
      return acc;
    });

  CUDF_EXPECTS(max_duplicate_tokens < multistate::max_segment_count,
               "delimiter contains too many duplicate tokens to produce a deterministic result.");

  CUDF_EXPECTS(delimiter.size() < multistate::max_segment_value,
               "delimiter contains too many total tokens to produce a deterministic result.");

  auto const concurrency = 2;

  // must be at least 32 when using warp-reduce on partials
  // must be at least 1 more than max possible concurrent tiles
  // best when at least 32 more than max possible concurrent tiles, due to rolling `invalid`s
  auto num_tile_states = std::max(32, TILES_PER_CHUNK * concurrency + 32);
  auto tile_multistates =
    scan_tile_state<multistate>(num_tile_states, stream, rmm::mr::get_current_device_resource());
  auto tile_offsets =
    scan_tile_state<output_offset>(num_tile_states, stream, rmm::mr::get_current_device_resource());

  // mark the tiles "before" the first chunk as out-of-bounds so tile 0 can look backwards
  multibyte_split_init_kernel<<<TILES_PER_CHUNK, THREADS_PER_TILE, 0, stream.value()>>>(  //
    -TILES_PER_CHUNK,
    TILES_PER_CHUNK,
    tile_multistates,
    tile_offsets,
    cudf::io::text::detail::scan_tile_status::oob);

  auto multistate_seed = multistate();
  multistate_seed.enqueue(0, 0);  // this represents the first state in the pattern.

  // Seeding the tile state with an identity value allows the 0th tile to follow the same logic as
  // the Nth tile, assuming it can look up an inclusive prefix. Without this seed, the 0th block
  // would have to follow separate logic.
  cudf::detail::device_single_thread(
    [tm = scan_tile_state_view<multistate>(tile_multistates),
     to = scan_tile_state_view<output_offset>(tile_offsets),
     multistate_seed] __device__() mutable {
      tm.set_inclusive_prefix(-1, multistate_seed);
      to.set_inclusive_prefix(-1, 0);
    },
    stream);

  auto reader = source.create_reader();
  // start scanning slightly before the byte range so a delimiter straddling the range
  // start is still detected
  auto chunk_offset = std::max<byte_offset>(0, byte_range.offset() - delimiter.size());
  auto const byte_range_end = byte_range.offset() + byte_range.size();
  reader->skip_bytes(chunk_offset);
  // amortize output chunk allocations over 8 worst-case outputs. This limits the overallocation
  constexpr auto max_growth = 8;
  output_builder<byte_offset> row_offset_storage(ITEMS_PER_CHUNK, max_growth, stream);
  output_builder<char> char_storage(ITEMS_PER_CHUNK, max_growth, stream);

  auto streams = cudf::detail::fork_streams(stream, concurrency);

  cudaEvent_t last_launch_event;
  CUDF_CUDA_TRY(cudaEventCreate(&last_launch_event));

  auto& read_stream = streams[0];
  auto& scan_stream = streams[1];
  auto chunk = reader->get_next_chunk(ITEMS_PER_CHUNK, read_stream);
  int64_t base_tile_idx = 0;
  std::optional<byte_offset> first_row_offset;
  std::optional<byte_offset> last_row_offset;
  bool found_last_offset = false;
  // a range that starts at the very beginning implicitly begins a row at offset 0
  if (byte_range.offset() == 0) { first_row_offset = 0; }
  std::swap(read_stream, scan_stream);

  while (chunk->size() > 0) {
    // if we found the last delimiter, or didn't find delimiters inside the byte range at all: abort
    if (last_row_offset.has_value() or
        (not first_row_offset.has_value() and chunk_offset >= byte_range_end)) {
      break;
    }

    auto tiles_in_launch =
      cudf::util::div_rounding_up_safe(chunk->size(), static_cast<std::size_t>(ITEMS_PER_TILE));

    auto row_offsets = row_offset_storage.next_output(scan_stream);

    // reset the next chunk of tile state
    multibyte_split_init_kernel<<<tiles_in_launch, THREADS_PER_TILE, 0, scan_stream.value()>>>(  //
      base_tile_idx,
      tiles_in_launch,
      tile_multistates,
      tile_offsets);

    CUDF_CUDA_TRY(cudaStreamWaitEvent(scan_stream.value(), last_launch_event));

    if (delimiter.size() == 1) {
      // the single-byte case allows for a much more efficient kernel, so we special-case it
      byte_split_kernel<<<tiles_in_launch, THREADS_PER_TILE, 0, scan_stream.value()>>>(  //
        base_tile_idx,
        chunk_offset,
        row_offset_storage.size(),
        tile_offsets,
        delimiter[0],
        *chunk,
        row_offsets);
    } else {
      multibyte_split_kernel<<<tiles_in_launch, THREADS_PER_TILE, 0, scan_stream.value()>>>(  //
        base_tile_idx,
        chunk_offset,
        row_offset_storage.size(),
        tile_multistates,
        tile_offsets,
        {device_delim.data(), static_cast<std::size_t>(device_delim.size())},
        *chunk,
        row_offsets);
    }

    // load the next chunk
    auto next_chunk = reader->get_next_chunk(ITEMS_PER_CHUNK, read_stream);
    // while that is running, determine how many offsets we output (synchronizes)
    auto const new_offsets = [&] {
      auto const new_offsets_unclamped =
        tile_offsets.get_inclusive_prefix(base_tile_idx + tiles_in_launch - 1, scan_stream) -
        static_cast<output_offset>(row_offset_storage.size());
      // if we are not in the last chunk, we can use all offsets
      if (chunk_offset + static_cast<output_offset>(chunk->size()) < byte_range_end) {
        return new_offsets_unclamped;
      }
      // if we are in the last chunk, we need to find the first out-of-bounds offset
      auto const it = thrust::make_counting_iterator(output_offset{});
      auto const end_loc =
        *thrust::find_if(rmm::exec_policy_nosync(scan_stream),
                         it,
                         it + new_offsets_unclamped,
                         [row_offsets, byte_range_end] __device__(output_offset i) {
                           return row_offsets[i] >= byte_range_end;
                         });
      // if we had no out-of-bounds offset, we copy all offsets
      if (end_loc == new_offsets_unclamped) { return end_loc; }
      // otherwise we copy only up to (including) the first out-of-bounds delimiter
      found_last_offset = true;
      return end_loc + 1;
    }();
    row_offset_storage.advance_output(new_offsets, scan_stream);
    // determine if we found the first or last field offset for the byte range
    if (new_offsets > 0 and not first_row_offset) {
      first_row_offset = row_offset_storage.front_element(scan_stream);
    }
    if (found_last_offset) { last_row_offset = row_offset_storage.back_element(scan_stream); }
    // copy over the characters we need, if we already encountered the first field delimiter
    if (first_row_offset.has_value()) {
      auto const begin = chunk->data() + std::max<byte_offset>(0, *first_row_offset - chunk_offset);
      auto const sentinel = last_row_offset.value_or(std::numeric_limits<byte_offset>::max());
      auto const end =
        chunk->data() + std::min<byte_offset>(sentinel - chunk_offset, chunk->size());
      auto const output_size = end - begin;
      auto char_output = char_storage.next_output(scan_stream);
      thrust::copy(rmm::exec_policy_nosync(scan_stream), begin, end, char_output.begin());
      char_storage.advance_output(output_size, scan_stream);
    }

    CUDF_CUDA_TRY(cudaEventRecord(last_launch_event, scan_stream.value()));

    // swap the double buffers: the freshly-read chunk becomes the next chunk to scan
    std::swap(read_stream, scan_stream);
    base_tile_idx += tiles_in_launch;
    chunk_offset += chunk->size();
    chunk = std::move(next_chunk);
  }

  CUDF_CUDA_TRY(cudaEventDestroy(last_launch_event));

  cudf::detail::join_streams(streams, stream);

  // if the input was empty, we didn't find a delimiter at all,
  // or the first delimiter was also the last: empty output
  if (chunk_offset == 0 or not first_row_offset.has_value() or
      first_row_offset == last_row_offset) {
    return make_empty_column(type_id::STRING);
  }

  auto chars          = char_storage.gather(stream, mr);
  auto global_offsets = row_offset_storage.gather(stream, mr);

  // insert an offset at the beginning if we started at the beginning of the input
  bool const insert_begin = first_row_offset.value_or(0) == 0;
  // insert an offset at the end if we have not terminated the last row
  bool const insert_end =
    not(last_row_offset.has_value() or
        (global_offsets.size() > 0 and global_offsets.back_element(stream) == chunk_offset));
  rmm::device_uvector<int32_t> offsets{
    global_offsets.size() + insert_begin + insert_end, stream, mr};
  if (insert_begin) { offsets.set_element_to_zero_async(0, stream); }
  if (insert_end) {
    offsets.set_element(offsets.size() - 1, chunk_offset - *first_row_offset, stream);
  }
  // rebase the global byte offsets so the column's offsets start at 0
  thrust::transform(rmm::exec_policy(stream),
                    global_offsets.begin(),
                    global_offsets.end(),
                    offsets.begin() + insert_begin,
                    [baseline = *first_row_offset] __device__(byte_offset global_offset) {
                      return static_cast<int32_t>(global_offset - baseline);
                    });

  auto string_count = offsets.size() - 1;
  if (strip_delimiters) {
    // build (pointer, length) pairs that exclude the trailing delimiter of every row,
    // except an unterminated final row (insert_end), which has no delimiter to strip
    auto it = cudf::detail::make_counting_transform_iterator(
      0,
      [ofs        = offsets.data(),
       chars      = chars.data(),
       delim_size = static_cast<size_type>(delimiter.size()),
       last_row   = static_cast<size_type>(string_count) - 1,
       insert_end] __device__(size_type row) {
        auto const begin = ofs[row];
        auto const len   = ofs[row + 1] - begin;
        if (row == last_row && insert_end) {
          return thrust::make_pair(chars + begin, len);
        } else {
          return thrust::make_pair(chars + begin, std::max<size_type>(0, len - delim_size));
        };
      });
    return cudf::strings::detail::make_strings_column(it, it + string_count, stream, mr);
  } else {
    return cudf::make_strings_column(string_count, std::move(offsets), std::move(chars), {}, 0);
  }
}

}  // namespace detail

// Convenience overload: wraps the optional byte range into parse_options.
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
                                              std::string const& delimiter,
                                              std::optional<byte_range_info> byte_range,
                                              rmm::mr::device_memory_resource* mr)
{
  return multibyte_split(
    source, delimiter, parse_options{byte_range.value_or(create_byte_range_info_max())}, mr);
}

// Public entry point: runs the detail implementation on the default stream.
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
                                              std::string const& delimiter,
                                              parse_options options,
                                              rmm::mr::device_memory_resource* mr)
{
  auto stream = cudf::get_default_stream();
  auto result = detail::multibyte_split(
    source, delimiter, options.byte_range, options.strip_delimiters, stream, mr);

  return result;
}

// Convenience overload: whole input, default options.
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
                                              std::string const& delimiter,
                                              rmm::mr::device_memory_resource* mr)
{
  return multibyte_split(source, delimiter, parse_options{}, mr);
}

}  // namespace text
}  // namespace io
}  // namespace cudf
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/delta_binary.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "page_decode.cuh" namespace cudf::io::parquet::detail { // DELTA_XXX encoding support // // DELTA_BINARY_PACKED is used for INT32 and INT64 data types. Encoding begins with a header // containing a block size, number of mini-blocks in each block, total value count, and first // value. The first three are ULEB128 variable length ints, and the last is a zigzag ULEB128 // varint. // -- the block size is a multiple of 128 // -- the mini-block count is chosen so that each mini-block will contain a multiple of 32 values // -- the value count includes the first value stored in the header // // It seems most Parquet encoders will stick with a block size of 128, and 4 mini-blocks of 32 // elements each. arrow-rs will use a block size of 256 for 64-bit ints. // // Following the header are the data blocks. Each block is further divided into mini-blocks, with // each mini-block having its own encoding bitwidth. Each block begins with a header containing a // zigzag ULEB128 encoded minimum delta value, followed by an array of uint8 bitwidths, one entry // per mini-block. While encoding, the lowest delta value is subtracted from all the deltas in the // block to ensure that all encoded values are positive. The deltas for each mini-block are bit // packed using the same encoding as the RLE/Bit-Packing Hybrid encoder. // The largest mini-block size we can currently support. 
constexpr int max_delta_mini_block_size = 64; // The first pass decodes `values_per_mb` values, and then the second pass does another // batch of size `values_per_mb`. The largest value for values_per_miniblock among the // major writers seems to be 64, so 2 * 64 should be good. We save the first value separately // since it is not encoded in the first mini-block. constexpr int delta_rolling_buf_size = 2 * max_delta_mini_block_size; /** * @brief Read a ULEB128 varint integer * * @param[in,out] cur The current data position, updated after the read * @param[in] end The end data position * * @return The value read */ inline __device__ uleb128_t get_uleb128(uint8_t const*& cur, uint8_t const* end) { uleb128_t v = 0, l = 0, c; while (cur < end) { c = *cur++; v |= (c & 0x7f) << l; l += 7; if ((c & 0x80) == 0) { return v; } } return v; } /** * @brief Read a ULEB128 zig-zag encoded varint integer * * @param[in,out] cur The current data position, updated after the read * @param[in] end The end data position * * @return The value read */ inline __device__ zigzag128_t get_zz128(uint8_t const*& cur, uint8_t const* end) { uleb128_t u = get_uleb128(cur, end); return static_cast<zigzag128_t>((u >> 1u) ^ -static_cast<zigzag128_t>(u & 1)); } struct delta_binary_decoder { uint8_t const* block_start; // start of data, but updated as data is read uint8_t const* block_end; // end of data uleb128_t block_size; // usually 128, must be multiple of 128 uleb128_t mini_block_count; // usually 4, chosen such that block_size/mini_block_count is a // multiple of 32 uleb128_t value_count; // total values encoded in the block zigzag128_t first_value; // initial value, stored in the header zigzag128_t last_value; // last value decoded uint32_t values_per_mb; // block_size / mini_block_count, must be multiple of 32 uint32_t current_value_idx; // current value index, initialized to 0 at start of block zigzag128_t cur_min_delta; // min delta for the block uint32_t cur_mb; // index of the current 
mini-block within the block uint8_t const* cur_mb_start; // pointer to the start of the current mini-block data uint8_t const* cur_bitwidths; // pointer to the bitwidth array in the block uleb128_t value[delta_rolling_buf_size]; // circular buffer of delta values // returns the value stored in the `value` array at index // `rolling_index<delta_rolling_buf_size>(idx)`. If `idx` is `0`, then return `first_value`. constexpr zigzag128_t value_at(size_type idx) { return idx == 0 ? first_value : value[rolling_index<delta_rolling_buf_size>(idx)]; } // returns the number of values encoded in the block data. when all_values is true, // account for the first value in the header. otherwise just count the values encoded // in the mini-block data. constexpr uint32_t num_encoded_values(bool all_values) { return value_count == 0 ? 0 : all_values ? value_count : value_count - 1; } // read mini-block header into state object. should only be called from init_binary_block or // setup_next_mini_block. header format is: // // | min delta (int) | bit-width array (1 byte * mini_block_count) | // // on exit db->cur_mb is 0 and db->cur_mb_start points to the first mini-block of data, or // nullptr if out of data. // is_decode indicates whether this is being called from initialization code (false) or // the actual decoding (true) inline __device__ void init_mini_block(bool is_decode) { cur_mb = 0; cur_mb_start = nullptr; if (current_value_idx < num_encoded_values(is_decode)) { auto d_start = block_start; cur_min_delta = get_zz128(d_start, block_end); cur_bitwidths = d_start; d_start += mini_block_count; cur_mb_start = d_start; } } // read delta binary header into state object. should be called on thread 0. 
header format is: // // | block size (uint) | mini-block count (uint) | value count (uint) | first value (int) | // // also initializes the first mini-block before exit inline __device__ void init_binary_block(uint8_t const* d_start, uint8_t const* d_end) { block_end = d_end; block_size = get_uleb128(d_start, d_end); mini_block_count = get_uleb128(d_start, d_end); value_count = get_uleb128(d_start, d_end); first_value = get_zz128(d_start, d_end); last_value = first_value; current_value_idx = 0; values_per_mb = block_size / mini_block_count; // init the first mini-block block_start = d_start; // only call init if there are actually encoded values if (value_count > 1) { init_mini_block(false); } } // skip to the start of the next mini-block. should only be called on thread 0. // calls init_binary_block if currently on the last mini-block in a block. // is_decode indicates whether this is being called from initialization code (false) or // the actual decoding (true) inline __device__ void setup_next_mini_block(bool is_decode) { if (current_value_idx >= num_encoded_values(is_decode)) { return; } current_value_idx += values_per_mb; // just set pointer to start of next mini_block if (cur_mb < mini_block_count - 1) { cur_mb_start += cur_bitwidths[cur_mb] * values_per_mb / 8; cur_mb++; } // out of mini-blocks, start a new block else { block_start = cur_mb_start + cur_bitwidths[cur_mb] * values_per_mb / 8; init_mini_block(is_decode); } } // given start/end pointers in the data, find the end of the binary encoded block. when done, // `this` will be initialized with the correct start and end positions. returns the end, which is // start of data/next block. should only be called from thread 0. inline __device__ uint8_t const* find_end_of_block(uint8_t const* start, uint8_t const* end) { // read block header init_binary_block(start, end); // test for no encoded values. a single value will be in the block header. 
if (value_count <= 1) { return block_start; } // read mini-block headers and skip over data while (current_value_idx < num_encoded_values(false)) { setup_next_mini_block(false); } // calculate the correct end of the block auto const* const new_end = cur_mb == 0 ? block_start : cur_mb_start; // re-init block with correct end init_binary_block(start, new_end); return new_end; } // decode the current mini-batch of deltas, and convert to values. // called by all threads in a warp, currently only one warp supported. inline __device__ void calc_mini_block_values(int lane_id) { using cudf::detail::warp_size; if (current_value_idx >= value_count) { return; } // need to account for the first value from header on first pass if (current_value_idx == 0) { if (lane_id == 0) { current_value_idx++; } __syncwarp(); if (current_value_idx >= value_count) { return; } } uint32_t const mb_bits = cur_bitwidths[cur_mb]; // need to do in multiple passes if values_per_mb != 32 uint32_t const num_pass = values_per_mb / warp_size; auto d_start = cur_mb_start; for (int i = 0; i < num_pass; i++) { // position at end of the current mini-block since the following calculates // negative indexes d_start += (warp_size * mb_bits) / 8; // unpack deltas. modified from version in gpuDecodeDictionaryIndices(), but // that one only unpacks up to bitwidths of 24. simplified some since this // will always do batches of 32. // NOTE: because this needs to handle up to 64 bits, the branching used in the other // implementation has been replaced with a loop. While this uses more registers, the // looping version is just as fast and easier to read. Might need to revisit this when // DELTA_BYTE_ARRAY is implemented. 
zigzag128_t delta = 0; if (lane_id + current_value_idx < value_count) { int32_t ofs = (lane_id - warp_size) * mb_bits; uint8_t const* p = d_start + (ofs >> 3); ofs &= 7; if (p < block_end) { uint32_t c = 8 - ofs; // 0 - 7 bits delta = (*p++) >> ofs; while (c < mb_bits && p < block_end) { delta |= static_cast<zigzag128_t>(*p++) << c; c += 8; } delta &= (static_cast<zigzag128_t>(1) << mb_bits) - 1; } } // add min delta to get true delta delta += cur_min_delta; // do inclusive scan to get value - first_value at each position __shared__ cub::WarpScan<int64_t>::TempStorage temp_storage; cub::WarpScan<int64_t>(temp_storage).InclusiveSum(delta, delta); // now add first value from header or last value from previous block to get true value delta += last_value; int const value_idx = rolling_index<delta_rolling_buf_size>(current_value_idx + warp_size * i + lane_id); value[value_idx] = delta; // save value from last lane in warp. this will become the 'first value' added to the // deltas calculated in the next iteration (or invocation). if (lane_id == warp_size - 1) { last_value = delta; } __syncwarp(); } } // decodes and skips values until the block containing the value after `skip` is reached. // called by all threads in a thread block. inline __device__ void skip_values(int skip) { using cudf::detail::warp_size; int const t = threadIdx.x; int const lane_id = t % warp_size; while (current_value_idx < skip && current_value_idx < num_encoded_values(true)) { if (t < warp_size) { calc_mini_block_values(lane_id); if (lane_id == 0) { setup_next_mini_block(true); } } __syncthreads(); } } // decodes the current mini block and stores the values obtained. should only be called by // a single warp. 
inline __device__ void decode_batch()
{
  using cudf::detail::warp_size;
  // lane index within the (single) decoding warp
  int const t       = threadIdx.x;
  int const lane_id = t % warp_size;

  // unpack deltas and save in db->value
  calc_mini_block_values(lane_id);

  // set up for next mini-block. only lane 0 mutates the shared block state;
  // calc_mini_block_values ends with a __syncwarp(), so this is safe.
  if (lane_id == 0) { setup_next_mini_block(true); }
}
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/page_decode.cuh
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "parquet_gpu.hpp"
#include "rle_stream.cuh"

#include <io/utilities/block_utils.cuh>

#include <cuda/atomic>
#include <cuda/std/tuple>

namespace cudf::io::parquet::detail {

// Per-page decoding state shared by the preprocess and decode kernels. One instance
// typically lives in shared memory per thread block while a page is being processed.
struct page_state_s {
  constexpr page_state_s() noexcept {}
  uint8_t const* data_start{};
  uint8_t const* data_end{};
  uint8_t const* lvl_end{};
  uint8_t const* dict_base{};    // ptr to dictionary page data
  int32_t dict_size{};           // size of dictionary data
  int32_t first_row{};           // First row in page to output
  int32_t num_rows{};            // Rows in page to decode (including rows to be skipped)
  int32_t first_output_value{};  // First value in page to output
  int32_t num_input_values{};    // total # of input/level values in the page
  int32_t dtype_len{};           // Output data type length
  int32_t dtype_len_in{};        // Can be larger than dtype_len if truncating 32-bit into 8-bit
  int32_t dict_bits{};           // # of bits to store dictionary indices
  uint32_t dict_run{};           // remaining length of the current RLE/literal dictionary run
  int32_t dict_val{};            // current repeated dictionary value (RLE runs)
  uint32_t initial_rle_run[NUM_LEVEL_TYPES]{};   // [def,rep]
  int32_t initial_rle_value[NUM_LEVEL_TYPES]{};  // [def,rep]
  int32_t error{};               // bitmask of decode_error values, set atomically (block scope)
  PageInfo page{};               // copy of the global PageInfo for this page
  ColumnChunkDesc col{};         // copy of the owning chunk's descriptor

  // (leaf) value decoding
  int32_t nz_count{};  // number of valid entries in nz_idx (write position in circular buffer)
  int32_t dict_pos{};  // write position of dictionary indices
  int32_t src_pos{};   // input read position of final output value
  int32_t ts_scale{};  // timestamp scale: <0: divide by -ts_scale, >0: multiply by ts_scale

  // repetition/definition level decoding
  int32_t input_value_count{};                  // how many values of the input we've processed
  int32_t input_row_count{};                    // how many rows of the input we've processed
  int32_t input_leaf_count{};  // how many leaf values of the input we've processed
  uint8_t const* lvl_start[NUM_LEVEL_TYPES]{};      // [def,rep]
  uint8_t const* abs_lvl_start[NUM_LEVEL_TYPES]{};  // [def,rep]
  uint8_t const* abs_lvl_end[NUM_LEVEL_TYPES]{};    // [def,rep]
  int32_t lvl_count[NUM_LEVEL_TYPES]{};  // how many of each of the streams we've decoded
  int32_t row_index_lower_bound{};       // lower bound of row indices we should process

  // a shared-memory cache of frequently used data when decoding. The source of this data is
  // normally stored in global memory which can yield poor performance. So, when possible
  // we copy that info here prior to decoding
  PageNestingDecodeInfo nesting_decode_cache[max_cacheable_nesting_decode_info]{};
  // points to either nesting_decode_cache above when possible, or to the global source otherwise
  PageNestingDecodeInfo* nesting_info{};

  // OR `err` into the error bitmask. Atomic at block scope so multiple threads
  // may report errors concurrently without losing bits.
  inline __device__ void set_error_code(decode_error err)
  {
    cuda::atomic_ref<int32_t, cuda::thread_scope_block> ref{error};
    ref.fetch_or(static_cast<int32_t>(err), cuda::std::memory_order_relaxed);
  }

  // Clear the error bitmask (release ordering publishes the reset to the block).
  inline __device__ void reset_error_code()
  {
    cuda::atomic_ref<int32_t, cuda::thread_scope_block> ref{error};
    ref.store(0, cuda::std::memory_order_release);
  }
};

// buffers only used in the decode kernel. separated from page_state_s to keep
// shared memory usage in other kernels (eg, gpuComputePageSizes) down.
template <int _nz_buf_size, int _dict_buf_size, int _str_buf_size> struct page_state_buffers_s { static constexpr int nz_buf_size = _nz_buf_size; static constexpr int dict_buf_size = _dict_buf_size; static constexpr int str_buf_size = _str_buf_size; uint32_t nz_idx[nz_buf_size]; // circular buffer of non-null value positions uint32_t dict_idx[dict_buf_size]; // Dictionary index, boolean, or string offset values uint32_t str_len[str_buf_size]; // String length for plain encoding of strings }; // Copies null counts back to `nesting_decode` at the end of scope struct null_count_back_copier { page_state_s* s; int t; __device__ ~null_count_back_copier() { if (s->nesting_info != nullptr and s->nesting_info == s->nesting_decode_cache) { int depth = 0; while (depth < s->page.num_output_nesting_levels) { int const thread_depth = depth + t; if (thread_depth < s->page.num_output_nesting_levels) { s->page.nesting_decode[thread_depth].null_count = s->nesting_decode_cache[thread_depth].null_count; } depth += blockDim.x; } } } }; /** * @brief Test if the given page is in a string column */ constexpr bool is_string_col(PageInfo const& page, device_span<ColumnChunkDesc const> chunks) { if (page.flags & PAGEINFO_FLAGS_DICTIONARY != 0) { return false; } auto const& col = chunks[page.chunk_idx]; return is_string_col(col); } /** * @brief Returns whether or not a page spans either the beginning or the end of the * specified row bounds * * @param s The page to be checked * @param start_row The starting row index * @param num_rows The number of rows * @param has_repetition True if the schema has nesting * * @return True if the page spans the beginning or the end of the row bounds */ inline __device__ bool is_bounds_page(page_state_s* const s, size_t start_row, size_t num_rows, bool has_repetition) { size_t const page_begin = s->col.start_row + s->page.chunk_row; size_t const page_end = page_begin + s->page.num_rows; size_t const begin = start_row; size_t const end = start_row + num_rows; 
// for non-nested schemas, rows cannot span pages, so use a more restrictive test return has_repetition ? ((page_begin <= begin && page_end >= begin) || (page_begin <= end && page_end >= end)) : ((page_begin < begin && page_end > begin) || (page_begin < end && page_end > end)); } /** * @brief Returns whether or not a page is completely contained within the specified * row bounds * * @param s The page to be checked * @param start_row The starting row index * @param num_rows The number of rows * * @return True if the page is completely contained within the row bounds */ inline __device__ bool is_page_contained(page_state_s* const s, size_t start_row, size_t num_rows) { size_t const page_begin = s->col.start_row + s->page.chunk_row; size_t const page_end = page_begin + s->page.num_rows; size_t const begin = start_row; size_t const end = start_row + num_rows; return page_begin >= begin && page_end <= end; } /** * @brief Retrieves string information for a string at the specified source position * * @param[in] s Page state input * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @tparam state_buf Typename of the `state_buf` (usually inferred) * * @return A pair containing a pointer to the string and its length */ template <typename state_buf> inline __device__ cuda::std::pair<char const*, size_t> gpuGetStringData(page_state_s* s, state_buf* sb, int src_pos) { char const* ptr = nullptr; size_t len = 0; if (s->dict_base) { // String dictionary uint32_t dict_pos = (s->dict_bits > 0) ? 
sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] * sizeof(string_index_pair) : 0; if (dict_pos < (uint32_t)s->dict_size) { auto const* src = reinterpret_cast<string_index_pair const*>(s->dict_base + dict_pos); ptr = src->first; len = src->second; } } else { // Plain encoding uint32_t dict_pos = sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)]; if (dict_pos <= (uint32_t)s->dict_size) { ptr = reinterpret_cast<char const*>(s->data_start + dict_pos); len = sb->str_len[rolling_index<state_buf::str_buf_size>(src_pos)]; } } return {ptr, len}; } /** * @brief Performs RLE decoding of dictionary indexes * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] target_pos Target index position in dict_idx buffer (may exceed this value by up to * 31) * @param[in] t Warp1 thread ID (0..31) * @tparam sizes_only True if only sizes are to be calculated * @tparam state_buf Typename of the `state_buf` (usually inferred) * * @return A pair containing the new output position, and the total length of strings decoded (this * will only be valid on thread 0 and if sizes_only is true). In the event that this function * decodes strings beyond target_pos, the total length of strings returned will include these * additional values. */ template <bool sizes_only, typename state_buf> __device__ cuda::std::pair<int, int> gpuDecodeDictionaryIndices(page_state_s* s, [[maybe_unused]] state_buf* sb, int target_pos, int t) { uint8_t const* end = s->data_end; int dict_bits = s->dict_bits; int pos = s->dict_pos; int str_len = 0; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; uint8_t const* cur = s->data_start; if (run <= 1) { run = (cur < end) ? 
get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value int bytecnt = (dict_bits + 7) >> 3; if (cur + bytecnt <= end) { int32_t run_val = cur[0]; if (bytecnt > 1) { run_val |= cur[1] << 8; if (bytecnt > 2) { run_val |= cur[2] << 16; if (bytecnt > 3) { run_val |= cur[3] << 24; } } } s->dict_val = run_val & ((1 << dict_bits) - 1); } cur += bytecnt; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8 * dict_bits; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } __syncwarp(); is_literal = shuffle(is_literal); batch_len = shuffle(batch_len); // compute dictionary index. int dict_idx = 0; if (t < batch_len) { dict_idx = s->dict_val; if (is_literal) { int32_t ofs = (t - ((batch_len + 7) & ~7)) * dict_bits; uint8_t const* p = s->data_start + (ofs >> 3); ofs &= 7; if (p < end) { uint32_t c = 8 - ofs; dict_idx = (*p++) >> ofs; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; } } } dict_idx &= (1 << dict_bits) - 1; } } // if we're not computing sizes, store off the dictionary index if constexpr (!sizes_only) { sb->dict_idx[rolling_index<state_buf::dict_buf_size>(pos + t)] = dict_idx; } } // if we're computing sizes, add the length(s) if constexpr (sizes_only) { int const len = [&]() { if (t >= batch_len || (pos + t >= target_pos)) { return 0; } uint32_t const dict_pos = (s->dict_bits > 0) ? 
dict_idx * sizeof(string_index_pair) : 0; if (dict_pos < (uint32_t)s->dict_size) { const auto* src = reinterpret_cast<const string_index_pair*>(s->dict_base + dict_pos); return src->second; } return 0; }(); using WarpReduce = cub::WarpReduce<size_type>; __shared__ typename WarpReduce::TempStorage temp_storage; // note: str_len will only be valid on thread 0. str_len += WarpReduce(temp_storage).Sum(len); } pos += batch_len; } return {pos, str_len}; } /** * @brief Performs RLE decoding of dictionary indexes, for when dict_size=1 * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] target_pos Target write position * @param[in] t Thread ID * @tparam state_buf Typename of the `state_buf` (usually inferred) * * @return The new output position */ template <typename state_buf> inline __device__ int gpuDecodeRleBooleans(page_state_s* s, state_buf* sb, int target_pos, int t) { uint8_t const* end = s->data_end; int64_t pos = s->dict_pos; while (pos < target_pos) { int is_literal, batch_len; if (!t) { uint32_t run = s->dict_run; uint8_t const* cur = s->data_start; if (run <= 1) { run = (cur < end) ? get_vlq32(cur, end) : 0; if (!(run & 1)) { // Repeated value s->dict_val = (cur < end) ? cur[0] & 1 : 0; cur++; } } if (run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(32, (int)(run >> 1) * 8), 1); if (batch_len >= 8) { batch_len &= ~7; } batch_len_div8 = (batch_len + 7) >> 3; run -= batch_len_div8 * 2; cur += batch_len_div8; } else { batch_len = max(min(32, (int)(run >> 1)), 1); run -= batch_len * 2; } s->dict_run = run; s->data_start = cur; is_literal = run & 1; __threadfence_block(); } __syncwarp(); is_literal = shuffle(is_literal); batch_len = shuffle(batch_len); if (t < batch_len) { int dict_idx; if (is_literal) { int32_t ofs = t - ((batch_len + 7) & ~7); uint8_t const* p = s->data_start + (ofs >> 3); dict_idx = (p < end) ? 
(p[0] >> (ofs & 7u)) & 1 : 0; } else { dict_idx = s->dict_val; } sb->dict_idx[rolling_index<state_buf::dict_buf_size>(pos + t)] = dict_idx; } pos += batch_len; } return pos; } /** * @brief Parses the length and position of strings and returns total length of all strings * processed * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] target_pos Target output position * @param[in] t Thread ID * @tparam sizes_only True if only sizes are to be calculated * @tparam state_buf Typename of the `state_buf` (usually inferred) * * @return Total length of strings processed */ template <bool sizes_only, typename state_buf> __device__ size_type gpuInitStringDescriptors(page_state_s* s, [[maybe_unused]] state_buf* sb, int target_pos, int t) { int pos = s->dict_pos; int total_len = 0; // This step is purely serial if (!t) { uint8_t const* cur = s->data_start; int dict_size = s->dict_size; int k = s->dict_val; while (pos < target_pos) { int len = 0; if ((s->col.data_type & 7) == FIXED_LEN_BYTE_ARRAY) { if (k < dict_size) { len = s->dtype_len_in; } } else { if (k + 4 <= dict_size) { len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24); k += 4; if (k + len > dict_size) { len = 0; } } } if constexpr (!sizes_only) { sb->dict_idx[rolling_index<state_buf::dict_buf_size>(pos)] = k; sb->str_len[rolling_index<state_buf::str_buf_size>(pos)] = len; } k += len; total_len += len; pos++; } s->dict_val = k; __threadfence_block(); } return total_len; } /** * @brief Decode values out of a definition or repetition stream * * @param[out] output Level buffer output * @param[in,out] s Page state input/output * @param[in] target_count Target count of stream values on output * @param[in] t Warp0 thread ID (0..31) * @param[in] lvl The level type we are decoding - DEFINITION or REPETITION * @tparam level_t Type used to store decoded repetition and definition levels * @tparam rolling_buf_size Size of the cyclic buffer used to store 
value data */ template <typename level_t, int rolling_buf_size> __device__ void gpuDecodeStream( level_t* output, page_state_s* s, int32_t target_count, int t, level_type lvl) { uint8_t const* cur_def = s->lvl_start[lvl]; uint8_t const* end = s->lvl_end; uint32_t level_run = s->initial_rle_run[lvl]; int32_t level_val = s->initial_rle_value[lvl]; int level_bits = s->col.level_bits[lvl]; int32_t num_input_values = s->num_input_values; int32_t value_count = s->lvl_count[lvl]; int32_t batch_coded_count = 0; while (s->error == 0 && value_count < target_count && value_count < num_input_values) { int batch_len; if (level_run <= 1) { // Get a new run symbol from the byte stream int sym_len = 0; if (!t) { uint8_t const* cur = cur_def; if (cur < end) { level_run = get_vlq32(cur, end); } if (!(level_run & 1)) { if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8) { if (cur < end) level_val |= cur[0] << 8; cur++; } } if (cur > end) { s->set_error_code(decode_error::LEVEL_STREAM_OVERRUN); break; } if (level_run <= 1) { s->set_error_code(decode_error::INVALID_LEVEL_RUN); break; } sym_len = (int32_t)(cur - cur_def); __threadfence_block(); } sym_len = shuffle(sym_len); level_val = shuffle(level_val); level_run = shuffle(level_run); cur_def += sym_len; } if (s->error != 0) { break; } batch_len = min(num_input_values - value_count, 32); if (level_run & 1) { // Literal run int batch_len8; batch_len = min(batch_len, (level_run >> 1) * 8); batch_len8 = (batch_len + 7) >> 3; if (t < batch_len) { int bitpos = t * level_bits; uint8_t const* cur = cur_def + (bitpos >> 3); bitpos &= 7; if (cur < end) level_val = cur[0]; cur++; if (level_bits > 8 - bitpos && cur < end) { level_val |= cur[0] << 8; cur++; if (level_bits > 16 - bitpos && cur < end) level_val |= cur[0] << 16; } level_val = (level_val >> bitpos) & ((1 << level_bits) - 1); } level_run -= batch_len8 * 2; cur_def += batch_len8 * level_bits; } else { // Repeated value batch_len = min(batch_len, level_run >> 1); level_run -= 
batch_len * 2; } if (t < batch_len) { int idx = value_count + t; output[rolling_index<rolling_buf_size>(idx)] = level_val; } batch_coded_count += batch_len; value_count += batch_len; } // update the stream info if (!t) { s->lvl_start[lvl] = cur_def; s->initial_rle_run[lvl] = level_run; s->initial_rle_value[lvl] = level_val; s->lvl_count[lvl] = value_count; } } /** * @brief Store a validity mask containing value_count bits into the output validity buffer of the * page. * * @param[in,out] nesting_info The page/nesting information to store the mask in. The validity map * offset is also updated * @param[in,out] valid_map Pointer to bitmask to store validity information to * @param[in] valid_mask The validity mask to be stored * @param[in] value_count # of bits in the validity mask */ inline __device__ void store_validity(int valid_map_offset, bitmask_type* valid_map, uint32_t valid_mask, int32_t value_count) { int word_offset = valid_map_offset / 32; int bit_offset = valid_map_offset % 32; // if we fit entirely in the output word if (bit_offset + value_count <= 32) { auto relevant_mask = static_cast<uint32_t>((static_cast<uint64_t>(1) << value_count) - 1); if (relevant_mask == ~0) { valid_map[word_offset] = valid_mask; } else { atomicAnd(valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(valid_map + word_offset, (valid_mask & relevant_mask) << bit_offset); } } // we're going to spill over into the next word. // note : writing both values here is the lazy/slow way. we could be writing just // the first word and rolling the remaining bits over into the next call. // however, some basic performance tests shows almost no difference between these two // methods. More detailed performance testing might be worthwhile here. else { uint32_t bits_left = 32 - bit_offset; // first word. 
strip bits_left bits off the beginning and store that uint32_t relevant_mask = ((1 << bits_left) - 1); uint32_t mask_word0 = valid_mask & relevant_mask; atomicAnd(valid_map + word_offset, ~(relevant_mask << bit_offset)); atomicOr(valid_map + word_offset, mask_word0 << bit_offset); // second word. strip the remainder of the bits off the end and store that relevant_mask = ((1 << (value_count - bits_left)) - 1); uint32_t mask_word1 = valid_mask & (relevant_mask << bits_left); atomicAnd(valid_map + word_offset + 1, ~(relevant_mask)); atomicOr(valid_map + word_offset + 1, mask_word1 >> bits_left); } } /** * @brief Compute the nesting bounds within the hierarchy to add values to, and the definition level * D to which we should considered them null or not. * * @param[out] start_depth The start nesting depth * @param[out] end_depth The end nesting depth (inclusive) * @param[out] d The definition level up to which added values are not-null. if t is out of bounds, * d will be -1 * @param[in] s Local page information * @param[in] rep Repetition level buffer * @param[in] def Definition level buffer * @param[in] input_value_count The current count of input level values we have processed * @param[in] target_input_value_count The desired # of input level values we want to process * @param[in] t Thread index * @tparam rolling_buf_size Size of the cyclic buffer used to store value data * @tparam level_t Type used to store decoded repetition and definition levels */ template <int rolling_buf_size, typename level_t> inline __device__ void get_nesting_bounds(int& start_depth, int& end_depth, int& d, page_state_s* s, level_t const* const rep, level_t const* const def, int input_value_count, int32_t target_input_value_count, int t) { start_depth = -1; end_depth = -1; d = -1; if (input_value_count + t < target_input_value_count) { int const index = rolling_index<rolling_buf_size>(input_value_count + t); d = static_cast<int>(def[index]); // if we have repetition (there are list columns 
involved) we have to // bound what nesting levels we apply values to if (s->col.max_level[level_type::REPETITION] > 0) { int r = rep[index]; start_depth = s->nesting_info[r].start_depth; end_depth = s->nesting_info[d].end_depth; } // for columns without repetition (even ones involving structs) we always // traverse the entire hierarchy. else { start_depth = 0; end_depth = s->col.max_nesting_depth - 1; } } } /** * @brief Process a batch of incoming repetition/definition level values and generate * validity, nested column offsets (where appropriate) and decoding indices. * * @param[in] target_input_value_count The # of repetition/definition levels to process up to * @param[in] s Local page information * @param[out] sb Page state buffer output * @param[in] rep Repetition level buffer * @param[in] def Definition level buffer * @param[in] t Thread index * @tparam level_t Type used to store decoded repetition and definition levels * @tparam state_buf Typename of the `state_buf` (usually inferred) * @tparam rolling_buf_size Size of the cyclic buffer used to store value data */ template <typename level_t, typename state_buf, int rolling_buf_size> __device__ void gpuUpdateValidityOffsetsAndRowIndices(int32_t target_input_value_count, page_state_s* s, state_buf* sb, level_t const* const rep, level_t const* const def, int t) { // max nesting depth of the column int const max_depth = s->col.max_nesting_depth; bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; // how many (input) values we've processed in the page so far int input_value_count = s->input_value_count; // how many rows we've processed in the page so far int input_row_count = s->input_row_count; PageNestingDecodeInfo* nesting_info_base = s->nesting_info; // process until we've reached the target while (input_value_count < target_input_value_count) { // determine the nesting bounds for this thread (the range of nesting depths we // will generate new value indices and validity bits for) int 
start_depth, end_depth, d; get_nesting_bounds<rolling_buf_size, level_t>( start_depth, end_depth, d, s, rep, def, input_value_count, target_input_value_count, t); // 4 interesting things to track: // thread_value_count : # of output values from the view of this thread // warp_value_count : # of output values for the whole warp // // thread_valid_count : # of valid values from the view of this thread // warp_valid_count : # of valid values for the whole warp uint32_t thread_value_count, warp_value_count; uint32_t thread_valid_count, warp_valid_count; // track (page-relative) row index for the thread so we can compare against input bounds // keep track of overall # of rows we've read. int const is_new_row = start_depth == 0 ? 1 : 0; uint32_t const warp_row_count_mask = ballot(is_new_row); int32_t const thread_row_index = input_row_count + ((__popc(warp_row_count_mask & ((1 << t) - 1)) + is_new_row) - 1); input_row_count += __popc(warp_row_count_mask); // is this thread within read row bounds? int const in_row_bounds = thread_row_index >= s->row_index_lower_bound && thread_row_index < (s->first_row + s->num_rows) ? 1 : 0; // compute warp and thread value counts uint32_t const warp_count_mask = ballot((0 >= start_depth && 0 <= end_depth) && in_row_bounds ? 1 : 0); warp_value_count = __popc(warp_count_mask); // Note : ((1 << t) - 1) implies "for all threads before me" thread_value_count = __popc(warp_count_mask & ((1 << t) - 1)); // walk from 0 to max_depth uint32_t next_thread_value_count, next_warp_value_count; for (int s_idx = 0; s_idx < max_depth; s_idx++) { PageNestingDecodeInfo* nesting_info = &nesting_info_base[s_idx]; // if we are within the range of nesting levels we should be adding value indices for int const in_nesting_bounds = ((s_idx >= start_depth && s_idx <= end_depth) && in_row_bounds) ? 1 : 0; // everything up to the max_def_level is a non-null value uint32_t const is_valid = d >= nesting_info->max_def_level && in_nesting_bounds ? 
1 : 0; // compute warp and thread valid counts uint32_t const warp_valid_mask = // for flat schemas, a simple ballot_sync gives us the correct count and bit positions // because every value in the input matches to a value in the output !has_repetition ? ballot(is_valid) : // for nested schemas, it's more complicated. This warp will visit 32 incoming values, // however not all of them will necessarily represent a value at this nesting level. so // the validity bit for thread t might actually represent output value t-6. the correct // position for thread t's bit is thread_value_count. for cuda 11 we could use // __reduce_or_sync(), but until then we have to do a warp reduce. WarpReduceOr32(is_valid << thread_value_count); thread_valid_count = __popc(warp_valid_mask & ((1 << thread_value_count) - 1)); warp_valid_count = __popc(warp_valid_mask); // if this is the value column emit an index for value decoding if (is_valid && s_idx == max_depth - 1) { int const src_pos = nesting_info->valid_count + thread_valid_count; int const dst_pos = nesting_info->value_count + thread_value_count; // nz_idx is a mapping of src buffer indices to destination buffer indices sb->nz_idx[rolling_index<rolling_buf_size>(src_pos)] = dst_pos; } // compute warp and thread value counts for the -next- nesting level. we need to // do this for nested schemas so that we can emit an offset for the -current- nesting // level. more concretely : the offset for the current nesting level == current length of the // next nesting level if (s_idx < max_depth - 1) { uint32_t const next_warp_count_mask = ballot((s_idx + 1 >= start_depth && s_idx + 1 <= end_depth && in_row_bounds) ? 1 : 0); next_warp_value_count = __popc(next_warp_count_mask); next_thread_value_count = __popc(next_warp_count_mask & ((1 << t) - 1)); // if we're -not- at a leaf column and we're within nesting/row bounds // and we have a valid data_out pointer, it implies this is a list column, so // emit an offset. 
if (in_nesting_bounds && nesting_info->data_out != nullptr) { int const idx = nesting_info->value_count + thread_value_count; cudf::size_type const ofs = nesting_info_base[s_idx + 1].value_count + next_thread_value_count + nesting_info_base[s_idx + 1].page_start_value; (reinterpret_cast<cudf::size_type*>(nesting_info->data_out))[idx] = ofs; } } // nested schemas always read and write to the same bounds (that is, read and write positions // are already pre-bounded by first_row/num_rows). flat schemas will start reading at the // first value, even if that is before first_row, because we cannot trivially jump to // the correct position to start reading. since we are about to write the validity vector here // we need to adjust our computed mask to take into account the write row bounds. int const in_write_row_bounds = !has_repetition ? thread_row_index >= s->first_row && thread_row_index < (s->first_row + s->num_rows) : in_row_bounds; int const first_thread_in_write_range = !has_repetition ? __ffs(ballot(in_write_row_bounds)) - 1 : 0; // # of bits to of the validity mask to write out int const warp_valid_mask_bit_count = first_thread_in_write_range < 0 ? 
0 : warp_value_count - first_thread_in_write_range; // increment count of valid values, count of total values, and update validity mask if (!t) { if (nesting_info->valid_map != nullptr && warp_valid_mask_bit_count > 0) { uint32_t const warp_output_valid_mask = warp_valid_mask >> first_thread_in_write_range; store_validity(nesting_info->valid_map_offset, nesting_info->valid_map, warp_output_valid_mask, warp_valid_mask_bit_count); nesting_info->valid_map_offset += warp_valid_mask_bit_count; nesting_info->null_count += warp_valid_mask_bit_count - __popc(warp_output_valid_mask); } nesting_info->valid_count += warp_valid_count; nesting_info->value_count += warp_value_count; } // propagate value counts for the next level warp_value_count = next_warp_value_count; thread_value_count = next_thread_value_count; } input_value_count += min(32, (target_input_value_count - input_value_count)); __syncwarp(); } // update if (!t) { // update valid value count for decoding and total # of values we've processed s->nz_count = nesting_info_base[max_depth - 1].valid_count; s->input_value_count = input_value_count; s->input_row_count = input_row_count; } } /** * @brief Process repetition and definition levels up to the target count of leaf values. * * In order to decode actual leaf values from the input stream, we need to generate the * list of non-null value positions (page_state_s::nz_idx). We do this by processing * the repetition and definition level streams. This process also generates validity information, * and offset column values in the case of nested schemas. Because of the way the streams * are encoded, this function may generate slightly more than target_leaf_count. * * Only runs on 1 warp. 
 * @param[in] s The local page state
 * @param[out] sb Page state buffer output
 * @param[in] target_leaf_count Target count of non-null leaf values to generate indices for
 * @param[in] rep Repetition level buffer
 * @param[in] def Definition level buffer
 * @param[in] t Thread index
 * @tparam rolling_buf_size Size of the cyclic buffer used to store value data
 * @tparam level_t Type used to store decoded repetition and definition levels
 * @tparam state_buf Typename of the `state_buf` (usually inferred)
 */
template <int rolling_buf_size, typename level_t, typename state_buf>
__device__ void gpuDecodeLevels(page_state_s* s,
                                state_buf* sb,
                                int32_t target_leaf_count,
                                level_t* const rep,
                                level_t* const def,
                                int t)
{
  bool has_repetition = s->col.max_level[level_type::REPETITION] > 0;

  constexpr int batch_size = 32;
  int cur_leaf_count       = target_leaf_count;
  // keep decoding until we hit the target count, run out of input, or an error is flagged
  while (s->error == 0 && s->nz_count < target_leaf_count &&
         s->input_value_count < s->num_input_values) {
    if (has_repetition) {
      gpuDecodeStream<level_t, rolling_buf_size>(rep, s, cur_leaf_count, t, level_type::REPETITION);
    }
    gpuDecodeStream<level_t, rolling_buf_size>(def, s, cur_leaf_count, t, level_type::DEFINITION);
    __syncwarp();

    // because the rep and def streams are encoded separately, we cannot request an exact
    // # of values to be decoded at once. we can only process the lowest # of decoded rep/def
    // levels we get.
    int actual_leaf_count = has_repetition ? min(s->lvl_count[level_type::REPETITION],
                                                 s->lvl_count[level_type::DEFINITION])
                                           : s->lvl_count[level_type::DEFINITION];

    // process what we got back
    gpuUpdateValidityOffsetsAndRowIndices<level_t, state_buf, rolling_buf_size>(
      actual_leaf_count, s, sb, rep, def, t);
    cur_leaf_count = actual_leaf_count + batch_size;
    __syncwarp();
  }
}

/**
 * @brief Parse the beginning of the level section (definition or repetition),
 * initializes the initial RLE run & value, and returns the section length
 *
 * @param[in,out] s The page state
 * @param[in] cur The current data position
 * @param[in] end The end of the data
 * @param[in] lvl Enum indicating whether this is to initialize repetition or definition level data
 *
 * @return The length of the section
 */
inline __device__ uint32_t InitLevelSection(page_state_s* s,
                                            uint8_t const* cur,
                                            uint8_t const* end,
                                            level_type lvl)
{
  int32_t len;
  int const level_bits = s->col.level_bits[lvl];
  auto const encoding  = lvl == level_type::DEFINITION ? s->page.definition_level_encoding
                                                       : s->page.repetition_level_encoding;

  auto start = cur;

  // parses an RLE run header: varint run length, then (for a repeated run, LSB==0)
  // the 1- or 2-byte repeated value, and records where level decoding should resume
  auto init_rle = [s, lvl, end, level_bits](uint8_t const* cur, uint8_t const* end) {
    uint32_t const run      = get_vlq32(cur, end);
    s->initial_rle_run[lvl] = run;
    if (!(run & 1)) {
      if (cur < end) {
        int v = cur[0];
        cur++;
        if (level_bits > 8) {
          v |= ((cur < end) ? cur[0] : 0) << 8;
          cur++;
        }
        s->initial_rle_value[lvl] = v;
      } else {
        s->initial_rle_value[lvl] = 0;
      }
    }
    s->lvl_start[lvl] = cur;

    if (cur > end) { s->set_error_code(decode_error::LEVEL_STREAM_OVERRUN); }
  };

  // this is a little redundant. if level_bits == 0, then nothing should be encoded
  // for the level, but some V2 files in the wild violate this and encode the data anyway.
  // thus we will handle V2 headers separately.
  if ((s->page.flags & PAGEINFO_FLAGS_V2) != 0 && (len = s->page.lvl_bytes[lvl]) != 0) {
    // V2 only uses RLE encoding so no need to check encoding
    s->abs_lvl_start[lvl] = cur;
    init_rle(cur, cur + len);
  } else if (level_bits == 0) {
    len                       = 0;
    s->initial_rle_run[lvl]   = s->page.num_input_values * 2;  // repeated value
    s->initial_rle_value[lvl] = 0;
    s->lvl_start[lvl]         = cur;
    s->abs_lvl_start[lvl]     = cur;
  } else if (encoding == Encoding::RLE) {  // V1 header with RLE encoding
    if (cur + 4 < end) {
      // V1 RLE level data is prefixed with a 4-byte little-endian byte length
      len = (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24);
      cur += 4;
      s->abs_lvl_start[lvl] = cur;
      init_rle(cur, cur + len);
      // add back the 4 bytes for the length
      len += 4;
    } else {
      len = 0;
      s->set_error_code(decode_error::LEVEL_STREAM_OVERRUN);
    }
  } else if (encoding == Encoding::BIT_PACKED) {
    len                       = (s->page.num_input_values * level_bits + 7) >> 3;
    s->initial_rle_run[lvl]   = ((s->page.num_input_values + 7) >> 3) * 2 + 1;  // literal run
    s->initial_rle_value[lvl] = 0;
    s->lvl_start[lvl]         = cur;
    s->abs_lvl_start[lvl]     = cur;
  } else {
    len = 0;
    s->set_error_code(decode_error::UNSUPPORTED_ENCODING);
  }

  s->abs_lvl_end[lvl] = start + len;

  return static_cast<uint32_t>(len);
}

/**
 * @brief Functor for setupLocalPageInfo that always returns true.
 */
struct all_types_filter {
  __device__ inline bool operator()(PageInfo const& page) { return true; }
};

/**
 * @brief Functor for setupLocalPageInfo that takes a mask of allowed types.
 */
struct mask_filter {
  uint32_t mask;

  __device__ mask_filter(uint32_t m) : mask(m) {}
  __device__ mask_filter(decode_kernel_mask m) : mask(static_cast<uint32_t>(m)) {}

  __device__ inline bool operator()(PageInfo const& page)
  {
    return BitAnd(mask, page.kernel_mask) != 0;
  }
};

/**
 * @brief Sets up block-local page state information from the global pages.
 * @param[in, out] s The local page state to be filled in
 * @param[in] p The global page to be copied from
 * @param[in] chunks The global list of chunks
 * @param[in] min_row Crop all rows below min_row
 * @param[in] num_rows Maximum number of rows to read
 * @param[in] filter Filtering function used to decide which pages to operate on
 * @param[in] is_decode_step If we are setting up for the decode step (instead of the preprocess)
 * @tparam Filter Function that takes a PageInfo reference and returns true if the given page
 * should be operated on. Currently only used by the gpuComputePageSizes step.
 * @return True if this page should be processed further
 */
template <typename Filter>
inline __device__ bool setupLocalPageInfo(page_state_s* const s,
                                          PageInfo const* p,
                                          device_span<ColumnChunkDesc const> chunks,
                                          size_t min_row,
                                          size_t num_rows,
                                          Filter filter,
                                          bool is_decode_step)
{
  int t = threadIdx.x;

  // Fetch page info
  if (!t) {
    s->page         = *p;
    s->nesting_info = nullptr;
    s->col          = chunks[s->page.chunk_idx];
  }
  __syncthreads();

  // return false if this is a dictionary page or it does not pass the filter condition
  if ((s->page.flags & PAGEINFO_FLAGS_DICTIONARY) != 0 || !filter(s->page)) { return false; }

  // our starting row (absolute index) is
  // col.start_row == absolute row index
  // page.chunk-row == relative row index within the chunk
  size_t const page_start_row = s->col.start_row + s->page.chunk_row;

  // if we can use the nesting decode cache, set it up now
  auto const can_use_decode_cache = s->page.nesting_info_size <= max_cacheable_nesting_decode_info;
  if (can_use_decode_cache) {
    // block-strided copy: each thread handles one nesting depth per iteration
    int depth = 0;
    while (depth < s->page.nesting_info_size) {
      int const thread_depth = depth + t;
      if (thread_depth < s->page.nesting_info_size) {
        // these values need to be copied over from global
        s->nesting_decode_cache[thread_depth].max_def_level =
          s->page.nesting_decode[thread_depth].max_def_level;
        s->nesting_decode_cache[thread_depth].page_start_value =
          s->page.nesting_decode[thread_depth].page_start_value;
        s->nesting_decode_cache[thread_depth].start_depth =
          s->page.nesting_decode[thread_depth].start_depth;
        s->nesting_decode_cache[thread_depth].end_depth =
          s->page.nesting_decode[thread_depth].end_depth;
      }
      depth += blockDim.x;
    }
  }
  if (!t) {
    s->nesting_info = can_use_decode_cache ? s->nesting_decode_cache : s->page.nesting_decode;

    // NOTE: s->page.num_rows, s->col.chunk_row, s->first_row and s->num_rows will be
    // invalid/bogus during first pass of the preprocess step for nested types. this is ok
    // because we ignore these values in that stage.
    auto const max_row = min_row + num_rows;

    // if we are totally outside the range of the input, do nothing
    if ((page_start_row > max_row) || (page_start_row + s->page.num_rows < min_row)) {
      s->first_row = 0;
      s->num_rows  = 0;
    }
    // otherwise
    else {
      s->first_row             = page_start_row >= min_row ? 0 : min_row - page_start_row;
      auto const max_page_rows = s->page.num_rows - s->first_row;
      s->num_rows              = (page_start_row + s->first_row) + max_page_rows <= max_row
                                   ? max_page_rows
                                   : max_row - (page_start_row + s->first_row);
    }
  }
  __syncthreads();

  // zero counts
  int depth = 0;
  while (depth < s->page.num_output_nesting_levels) {
    int const thread_depth = depth + t;
    if (thread_depth < s->page.num_output_nesting_levels) {
      s->nesting_info[thread_depth].valid_count = 0;
      s->nesting_info[thread_depth].value_count = 0;
      s->nesting_info[thread_depth].null_count  = 0;
    }
    depth += blockDim.x;
  }
  __syncthreads();

  // if we have no work to do (eg, in a skip_rows/num_rows case) in this page.
  //
  // corner case: in the case of lists, we can have pages that contain "0" rows if the current row
  // starts before this page and ends after this page:
  //       P0        P1        P2
  //  |---------|---------|----------|
  //        ^------------------^
  //      row start           row end
  // P1 will contain 0 rows
  //
  // NOTE: this check needs to be done after the null counts have been zeroed out
  bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0;
  if (is_decode_step && s->num_rows == 0 &&
      !(has_repetition && (is_bounds_page(s, min_row, num_rows, has_repetition) ||
                           is_page_contained(s, min_row, num_rows)))) {
    return false;
  }

  if (!t) {
    s->reset_error_code();

    // IMPORTANT : nested schemas can have 0 rows in a page but still have
    // values. The case is:
    // - On page N-1, the last row starts, with 2/6 values encoded
    // - On page N, the remaining 4/6 values are encoded, but there are no new rows.
    // if (s->page.num_input_values > 0 && s->page.num_rows > 0) {
    if (s->page.num_input_values > 0) {
      uint8_t* cur = s->page.page_data;
      uint8_t* end = cur + s->page.uncompressed_page_size;

      // output element byte width is packed in the upper bits of data_type
      uint32_t dtype_len_out = s->col.data_type >> 3;
      s->ts_scale            = 0;
      // Validate data type
      auto const data_type = s->col.data_type & 7;
      switch (data_type) {
        case BOOLEAN:
          s->dtype_len = 1;  // Boolean are stored as 1 byte on the output
          break;
        case INT32: [[fallthrough]];
        case FLOAT: s->dtype_len = 4; break;
        case INT64:
          if (s->col.ts_clock_rate) {
            int32_t units = 0;
            // Duration types are not included because no scaling is done when reading
            if (s->col.converted_type == TIMESTAMP_MILLIS) {
              units = cudf::timestamp_ms::period::den;
            } else if (s->col.converted_type == TIMESTAMP_MICROS) {
              units = cudf::timestamp_us::period::den;
            } else if (s->col.logical_type.has_value() and
                       s->col.logical_type->is_timestamp_nanos()) {
              units = cudf::timestamp_ns::period::den;
            }
            if (units and units != s->col.ts_clock_rate) {
              // negative scale means divide, positive means multiply
              s->ts_scale = (s->col.ts_clock_rate < units) ? -(units / s->col.ts_clock_rate)
                                                           : (s->col.ts_clock_rate / units);
            }
          }
          [[fallthrough]];
        case DOUBLE: s->dtype_len = 8; break;
        case INT96: s->dtype_len = 12; break;
        case BYTE_ARRAY:
          if (s->col.converted_type == DECIMAL) {
            auto const decimal_precision = s->col.decimal_precision;
            s->dtype_len                 = [decimal_precision]() {
              if (decimal_precision <= MAX_DECIMAL32_PRECISION) {
                return sizeof(int32_t);
              } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) {
                return sizeof(int64_t);
              } else {
                return sizeof(__int128_t);
              }
            }();
          } else {
            s->dtype_len = sizeof(string_index_pair);
          }
          break;
        default:  // FIXED_LEN_BYTE_ARRAY:
          s->dtype_len = dtype_len_out;
          if (s->dtype_len <= 0) { s->set_error_code(decode_error::INVALID_DATA_TYPE); }
          break;
      }
      // Special check for downconversions
      s->dtype_len_in = s->dtype_len;
      if (data_type == FIXED_LEN_BYTE_ARRAY) {
        if (s->col.converted_type == DECIMAL) {
          s->dtype_len = [dtype_len = s->dtype_len]() {
            if (dtype_len <= sizeof(int32_t)) {
              return sizeof(int32_t);
            } else if (dtype_len <= sizeof(int64_t)) {
              return sizeof(int64_t);
            } else {
              return sizeof(__int128_t);
            }
          }();
        } else {
          s->dtype_len = sizeof(string_index_pair);
        }
      } else if (data_type == INT32) {
        if (dtype_len_out == 1) {
          // INT8 output
          s->dtype_len = 1;
        } else if (dtype_len_out == 2) {
          // INT16 output
          s->dtype_len = 2;
        } else if (s->col.converted_type == TIME_MILLIS) {
          // INT64 output
          s->dtype_len = 8;
        }
      } else if (data_type == BYTE_ARRAY && dtype_len_out == 4) {
        s->dtype_len = 4;  // HASH32 output
      } else if (data_type == INT96) {
        s->dtype_len = 8;  // Convert to 64-bit timestamp
      }

      // during the decoding step we need to offset the global output buffers
      // for each level of nesting so that we write to the section this page
      // is responsible for.
      // - for flat schemas, we can do this directly by using row counts
      // - for nested schemas, these offsets are computed during the preprocess step
      //
      // NOTE: in a chunked read situation, s->col.column_data_base and s->col.valid_map_base
      // will be aliased to memory that has been freed when we get here in the non-decode step, so
      // we cannot check against nullptr.  we'll just check a flag directly.
      if (is_decode_step) {
        int max_depth = s->col.max_nesting_depth;
        for (int idx = 0; idx < max_depth; idx++) {
          PageNestingDecodeInfo* nesting_info = &s->nesting_info[idx];

          size_t output_offset;
          // schemas without lists
          if (s->col.max_level[level_type::REPETITION] == 0) {
            output_offset = page_start_row >= min_row ? page_start_row - min_row : 0;
          }
          // for schemas with lists, we've already got the exact value precomputed
          else {
            output_offset = nesting_info->page_start_value;
          }

          if (s->col.column_data_base != nullptr) {
            nesting_info->data_out = static_cast<uint8_t*>(s->col.column_data_base[idx]);
            if (s->col.column_string_base != nullptr) {
              nesting_info->string_out = static_cast<uint8_t*>(s->col.column_string_base[idx]);
            }

            // NOTE(review): this assignment duplicates the one a few lines above with identical
            // operands; it looks redundant -- candidate for cleanup, confirm before removing.
            nesting_info->data_out = static_cast<uint8_t*>(s->col.column_data_base[idx]);

            if (nesting_info->data_out != nullptr) {
              // anything below max depth with a valid data pointer must be a list, so the
              // element size is the size of the offset type.
              uint32_t len = idx < max_depth - 1 ? sizeof(cudf::size_type) : s->dtype_len;
              // if this is a string column, then dtype_len is a lie. data will be offsets rather
              // than (ptr,len) tuples.
              if (is_string_col(s->col)) { len = sizeof(cudf::size_type); }
              nesting_info->data_out += (output_offset * len);
            }
            if (nesting_info->string_out != nullptr) {
              nesting_info->string_out += s->page.str_offset;
            }
            nesting_info->valid_map = s->col.valid_map_base[idx];
            if (nesting_info->valid_map != nullptr) {
              // valid_map is a bitmask; advance by whole words then record the bit offset
              nesting_info->valid_map += output_offset >> 5;
              nesting_info->valid_map_offset = (int32_t)(output_offset & 0x1f);
            }
          }
        }
      }
      s->first_output_value = 0;

      // Find the compressed size of repetition levels
      cur += InitLevelSection(s, cur, end, level_type::REPETITION);
      // Find the compressed size of definition levels
      cur += InitLevelSection(s, cur, end, level_type::DEFINITION);

      s->dict_bits = 0;
      s->dict_base = nullptr;
      s->dict_size = 0;
      // NOTE: if additional encodings are supported in the future, modifications must
      // be made to is_supported_encoding() in reader_impl_preprocess.cu
      switch (s->page.encoding) {
        case Encoding::PLAIN_DICTIONARY:
        case Encoding::RLE_DICTIONARY:
          // RLE-packed dictionary indices, first byte indicates index length in bits
          if (((s->col.data_type & 7) == BYTE_ARRAY) && (s->col.str_dict_index)) {
            // String dictionary: use index
            s->dict_base = reinterpret_cast<uint8_t const*>(s->col.str_dict_index);
            s->dict_size = s->col.page_info[0].num_input_values * sizeof(string_index_pair);
          } else {
            s->dict_base =
              s->col.page_info[0].page_data;  // dictionary is always stored in the first page
            s->dict_size = s->col.page_info[0].uncompressed_page_size;
          }
          s->dict_run  = 0;
          s->dict_val  = 0;
          s->dict_bits = (cur < end) ? *cur++ : 0;
          if (s->dict_bits > 32 || !s->dict_base) {
            s->set_error_code(decode_error::INVALID_DICT_WIDTH);
          }
          break;
        case Encoding::PLAIN:
          s->dict_size = static_cast<int32_t>(end - cur);
          s->dict_val  = 0;
          if ((s->col.data_type & 7) == BOOLEAN) { s->dict_run = s->dict_size * 2 + 1; }
          break;
        case Encoding::RLE: {
          // first 4 bytes are length of RLE data
          int const len = (cur[0]) + (cur[1] << 8) + (cur[2] << 16) + (cur[3] << 24);
          cur += 4;
          if (cur + len > end) { s->set_error_code(decode_error::DATA_STREAM_OVERRUN); }
          s->dict_run = 0;
        } break;
        case Encoding::DELTA_BINARY_PACKED:
        case Encoding::DELTA_BYTE_ARRAY:
          // nothing to do, just don't error
          break;
        default: {
          s->set_error_code(decode_error::UNSUPPORTED_ENCODING);
          break;
        }
      }
      if (cur > end) { s->set_error_code(decode_error::DATA_STREAM_OVERRUN); }
      s->lvl_end    = cur;
      s->data_start = cur;
      s->data_end   = end;
    } else {
      s->set_error_code(decode_error::EMPTY_PAGE);
    }

    s->lvl_count[level_type::REPETITION] = 0;
    s->lvl_count[level_type::DEFINITION] = 0;
    s->nz_count                          = 0;
    s->num_input_values                  = s->page.num_input_values;
    s->dict_pos                          = 0;
    s->src_pos                           = 0;

    // for flat hierarchies, we can't know how many leaf values to skip unless we do a full
    // preprocess of the definition levels (since nulls will have no actual decodable value, there
    // is no direct correlation between # of rows and # of decodable values). so we will start
    // processing at the beginning of the value stream and disregard any indices that start
    // before the first row.
    if (s->col.max_level[level_type::REPETITION] == 0) {
      s->page.skipped_values      = 0;
      s->page.skipped_leaf_values = 0;
      s->input_value_count        = 0;
      s->input_row_count          = 0;
      s->input_leaf_count         = 0;

      s->row_index_lower_bound = -1;
    }
    // for nested hierarchies, we have run a preprocess that lets us skip directly to the values
    // we need to start decoding at
    else {
      // input_row_count translates to "how many rows we have processed so far", so since we are
      // skipping directly to where we want to start decoding, set it to first_row
      s->input_row_count = s->first_row;

      // return the lower bound to compare (page-relative) thread row index against. Explanation:
      // In the case of nested schemas, rows can span page boundaries.  That is to say,
      // we can encounter the first value for row X on page M, but the last value for page M
      // might not be the last value for row X.  page M+1 (or further) may contain the last value.
      //
      // This means that the first values we encounter for a given page (M+1) may not belong to the
      // row indicated by chunk_row, but to the row before it that spanned page boundaries.  If that
      // previous row is within the overall row bounds, include the values by allowing relative row
      // index -1
      int const max_row = (min_row + num_rows) - 1;
      if (min_row < page_start_row && max_row >= page_start_row - 1) {
        s->row_index_lower_bound = -1;
      } else {
        s->row_index_lower_bound = s->first_row;
      }

      // if we're in the decoding step, jump directly to the first
      // value we care about
      if (is_decode_step) {
        s->input_value_count = s->page.skipped_values > -1 ? s->page.skipped_values : 0;
      } else {
        s->input_value_count = 0;
        s->input_leaf_count  = 0;
        s->page.skipped_values =
          -1;  // magic number to indicate it hasn't been set for use inside UpdatePageSizes
        s->page.skipped_leaf_values = 0;
      }
    }

    // make the single-thread writes above visible to the rest of the block before the
    // closing __syncthreads()
    __threadfence_block();
  }
  __syncthreads();

  return true;
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader_impl.cpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reader_impl.hpp"

#include "error.hpp"

#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/utilities/stream_pool.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>

#include <bitset>
#include <numeric>

namespace cudf::io::parquet::detail {

/**
 * @brief Decode the page data for the current pass, writing results into the
 * pre-allocated output column buffers.
 *
 * Launches one decode kernel per kernel-mask bit on forked streams, then fixes up
 * list terminators, string offset caps, and null counts on the host.
 *
 * @param skip_rows Crop all rows below this row index
 * @param num_rows Maximum number of rows to decode
 */
void reader::impl::decode_page_data(size_t skip_rows, size_t num_rows)
{
  auto& chunks              = _pass_itm_data->chunks;
  auto& pages               = _pass_itm_data->pages_info;
  auto& page_nesting        = _pass_itm_data->page_nesting_info;
  auto& page_nesting_decode = _pass_itm_data->page_nesting_decode_info;

  auto const level_type_size = _pass_itm_data->level_type_size;

  // temporary space for DELTA_BYTE_ARRAY decoding. this only needs to live until
  // gpu::DecodeDeltaByteArray returns.
  rmm::device_uvector<uint8_t> delta_temp_buf(0, _stream);

  // Should not reach here if there is no page data.
  CUDF_EXPECTS(pages.size() > 0, "There is no page to decode");

  size_t const sum_max_depths = std::accumulate(
    chunks.begin(), chunks.end(), 0, [&](size_t cursum, ColumnChunkDesc const& chunk) {
      return cursum + _metadata->get_output_nesting_depth(chunk.src_col_schema);
    });

  // figure out which kernels to run
  auto const kernel_mask = GetAggregatedDecodeKernelMask(pages, _stream);

  // Check to see if there are any string columns present. If so, then we need to get size info
  // for each string page. This size info will be used to pre-allocate memory for the column,
  // allowing the page decoder to write string data directly to the column buffer, rather than
  // doing a gather operation later on.
  // TODO: This step is somewhat redundant if size info has already been calculated (nested schema,
  // chunked reader).
  auto const has_strings =
    (kernel_mask & BitOr(decode_kernel_mask::STRING, decode_kernel_mask::DELTA_BYTE_ARRAY)) != 0;
  std::vector<size_t> col_sizes(_input_columns.size(), 0L);
  if (has_strings) {
    ComputePageStringSizes(
      pages, chunks, delta_temp_buf, skip_rows, num_rows, level_type_size, kernel_mask, _stream);

    col_sizes = calculate_page_string_offsets();

    // check for overflow
    if (std::any_of(col_sizes.cbegin(), col_sizes.cend(), [](size_t sz) {
          return sz > std::numeric_limits<size_type>::max();
        })) {
      CUDF_FAIL("String column exceeds the column size limit", std::overflow_error);
    }
  }

  // In order to reduce the number of allocations of hostdevice_vector, we allocate a single vector
  // to store all per-chunk pointers to nested data/nullmask. `chunk_offsets[i]` will store the
  // offset into `chunk_nested_data`/`chunk_nested_valids` for the array of pointers for chunk `i`
  auto chunk_nested_valids =
    cudf::detail::hostdevice_vector<bitmask_type*>(sum_max_depths, _stream);
  auto chunk_nested_data = cudf::detail::hostdevice_vector<void*>(sum_max_depths, _stream);
  auto chunk_offsets     = std::vector<size_t>();

  // buffer for per-chunk string output pointers; empty when no string kernels will run
  auto chunk_nested_str_data =
    cudf::detail::hostdevice_vector<void*>(has_strings ? sum_max_depths : 0, _stream);

  // Update chunks with pointers to column data.
  for (size_t c = 0, page_count = 0, chunk_off = 0; c < chunks.size(); c++) {
    input_column_info const& input_col = _input_columns[chunks[c].src_col_index];
    CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema,
                 "Column/page schema index mismatch");

    size_t max_depth = _metadata->get_output_nesting_depth(chunks[c].src_col_schema);
    chunk_offsets.push_back(chunk_off);

    // get a slice of size `nesting depth` from `chunk_nested_valids` to store an array of pointers
    // to validity data
    auto valids              = chunk_nested_valids.host_ptr(chunk_off);
    chunks[c].valid_map_base = chunk_nested_valids.device_ptr(chunk_off);

    // get a slice of size `nesting depth` from `chunk_nested_data` to store an array of pointers to
    // out data
    auto data                  = chunk_nested_data.host_ptr(chunk_off);
    chunks[c].column_data_base = chunk_nested_data.device_ptr(chunk_off);

    auto str_data = has_strings ? chunk_nested_str_data.host_ptr(chunk_off) : nullptr;
    chunks[c].column_string_base =
      has_strings ? chunk_nested_str_data.device_ptr(chunk_off) : nullptr;

    chunk_off += max_depth;

    // fill in the arrays on the host.  there are some important considerations to
    // take into account here for nested columns.  specifically, with structs
    // there is sharing of output buffers between input columns.  consider this schema
    //
    //  required group field_id=1 name {
    //    required binary field_id=2 firstname (String);
    //    required binary field_id=3 middlename (String);
    //    required binary field_id=4 lastname (String);
    // }
    //
    // there are 3 input columns of data here (firstname, middlename, lastname), but
    // only 1 output column (name).  The structure of the output column buffers looks like
    // the schema itself
    //
    // struct      (name)
    //   string    (firstname)
    //   string    (middlename)
    //   string    (lastname)
    //
    // The struct column can contain validity information. the problem is, the decode
    // step for the input columns will all attempt to decode this validity information
    // because each one has it's own copy of the repetition/definition levels. but
    // since this is all happening in parallel it would mean multiple blocks would
    // be stomping all over the same memory randomly.  to work around this, we set
    // things up so that only 1 child of any given nesting level fills in the
    // data (offsets in the case of lists) or validity information for the higher
    // levels of the hierarchy that are shared.  In this case, it would mean we
    // would just choose firstname to be the one that decodes the validity for name.
    //
    // we do this by only handing out the pointers to the first child we come across.
    //
    auto* cols = &_output_buffers;
    for (size_t idx = 0; idx < max_depth; idx++) {
      auto& out_buf = (*cols)[input_col.nesting[idx]];
      cols          = &out_buf.children;

      int owning_schema = out_buf.user_data & PARQUET_COLUMN_BUFFER_SCHEMA_MASK;
      if (owning_schema == 0 || owning_schema == input_col.schema_idx) {
        valids[idx] = out_buf.null_mask();
        data[idx]   = out_buf.data();
        // only do string buffer for leaf
        if (out_buf.string_size() == 0 && col_sizes[chunks[c].src_col_index] > 0) {
          out_buf.create_string_data(col_sizes[chunks[c].src_col_index], _stream);
        }
        if (has_strings) { str_data[idx] = out_buf.string_data(); }
        out_buf.user_data |=
          static_cast<uint32_t>(input_col.schema_idx) & PARQUET_COLUMN_BUFFER_SCHEMA_MASK;
      } else {
        valids[idx] = nullptr;
        data[idx]   = nullptr;
      }
    }

    // column_data_base will always point to leaf data, even for nested types.
    page_count += chunks[c].max_num_pages;
  }

  chunks.host_to_device_async(_stream);
  chunk_nested_valids.host_to_device_async(_stream);
  chunk_nested_data.host_to_device_async(_stream);
  if (has_strings) { chunk_nested_str_data.host_to_device_async(_stream); }

  // create this before we fork streams
  kernel_error error_code(_stream);

  // get the number of streams we need from the pool and tell them to wait on the H2D copies
  int const nkernels = std::bitset<32>(kernel_mask).count();
  auto streams       = cudf::detail::fork_streams(_stream, nkernels);

  // launch string decoder
  int s_idx = 0;
  if (BitAnd(kernel_mask, decode_kernel_mask::STRING) != 0) {
    DecodeStringPageData(
      pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]);
  }

  // launch delta byte array decoder
  if (BitAnd(kernel_mask, decode_kernel_mask::DELTA_BYTE_ARRAY) != 0) {
    DecodeDeltaByteArray(
      pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]);
  }

  // launch delta binary decoder
  if (BitAnd(kernel_mask, decode_kernel_mask::DELTA_BINARY) != 0) {
    DecodeDeltaBinary(
      pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]);
  }

  // launch the catch-all page decoder
  if (BitAnd(kernel_mask, decode_kernel_mask::GENERAL) != 0) {
    DecodePageData(
      pages, chunks, num_rows, skip_rows, level_type_size, error_code.data(), streams[s_idx++]);
  }

  // synchronize the streams
  cudf::detail::join_streams(streams, _stream);

  pages.device_to_host_async(_stream);
  page_nesting.device_to_host_async(_stream);
  page_nesting_decode.device_to_host_async(_stream);

  if (error_code.value() != 0) {
    CUDF_FAIL("Parquet data decode failed with code(s) " + error_code.str());
  }

  // for list columns, add the final offset to every offset buffer.
  // TODO : make this happen in more efficiently. Maybe use thrust::for_each
  // on each buffer.
  // Note : the reason we are doing this here instead of in the decode kernel is
  // that it is difficult/impossible for a given page to know that it is writing the very
  // last value that should then be followed by a terminator (because rows can span
  // page boundaries).
  for (size_t idx = 0; idx < _input_columns.size(); idx++) {
    input_column_info const& input_col = _input_columns[idx];

    auto* cols = &_output_buffers;
    for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) {
      auto& out_buf = (*cols)[input_col.nesting[l_idx]];
      cols          = &out_buf.children;

      if (out_buf.type.id() == type_id::LIST &&
          (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_LIST_TERMINATED) == 0) {
        CUDF_EXPECTS(l_idx < input_col.nesting_depth() - 1, "Encountered a leaf list column");
        auto const& child = (*cols)[input_col.nesting[l_idx + 1]];

        // the final offset for a list at level N is the size of it's child
        int const offset = child.type.id() == type_id::LIST ? child.size - 1 : child.size;
        CUDF_CUDA_TRY(cudaMemcpyAsync(static_cast<int32_t*>(out_buf.data()) + (out_buf.size - 1),
                                      &offset,
                                      sizeof(offset),
                                      cudaMemcpyDefault,
                                      _stream.value()));
        out_buf.user_data |= PARQUET_COLUMN_BUFFER_FLAG_LIST_TERMINATED;
      } else if (out_buf.type.id() == type_id::STRING) {
        // need to cap off the string offsets column
        size_type const sz = static_cast<size_type>(col_sizes[idx]);
        // check the CUDA result here like the list-terminator copy above does; a silently
        // dropped error would surface later as corrupt offsets
        CUDF_CUDA_TRY(cudaMemcpyAsync(static_cast<int32_t*>(out_buf.data()) + out_buf.size,
                                      &sz,
                                      sizeof(size_type),
                                      cudaMemcpyDefault,
                                      _stream.value()));
      }
    }
  }

  // update null counts in the final column buffers
  for (size_t idx = 0; idx < pages.size(); idx++) {
    PageInfo* pi = &pages[idx];
    if (pi->flags & PAGEINFO_FLAGS_DICTIONARY) { continue; }
    ColumnChunkDesc* col               = &chunks[pi->chunk_idx];
    input_column_info const& input_col = _input_columns[col->src_col_index];

    int index                   = pi->nesting_decode - page_nesting_decode.device_ptr();
    PageNestingDecodeInfo* pndi = &page_nesting_decode[index];

    auto* cols = &_output_buffers;
    for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) {
      auto& out_buf = (*cols)[input_col.nesting[l_idx]];
      cols          = &out_buf.children;

      // if I wasn't the one who wrote out the validity bits, skip it
      if (chunk_nested_valids.host_ptr(chunk_offsets[pi->chunk_idx])[l_idx] == nullptr) {
        continue;
      }
      out_buf.null_count() += pndi[l_idx].null_count;
    }
  }

  _stream.synchronize();
}

// Convenience constructor: non-chunked read (no output-chunk or input-pass limits)
reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources,
                   parquet_reader_options const& options,
                   rmm::cuda_stream_view stream,
                   rmm::mr::device_memory_resource* mr)
  : impl(0 /*chunk_read_limit*/,
         0 /*input_pass_read_limit*/,
         std::forward<std::vector<std::unique_ptr<cudf::io::datasource>>>(sources),
         options,
         stream,
         mr)
{
}

// Main constructor: parses metadata and selects the columns to read
reader::impl::impl(std::size_t chunk_read_limit,
                   std::size_t pass_read_limit,
                   std::vector<std::unique_ptr<datasource>>&& sources,
                   parquet_reader_options const& options,
                   rmm::cuda_stream_view stream,
                   rmm::mr::device_memory_resource* mr)
  : _stream{stream},
    _mr{mr},
    _sources{std::move(sources)},
    _output_chunk_read_limit{chunk_read_limit},
    _input_pass_read_limit{pass_read_limit}
{
  // Open and parse the source dataset metadata
  _metadata = std::make_unique<aggregate_reader_metadata>(_sources);

  // Override output timestamp resolution if requested
  if (options.get_timestamp_type().id() != type_id::EMPTY) {
    _timestamp_type = options.get_timestamp_type();
  }

  // Strings may be returned as either string or categorical columns
  _strings_to_categorical = options.is_enabled_convert_strings_to_categories();

  // Binary columns can be read as binary or strings
  _reader_column_schema = options.get_column_schema();

  // Select only columns required by the options
  std::tie(_input_columns, _output_buffers, _output_column_schemas) =
    _metadata->select_columns(options.get_columns(),
                              options.is_enabled_use_pandas_metadata(),
                              _strings_to_categorical,
                              _timestamp_type.id());

  // Save the states of the output buffers for reuse in `chunk_read()`.
  for (auto const& buff : _output_buffers) {
    _output_buffers_template.emplace_back(cudf::io::detail::inline_column_buffer::empty_like(buff));
  }
}

// Runs the (idempotent) whole-file preprocess, then sets up the next input pass if one
// is pending; both phases are guarded by flags so repeated calls are cheap.
void reader::impl::prepare_data(int64_t skip_rows,
                                std::optional<size_type> const& num_rows,
                                bool uses_custom_row_bounds,
                                host_span<std::vector<size_type> const> row_group_indices,
                                std::optional<std::reference_wrapper<ast::expression const>> filter)
{
  // if we have not preprocessed at the whole-file level, do that now
  if (!_file_preprocessed) {
    // if filter is not empty, then create output types as vector and pass for filtering.
    std::vector<data_type> output_types;
    if (filter.has_value()) {
      std::transform(_output_buffers.cbegin(),
                     _output_buffers.cend(),
                     std::back_inserter(output_types),
                     [](auto const& col) { return col.type; });
    }
    std::tie(
      _file_itm_data.global_skip_rows, _file_itm_data.global_num_rows, _file_itm_data.row_groups) =
      _metadata->select_row_groups(
        row_group_indices, skip_rows, num_rows, output_types, filter, _stream);

    if (_file_itm_data.global_num_rows > 0 && not _file_itm_data.row_groups.empty() &&
        not _input_columns.empty()) {
      // fills in chunk information without physically loading or decompressing
      // the associated data
      create_global_chunk_info();

      // compute schedule of input reads. Each rowgroup contains 1 chunk per column. For now
      // we will read an entire row group at a time. However, it is possible to do
      // sub-rowgroup reads if we made some estimates on individual chunk sizes (tricky) and
      // changed the high level structure such that we weren't always reading an entire table's
      // worth of columns at once.
      compute_input_passes();
    }

    _file_preprocessed = true;
  }

  // if we have to start a new pass, do that now
  if (!_pass_preprocessed) {
    auto const num_passes = _file_itm_data.input_pass_row_group_offsets.size() - 1;

    // always create the pass struct, even if we end up with no passes.
    // this will also cause the previous pass information to be deleted
    _pass_itm_data = std::make_unique<pass_intermediate_data>();

    if (_file_itm_data.global_num_rows > 0 && not _file_itm_data.row_groups.empty() &&
        not _input_columns.empty() && _current_input_pass < num_passes) {
      // setup the pass_intermediate_info for this pass.
      setup_next_pass();

      load_and_decompress_data();
      preprocess_pages(uses_custom_row_bounds, _output_chunk_read_limit);

      if (_output_chunk_read_limit == 0) {  // read the whole file at once
        CUDF_EXPECTS(_pass_itm_data->output_chunk_read_info.size() == 1,
                     "Reading the whole file should yield only one chunk.");
      }
    }

    _pass_preprocessed = true;
  }
}

// Fills `out_metadata` with column names, nullability, and per-file user metadata
// taken from the parsed file schema.
void reader::impl::populate_metadata(table_metadata& out_metadata)
{
  // Return column names
  out_metadata.schema_info.resize(_output_buffers.size());
  for (size_t i = 0; i < _output_column_schemas.size(); i++) {
    auto const& schema                      = _metadata->get_schema(_output_column_schemas[i]);
    out_metadata.schema_info[i].name        = schema.name;
    out_metadata.schema_info[i].is_nullable = schema.repetition_type != REQUIRED;
  }

  // Return user metadata
  out_metadata.per_file_user_data = _metadata->get_key_value_metadata();
  // NOTE(review): only the first file's key/value metadata is surfaced in `user_data`;
  // presumably intentional for multi-source reads -- confirm against callers.
  out_metadata.user_data          = {out_metadata.per_file_user_data[0].begin(),
                                     out_metadata.per_file_user_data[0].end()};
}

table_with_metadata reader::impl::read_chunk_internal(
  bool uses_custom_row_bounds, std::optional<std::reference_wrapper<ast::expression const>> filter)
{
  // If `_output_metadata` has been constructed, just copy it over.
  auto out_metadata = _output_metadata ?
table_metadata{*_output_metadata} : table_metadata{}; out_metadata.schema_info.resize(_output_buffers.size()); // output cudf columns as determined by the top level schema auto out_columns = std::vector<std::unique_ptr<column>>{}; out_columns.reserve(_output_buffers.size()); if (!has_next() || _pass_itm_data->output_chunk_read_info.empty()) { return finalize_output(out_metadata, out_columns, filter); } auto const& read_info = _pass_itm_data->output_chunk_read_info[_pass_itm_data->current_output_chunk]; // Allocate memory buffers for the output columns. allocate_columns(read_info.skip_rows, read_info.num_rows, uses_custom_row_bounds); // Parse data into the output buffers. decode_page_data(read_info.skip_rows, read_info.num_rows); // Create the final output cudf columns. for (size_t i = 0; i < _output_buffers.size(); ++i) { auto metadata = _reader_column_schema.has_value() ? std::make_optional<reader_column_schema>((*_reader_column_schema)[i]) : std::nullopt; auto const& schema = _metadata->get_schema(_output_column_schemas[i]); // FIXED_LEN_BYTE_ARRAY never read as string if (schema.type == FIXED_LEN_BYTE_ARRAY and schema.converted_type != DECIMAL) { metadata = std::make_optional<reader_column_schema>(); metadata->set_convert_binary_to_strings(false); } // Only construct `out_metadata` if `_output_metadata` has not been cached. if (!_output_metadata) { column_name_info& col_name = out_metadata.schema_info[i]; out_columns.emplace_back(make_column(_output_buffers[i], &col_name, metadata, _stream)); } else { out_columns.emplace_back(make_column(_output_buffers[i], nullptr, metadata, _stream)); } } // Add empty columns if needed. Filter output columns based on filter. 
return finalize_output(out_metadata, out_columns, filter); } table_with_metadata reader::impl::finalize_output( table_metadata& out_metadata, std::vector<std::unique_ptr<column>>& out_columns, std::optional<std::reference_wrapper<ast::expression const>> filter) { // Create empty columns as needed (this can happen if we've ended up with no actual data to read) for (size_t i = out_columns.size(); i < _output_buffers.size(); ++i) { if (!_output_metadata) { column_name_info& col_name = out_metadata.schema_info[i]; out_columns.emplace_back(io::detail::empty_like(_output_buffers[i], &col_name, _stream, _mr)); } else { out_columns.emplace_back(io::detail::empty_like(_output_buffers[i], nullptr, _stream, _mr)); } } if (!_output_metadata) { populate_metadata(out_metadata); // Finally, save the output table metadata into `_output_metadata` for reuse next time. _output_metadata = std::make_unique<table_metadata>(out_metadata); } // advance chunks/passes as necessary _pass_itm_data->current_output_chunk++; _chunk_count++; if (_pass_itm_data->current_output_chunk >= _pass_itm_data->output_chunk_read_info.size()) { _pass_itm_data->current_output_chunk = 0; _pass_itm_data->output_chunk_read_info.clear(); _current_input_pass++; _pass_preprocessed = false; } if (filter.has_value()) { auto read_table = std::make_unique<table>(std::move(out_columns)); auto predicate = cudf::detail::compute_column( *read_table, filter.value().get(), _stream, rmm::mr::get_current_device_resource()); CUDF_EXPECTS(predicate->view().type().id() == type_id::BOOL8, "Predicate filter should return a boolean"); auto output_table = cudf::detail::apply_boolean_mask(*read_table, *predicate, _stream, _mr); return {std::move(output_table), std::move(out_metadata)}; } return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)}; } table_with_metadata reader::impl::read( int64_t skip_rows, std::optional<size_type> const& num_rows, bool uses_custom_row_bounds, host_span<std::vector<size_type> 
const> row_group_indices, std::optional<std::reference_wrapper<ast::expression const>> filter) { CUDF_EXPECTS(_output_chunk_read_limit == 0, "Reading the whole file must not have non-zero byte_limit."); table_metadata metadata; populate_metadata(metadata); auto expr_conv = named_to_reference_converter(filter, metadata); auto output_filter = expr_conv.get_converted_expr(); prepare_data(skip_rows, num_rows, uses_custom_row_bounds, row_group_indices, output_filter); return read_chunk_internal(uses_custom_row_bounds, output_filter); } table_with_metadata reader::impl::read_chunk() { // Reset the output buffers to their original states (right after reader construction). // Don't need to do it if we read the file all at once. if (_chunk_count > 0) { _output_buffers.resize(0); for (auto const& buff : _output_buffers_template) { _output_buffers.emplace_back(cudf::io::detail::inline_column_buffer::empty_like(buff)); } } prepare_data(0 /*skip_rows*/, std::nullopt /*num_rows, `nullopt` means unlimited*/, true /*uses_custom_row_bounds*/, {} /*row_group_indices, empty means read all row groups*/, std::nullopt /*filter*/); return read_chunk_internal(true, std::nullopt); } bool reader::impl::has_next() { prepare_data(0 /*skip_rows*/, std::nullopt /*num_rows, `nullopt` means unlimited*/, true /*uses_custom_row_bounds*/, {} /*row_group_indices, empty means read all row groups*/, std::nullopt /*filter*/); size_t const num_input_passes = std::max( int64_t{0}, static_cast<int64_t>(_file_itm_data.input_pass_row_group_offsets.size()) - 1); return (_pass_itm_data->current_output_chunk < _pass_itm_data->output_chunk_read_info.size()) || (_current_input_pass < num_input_passes); } namespace { parquet_column_schema walk_schema(aggregate_reader_metadata const* mt, int idx) { SchemaElement const& sch = mt->get_schema(idx); std::vector<parquet_column_schema> children; for (auto const& child_idx : sch.children_idx) { children.push_back(walk_schema(mt, child_idx)); } return 
parquet_column_schema{ sch.name, static_cast<parquet::TypeKind>(sch.type), std::move(children)}; } } // namespace parquet_metadata read_parquet_metadata(host_span<std::unique_ptr<datasource> const> sources) { // Open and parse the source dataset metadata auto metadata = aggregate_reader_metadata(sources); return parquet_metadata{parquet_schema{walk_schema(&metadata, 0)}, metadata.get_num_rows(), metadata.get_num_row_groups(), metadata.get_key_value_metadata()[0]}; } } // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/compact_protocol_reader.hpp
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "parquet.hpp"

#include <algorithm>
#include <cstddef>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace cudf::io::parquet::detail {

/**
 * @brief Class for parsing Parquet's Thrift Compact Protocol encoded metadata
 *
 * This class takes in the starting location of the Parquet metadata, and fills
 * out Thrift-derived structs and a schema tree.
 *
 * In a Parquet, the metadata is separated from the data, both conceptually and
 * physically. There may be multiple data files sharing a common metadata file.
 *
 * The parser handles both V1 and V2 Parquet datasets, although not all
 * compression codecs are supported yet.
 */
class CompactProtocolReader {
 public:
  explicit CompactProtocolReader(uint8_t const* base = nullptr, size_t len = 0) { init(base, len); }

  /**
   * @brief (Re)points the reader at a byte buffer of length `len`. Does not take ownership;
   * the buffer must outlive the reader.
   */
  void init(uint8_t const* base, size_t len)
  {
    m_base = m_cur = base;
    m_end          = base + len;
  }

  /// @brief Number of bytes consumed so far from the buffer.
  [[nodiscard]] ptrdiff_t bytecount() const noexcept { return m_cur - m_base; }

  /// @brief Reads one byte and advances; returns 0 (without advancing) once past the end.
  unsigned int getb() noexcept { return (m_cur < m_end) ? *m_cur++ : 0; }

  /// @brief Advances by `bytecnt`, clamped so the cursor never moves past the end.
  void skip_bytes(size_t bytecnt) noexcept
  {
    bytecnt = std::min(bytecnt, (size_t)(m_end - m_cur));
    m_cur += bytecnt;
  }

  // returns a varint encoded integer
  // (ULEB128: 7 data bits per byte, high bit set on all but the last byte)
  template <typename T>
  T get_varint() noexcept
  {
    T v = 0;
    for (uint32_t l = 0;; l += 7) {
      T c = getb();
      v |= (c & 0x7f) << l;
      if (c < 0x80) { break; }
    }
    return v;
  }

  // returns a zigzag encoded signed integer
  // (zigzag maps signed to unsigned so small magnitudes encode in few bytes)
  template <typename T>
  T get_zigzag() noexcept
  {
    using U   = std::make_unsigned_t<T>;
    U const u = get_varint<U>();
    return static_cast<T>((u >> 1u) ^ -static_cast<T>(u & 1));
  }

  // thrift spec says to use zigzag i32 for i16 types
  int32_t get_i16() noexcept { return get_zigzag<int32_t>(); }
  int32_t get_i32() noexcept { return get_zigzag<int32_t>(); }
  int64_t get_i64() noexcept { return get_zigzag<int64_t>(); }

  uint32_t get_u32() noexcept { return get_varint<uint32_t>(); }
  uint64_t get_u64() noexcept { return get_varint<uint64_t>(); }

  /**
   * @brief Reads a compact-protocol list header.
   *
   * @return pair of (element type id, element count); a size nibble of 0xf means the
   * actual count follows as a varint.
   */
  [[nodiscard]] std::pair<uint8_t, uint32_t> get_listh() noexcept
  {
    uint32_t const c = getb();
    uint32_t sz      = c >> 4;
    uint8_t t        = c & 0xf;
    if (sz == 0xf) { sz = get_u32(); }
    return {t, sz};
  }

  /// @brief Skips over a field of thrift type `t`; `depth` guards recursion into nested values.
  bool skip_struct_field(int t, int depth = 0);

 public:
  // Generate Thrift structure parsing routines
  bool read(FileMetaData* f);
  bool read(SchemaElement* s);
  bool read(LogicalType* l);
  bool read(DecimalType* d);
  bool read(TimeType* t);
  bool read(TimeUnit* u);
  bool read(TimestampType* t);
  bool read(IntType* t);
  bool read(RowGroup* r);
  bool read(ColumnChunk* c);
  bool read(ColumnChunkMetaData* c);
  bool read(PageHeader* p);
  bool read(DataPageHeader* d);
  bool read(DictionaryPageHeader* d);
  bool read(DataPageHeaderV2* d);
  bool read(KeyValue* k);
  bool read(PageLocation* p);
  bool read(OffsetIndex* o);
  bool read(ColumnIndex* c);
  bool read(Statistics* s);
  bool read(ColumnOrder* c);

 public:
  /// @brief Number of bits needed to represent values in [0, max_level].
  static int NumRequiredBits(uint32_t max_level) noexcept
  {
    return 32 - CountLeadingZeros32(max_level);
  }

  /// @brief Populates the schema tree (parent/child links, def/rep levels) in `md`.
  bool InitSchema(FileMetaData* md);

 protected:
  // Recursive helper for InitSchema; returns the next schema index after the subtree at `idx`.
  int WalkSchema(
    FileMetaData* md, int idx = 0, int parent_idx = 0, int max_def_level = 0, int max_rep_level = 0);

 protected:
  uint8_t const* m_base = nullptr;  // start of the metadata buffer (not owned)
  uint8_t const* m_cur  = nullptr;  // current read cursor
  uint8_t const* m_end  = nullptr;  // one past the last valid byte

  friend class parquet_field_string;
  friend class parquet_field_string_list;
  friend class parquet_field_binary;
  friend class parquet_field_binary_list;
  friend class parquet_field_struct_blob;
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/page_string_decode.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "delta_binary.cuh" #include "page_decode.cuh" #include "page_string_utils.cuh" #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/stream_pool.hpp> #include <cudf/strings/detail/gather.cuh> #include <thrust/logical.h> #include <thrust/transform_scan.h> #include <bitset> namespace cudf::io::parquet::detail { namespace { constexpr int preprocess_block_size = 512; constexpr int decode_block_size = 128; constexpr int delta_preproc_block_size = 64; constexpr int rolling_buf_size = decode_block_size * 2; constexpr int preproc_buf_size = LEVEL_DECODE_BUF_SIZE; /** * @brief Compute the start and end page value bounds for this page * * This uses definition and repetition level info to determine the number of valid and null * values for the page, taking into account skip_rows/num_rows (if set). 
* * @param s The local page info * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read * @param is_bounds_pg True if this page is clipped * @param has_repetition True if the schema is nested * @param decoders Definition and repetition level decoders * @return pair containing start and end value indexes * @tparam level_t Type used to store decoded repetition and definition levels * @tparam rle_buf_size Size of the buffer used when decoding repetition and definition levels */ template <typename level_t, int rle_buf_size> __device__ thrust::pair<int, int> page_bounds(page_state_s* const s, size_t min_row, size_t num_rows, bool is_bounds_pg, bool has_repetition, rle_stream<level_t, rle_buf_size>* decoders) { using block_reduce = cub::BlockReduce<int, preprocess_block_size>; using block_scan = cub::BlockScan<int, preprocess_block_size>; __shared__ union { typename block_reduce::TempStorage reduce_storage; typename block_scan::TempStorage scan_storage; } temp_storage; auto const t = threadIdx.x; // decode batches of level stream data using rle_stream objects and use the results to // calculate start and end value positions in the encoded string data. 
int const max_depth = s->col.max_nesting_depth; int const max_def = s->nesting_info[max_depth - 1].max_def_level; // can skip all this if we know there are no nulls if (max_def == 0 && !is_bounds_pg) { s->page.num_valids = s->num_input_values; s->page.num_nulls = 0; return {0, s->num_input_values}; } int start_value = 0; int end_value = s->page.num_input_values; auto const pp = &s->page; auto const col = &s->col; // initialize the stream decoders (requires values computed in setupLocalPageInfo) auto const def_decode = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::DEFINITION]); auto const rep_decode = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::REPETITION]); decoders[level_type::DEFINITION].init(s->col.level_bits[level_type::DEFINITION], s->abs_lvl_start[level_type::DEFINITION], s->abs_lvl_end[level_type::DEFINITION], preproc_buf_size, def_decode, s->page.num_input_values); // only need repetition if this is a bounds page. otherwise all we need is def level info // to count the nulls. if (has_repetition && is_bounds_pg) { decoders[level_type::REPETITION].init(s->col.level_bits[level_type::REPETITION], s->abs_lvl_start[level_type::REPETITION], s->abs_lvl_end[level_type::REPETITION], preproc_buf_size, rep_decode, s->page.num_input_values); } int processed = 0; // if this is a bounds page, we need to do extra work to find the start and/or end value index if (is_bounds_pg) { __shared__ int skipped_values; __shared__ int skipped_leaf_values; __shared__ int last_input_value; __shared__ int end_val_idx; // need these for skip_rows case auto const page_start_row = col->start_row + pp->chunk_row; auto const max_row = min_row + num_rows; auto const begin_row = page_start_row >= min_row ? 0 : min_row - page_start_row; auto const max_page_rows = pp->num_rows - begin_row; auto const page_rows = page_start_row + begin_row + max_page_rows <= max_row ? 
max_page_rows : max_row - (page_start_row + begin_row); auto end_row = begin_row + page_rows; int row_fudge = -1; // short circuit for no nulls if (max_def == 0 && !has_repetition) { if (t == 0) { pp->num_nulls = 0; pp->num_valids = end_row - begin_row; } return {begin_row, end_row}; } int row_count = 0; int leaf_count = 0; bool skipped_values_set = false; bool end_value_set = false; while (processed < s->page.num_input_values) { thread_index_type start_val = processed; if (has_repetition) { decoders[level_type::REPETITION].decode_next(t); __syncthreads(); // special case where page does not begin at a row boundary if (processed == 0 && rep_decode[0] != 0) { if (t == 0) { skipped_values = 0; skipped_leaf_values = 0; } skipped_values_set = true; end_row++; // need to finish off the previous row row_fudge = 0; } } // the # of rep/def levels will always be the same size processed += decoders[level_type::DEFINITION].decode_next(t); __syncthreads(); // do something with the level data while (start_val < processed) { auto const idx_t = start_val + t; auto const idx = rolling_index<preproc_buf_size>(idx_t); // get absolute thread row index int is_new_row = idx_t < processed && (!has_repetition || rep_decode[idx] == 0); int thread_row_count, block_row_count; block_scan(temp_storage.scan_storage) .InclusiveSum(is_new_row, thread_row_count, block_row_count); __syncthreads(); // get absolute thread leaf index int const is_new_leaf = idx_t < processed && (def_decode[idx] >= max_def); int thread_leaf_count, block_leaf_count; block_scan(temp_storage.scan_storage) .InclusiveSum(is_new_leaf, thread_leaf_count, block_leaf_count); __syncthreads(); // if we have not set skipped values yet, see if we found the first in-bounds row if (!skipped_values_set && row_count + block_row_count > begin_row) { // if this thread is in row bounds int const row_index = thread_row_count + row_count - 1; int const in_row_bounds = idx_t < processed && (row_index >= begin_row) && (row_index < end_row); 
int local_count, global_count; block_scan(temp_storage.scan_storage) .InclusiveSum(in_row_bounds, local_count, global_count); __syncthreads(); // we found it if (global_count > 0) { // this is the thread that represents the first row. need to test in_row_bounds for // the case where we only want one row and local_count == 1 for many threads. if (local_count == 1 && in_row_bounds) { skipped_values = idx_t; skipped_leaf_values = leaf_count + (is_new_leaf ? thread_leaf_count - 1 : thread_leaf_count); } skipped_values_set = true; } } // test if row_count will exceed end_row in this batch if (!end_value_set && row_count + block_row_count >= end_row) { // if this thread exceeds row bounds. row_fudge change depending on whether we've faked // the end row to account for starting a page in the middle of a row. int const row_index = thread_row_count + row_count + row_fudge; int const exceeds_row_bounds = row_index >= end_row; int local_count, global_count; block_scan(temp_storage.scan_storage) .InclusiveSum(exceeds_row_bounds, local_count, global_count); __syncthreads(); // we found it if (global_count > 0) { // this is the thread that represents the end row. if (local_count == 1) { last_input_value = idx_t; end_val_idx = leaf_count + (is_new_leaf ? thread_leaf_count - 1 : thread_leaf_count); } end_value_set = true; break; } } row_count += block_row_count; leaf_count += block_leaf_count; start_val += preprocess_block_size; } __syncthreads(); if (end_value_set) { break; } } start_value = skipped_values_set ? skipped_leaf_values : 0; end_value = end_value_set ? end_val_idx : leaf_count; if (t == 0) { int const v0 = skipped_values_set ? skipped_values : 0; int const vn = end_value_set ? 
last_input_value : s->num_input_values; int const total_values = vn - v0; int const total_leaf_values = end_value - start_value; int const num_nulls = total_values - total_leaf_values; pp->num_nulls = num_nulls; pp->num_valids = total_leaf_values; } } // already filtered out unwanted pages, so need to count all non-null values in this page else { int num_nulls = 0; while (processed < s->page.num_input_values) { thread_index_type start_val = processed; processed += decoders[level_type::DEFINITION].decode_next(t); __syncthreads(); while (start_val < processed) { auto const idx_t = start_val + t; if (idx_t < processed) { auto const idx = rolling_index<preproc_buf_size>(idx_t); if (def_decode[idx] < max_def) { num_nulls++; } } start_val += preprocess_block_size; } __syncthreads(); } int const null_count = block_reduce(temp_storage.reduce_storage).Sum(num_nulls); if (t == 0) { pp->num_nulls = null_count; pp->num_valids = pp->num_input_values - null_count; } __syncthreads(); end_value -= pp->num_nulls; } return {start_value, end_value}; } /** * @brief Compute string size information for dictionary encoded strings. 
* * @param data Pointer to the start of the page data stream * @param dict_base Pointer to the start of the dictionary * @param dict_bits The number of bits used to in the dictionary bit packing * @param dict_size Size of the dictionary in bytes * @param data_size Size of the page data in bytes * @param start_value Do not count values that occur before this index * @param end_value Do not count values that occur after this index */ __device__ size_t totalDictEntriesSize(uint8_t const* data, uint8_t const* dict_base, int dict_bits, int dict_size, int data_size, int start_value, int end_value) { int const t = threadIdx.x; uint8_t const* ptr = data; uint8_t const* const end = data + data_size; int const bytecnt = (dict_bits + 7) >> 3; size_t l_str_len = 0; // partial sums across threads int pos = 0; // current value index in the data stream int t0 = 0; // thread 0 for this batch int dict_run = 0; int dict_val = 0; while (pos < end_value && ptr <= end) { if (dict_run <= 1) { dict_run = (ptr < end) ? get_vlq32(ptr, end) : 0; if (!(dict_run & 1)) { // Repeated value if (ptr + bytecnt <= end) { int32_t run_val = ptr[0]; if (bytecnt > 1) { run_val |= ptr[1] << 8; if (bytecnt > 2) { run_val |= ptr[2] << 16; if (bytecnt > 3) { run_val |= ptr[3] << 24; } } } dict_val = run_val & ((1 << dict_bits) - 1); } ptr += bytecnt; } } int batch_len; if (dict_run & 1) { // Literal batch: must output a multiple of 8, except for the last batch int batch_len_div8; batch_len = max(min(preprocess_block_size, (int)(dict_run >> 1) * 8), 1); batch_len_div8 = (batch_len + 7) >> 3; dict_run -= batch_len_div8 * 2; ptr += batch_len_div8 * dict_bits; } else { batch_len = dict_run >> 1; dict_run = 0; } int const is_literal = dict_run & 1; // calculate my thread id for this batch. way to round-robin the work. int mytid = t - t0; if (mytid < 0) mytid += preprocess_block_size; // compute dictionary index. 
if (is_literal) { int dict_idx = 0; if (mytid < batch_len) { dict_idx = dict_val; int32_t ofs = (mytid - ((batch_len + 7) & ~7)) * dict_bits; const uint8_t* p = ptr + (ofs >> 3); ofs &= 7; if (p < end) { uint32_t c = 8 - ofs; dict_idx = (*p++) >> ofs; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; c += 8; if (c < dict_bits && p < end) { dict_idx |= (*p++) << c; } } } dict_idx &= (1 << dict_bits) - 1; } if (pos + mytid < end_value) { uint32_t const dict_pos = (dict_bits > 0) ? dict_idx * sizeof(string_index_pair) : 0; if (pos + mytid >= start_value && dict_pos < (uint32_t)dict_size) { const auto* src = reinterpret_cast<const string_index_pair*>(dict_base + dict_pos); l_str_len += src->second; } } } t0 += batch_len; } else { int const start_off = (pos < start_value && pos + batch_len > start_value) ? start_value - pos : 0; batch_len = min(batch_len, end_value - pos); if (mytid == 0) { uint32_t const dict_pos = (dict_bits > 0) ? dict_val * sizeof(string_index_pair) : 0; if (pos + batch_len > start_value && dict_pos < (uint32_t)dict_size) { const auto* src = reinterpret_cast<const string_index_pair*>(dict_base + dict_pos); l_str_len += (batch_len - start_off) * src->second; } } t0 += 1; } t0 = t0 % preprocess_block_size; pos += batch_len; } __syncthreads(); using block_reduce = cub::BlockReduce<size_t, preprocess_block_size>; __shared__ typename block_reduce::TempStorage reduce_storage; size_t sum_l = block_reduce(reduce_storage).Sum(l_str_len); return sum_l; } /** * @brief Compute string size information for plain encoded strings. 
* * @param data Pointer to the start of the page data stream * @param data_size Length of data * @param start_value Do not count values that occur before this index * @param end_value Do not count values that occur after this index */ __device__ size_t totalPlainEntriesSize(uint8_t const* data, int data_size, int start_value, int end_value) { int const t = threadIdx.x; int pos = 0; size_t total_len = 0; // This step is purely serial if (!t) { const uint8_t* cur = data; int k = 0; while (pos < end_value && k < data_size) { int len; if (k + 4 <= data_size) { len = (cur[k]) | (cur[k + 1] << 8) | (cur[k + 2] << 16) | (cur[k + 3] << 24); k += 4; if (k + len > data_size) { len = 0; } } else { len = 0; } k += len; if (pos >= start_value) { total_len += len; } pos++; } } return total_len; } /** * @brief Compute string size information for DELTA_BYTE_ARRAY encoded strings. * * This traverses the packed prefix and suffix lengths, summing them to obtain the total * number of bytes needed for the decoded string data. It also calculates an upper bound * for the largest string length to obtain an upper bound on temporary space needed if * rows will be skipped. * * Called with 64 threads. 
* * @param data Pointer to the start of the page data stream * @param end Pointer to the end of the page data stream * @param start_value Do not count values that occur before this index * @param end_value Do not count values that occur after this index * @return A pair of `size_t` values representing the total string size and temp buffer size * required for decoding */ __device__ thrust::pair<size_t, size_t> totalDeltaByteArraySize(uint8_t const* data, uint8_t const* end, int start_value, int end_value) { using cudf::detail::warp_size; using WarpReduce = cub::WarpReduce<uleb128_t>; __shared__ typename WarpReduce::TempStorage temp_storage[2]; __shared__ __align__(16) delta_binary_decoder prefixes; __shared__ __align__(16) delta_binary_decoder suffixes; int const t = threadIdx.x; int const lane_id = t % warp_size; int const warp_id = t / warp_size; if (t == 0) { auto const* suffix_start = prefixes.find_end_of_block(data, end); suffixes.init_binary_block(suffix_start, end); } __syncthreads(); // two warps will traverse the prefixes and suffixes and sum them up auto const db = t < warp_size ? &prefixes : t < 2 * warp_size ? 
&suffixes : nullptr; size_t total_bytes = 0; uleb128_t max_len = 0; if (db != nullptr) { // initialize with first value (which is stored in last_value) if (lane_id == 0 && start_value == 0) { total_bytes = db->last_value; } uleb128_t lane_sum = 0; uleb128_t lane_max = 0; while (db->current_value_idx < end_value && db->current_value_idx < db->num_encoded_values(true)) { // calculate values for current mini-block db->calc_mini_block_values(lane_id); // get per lane sum for mini-block for (uint32_t i = 0; i < db->values_per_mb; i += 32) { uint32_t const idx = db->current_value_idx + i + lane_id; if (idx >= start_value && idx < end_value && idx < db->value_count) { lane_sum += db->value[rolling_index<delta_rolling_buf_size>(idx)]; lane_max = max(lane_max, db->value[rolling_index<delta_rolling_buf_size>(idx)]); } } if (lane_id == 0) { db->setup_next_mini_block(true); } __syncwarp(); } // get sum for warp. // note: warp_sum will only be valid on lane 0. auto const warp_sum = WarpReduce(temp_storage[warp_id]).Sum(lane_sum); auto const warp_max = WarpReduce(temp_storage[warp_id]).Reduce(lane_max, cub::Max()); if (lane_id == 0) { total_bytes += warp_sum; max_len = warp_max; } } __syncthreads(); // now sum up total_bytes from the two warps auto const final_bytes = cudf::detail::single_lane_block_sum_reduce<delta_preproc_block_size, 0>(total_bytes); // Sum up prefix and suffix max lengths to get a max possible string length. Multiply that // by the number of strings in a mini-block, plus one to save the last string. auto const temp_bytes = cudf::detail::single_lane_block_sum_reduce<delta_preproc_block_size, 0>(max_len) * (db->values_per_mb + 1); return {final_bytes, temp_bytes}; } /** * @brief Kernel for computing string page bounds information. * * This kernel traverses the repetition and definition level data to determine start and end values * for pages with string-like data. Also calculates the number of null and valid values in the * page. 
 * Does nothing if the page mask is neither `STRING` nor `DELTA_BYTE_ARRAY`. On exit the
 * `num_nulls`, `num_valids`, `start_val` and `end_val` fields of the `PageInfo` struct will be
 * populated.
 *
 * @param pages All pages to be decoded
 * @param chunks All chunks to be decoded
 * @param min_rows crop all rows below min_row
 * @param num_rows Maximum number of rows to read
 * @tparam level_t Type used to store decoded repetition and definition levels
 */
template <typename level_t>
__global__ void __launch_bounds__(preprocess_block_size) gpuComputeStringPageBounds(
  PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows)
{
  __shared__ __align__(16) page_state_s state_g;

  page_state_s* const s = &state_g;
  int const page_idx    = blockIdx.x;
  int const t           = threadIdx.x;
  PageInfo* const pp    = &pages[page_idx];

  if (t == 0) {
    s->page.num_nulls  = 0;
    s->page.num_valids = 0;
    // reset str_bytes to 0 in case it's already been calculated (esp needed for chunked reads).
    // TODO: need to rethink this once str_bytes is in the statistics
    pp->str_bytes = 0;
  }

  // whether or not we have repetition levels (lists)
  bool const has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0;

  // the required number of runs in shared memory we will need to provide the
  // rle_stream object
  constexpr int rle_run_buffer_size = rle_stream_required_run_buffer_size<preprocess_block_size>();

  // the level stream decoders
  __shared__ rle_run<level_t> def_runs[rle_run_buffer_size];
  __shared__ rle_run<level_t> rep_runs[rle_run_buffer_size];
  rle_stream<level_t, preprocess_block_size> decoders[level_type::NUM_LEVEL_TYPES] = {{def_runs},
                                                                                     {rep_runs}};

  // setup page info; only process pages handled by the STRING or DELTA_BYTE_ARRAY kernels
  auto const mask = BitOr(decode_kernel_mask::STRING, decode_kernel_mask::DELTA_BYTE_ARRAY);
  if (!setupLocalPageInfo(s, pp, chunks, min_row, num_rows, mask_filter{mask}, true)) { return; }

  bool const is_bounds_pg = is_bounds_page(s, min_row, num_rows, has_repetition);

  // find start/end value indices
  auto const [start_value, end_value] =
    page_bounds(s, min_row, num_rows, is_bounds_pg, has_repetition, decoders);

  // need to save num_nulls and num_valids calculated in page_bounds in this page
  if (t == 0) {
    pp->num_nulls  = s->page.num_nulls;
    pp->num_valids = s->page.num_valids;
    pp->start_val  = start_value;
    pp->end_val    = end_value;
  }
}

/**
 * @brief Kernel for computing string page output size information for delta_byte_array encoding.
 *
 * This call ignores columns that are not DELTA_BYTE_ARRAY encoded. On exit the `str_bytes` field
 * of the `PageInfo` struct will be populated. Also fills in the `temp_string_size` field if rows
 * are to be skipped.
 *
 * @param pages All pages to be decoded
 * @param chunks All chunks to be decoded
 * @param min_rows crop all rows below min_row
 * @param num_rows Maximum number of rows to read
 */
__global__ void __launch_bounds__(delta_preproc_block_size) gpuComputeDeltaPageStringSizes(
  PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows)
{
  __shared__ __align__(16) page_state_s state_g;

  page_state_s* const s = &state_g;
  int const page_idx    = blockIdx.x;
  int const t           = threadIdx.x;
  PageInfo* const pp    = &pages[page_idx];

  // whether or not we have repetition levels (lists)
  // NOTE(review): not referenced below in this kernel — presumably kept for symmetry with the
  // sibling kernels; confirm before removing.
  bool const has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0;

  // setup page info; only process DELTA_BYTE_ARRAY pages
  auto const mask = decode_kernel_mask::DELTA_BYTE_ARRAY;
  if (!setupLocalPageInfo(s, pp, chunks, min_row, num_rows, mask_filter{mask}, true)) { return; }

  auto const start_value = pp->start_val;

  // if data size is known, can short circuit here
  if ((chunks[pp->chunk_idx].data_type & 7) == FIXED_LEN_BYTE_ARRAY) {
    if (t == 0) {
      pp->str_bytes = pp->num_valids * s->dtype_len_in;

      // only need temp space if we're skipping values
      if (start_value > 0) {
        // just need to parse the header of the first delta binary block to get values_per_mb
        delta_binary_decoder db;
        db.init_binary_block(s->data_start, s->data_end);

        // save enough for one mini-block plus some extra to save the last_string
        pp->temp_string_size = s->dtype_len_in * (db.values_per_mb + 1);
      }
    }
  } else {
    // now process string info in the range [start_value, end_value)
    // set up for decoding strings...can be either plain or dictionary
    uint8_t const* data      = s->data_start;
    uint8_t const* const end = s->data_end;
    auto const end_value     = pp->end_val;

    auto const [len, temp_bytes] = totalDeltaByteArraySize(data, end, start_value, end_value);

    if (t == 0) {
      // TODO check for overflow
      pp->str_bytes = len;

      // only need temp space if we're skipping values
      if (start_value > 0) { pp->temp_string_size = temp_bytes; }
    }
  }
}

/**
 * @brief Kernel for computing string page output size information.
 *
 * This call ignores non-string columns. On exit the `str_bytes` field of the `PageInfo` struct
 * will be populated.
 *
 * @param pages All pages to be decoded
 * @param chunks All chunks to be decoded
 * @param min_rows crop all rows below min_row
 * @param num_rows Maximum number of rows to read
 */
__global__ void __launch_bounds__(preprocess_block_size) gpuComputePageStringSizes(
  PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows)
{
  __shared__ __align__(16) page_state_s state_g;

  page_state_s* const s = &state_g;
  int const page_idx    = blockIdx.x;
  int const t           = threadIdx.x;
  PageInfo* const pp    = &pages[page_idx];

  // whether or not we have repetition levels (lists)
  bool const has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0;

  // setup page info; only process pages handled by the STRING kernel
  if (!setupLocalPageInfo(
        s, pp, chunks, min_row, num_rows, mask_filter{decode_kernel_mask::STRING}, true)) {
    return;
  }

  bool const is_bounds_pg = is_bounds_page(s, min_row, num_rows, has_repetition);

  auto const& col  = s->col;
  size_t str_bytes = 0;
  // short circuit for FIXED_LEN_BYTE_ARRAY: every value has the same known width
  if ((col.data_type & 7) == FIXED_LEN_BYTE_ARRAY) {
    str_bytes = pp->num_valids * s->dtype_len_in;
  } else {
    // now process string info in the range [start_value, end_value)
    // set up for decoding strings...can be either plain or dictionary
    uint8_t const* data      = s->data_start;
    uint8_t const* const end = s->data_end;
    uint8_t const* dict_base = nullptr;
    int dict_size            = 0;
    auto const start_value   = pp->start_val;
    auto const end_value     = pp->end_val;

    switch (pp->encoding) {
      case Encoding::PLAIN_DICTIONARY:
      case Encoding::RLE_DICTIONARY:
        // RLE-packed dictionary indices, first byte indicates index length in bits
        if (col.str_dict_index) {
          // String dictionary: use index
          dict_base = reinterpret_cast<const uint8_t*>(col.str_dict_index);
          dict_size = col.page_info[0].num_input_values * sizeof(string_index_pair);
        } else {
          dict_base = col.page_info[0].page_data;  // dictionary is always stored in the first page
          dict_size = col.page_info[0].uncompressed_page_size;
        }

        // FIXME: need to return an error condition...this won't actually do anything
        if (s->dict_bits > 32 || !dict_base) { CUDF_UNREACHABLE("invalid dictionary bit size"); }

        str_bytes = totalDictEntriesSize(
          data, dict_base, s->dict_bits, dict_size, (end - data), start_value, end_value);
        break;
      case Encoding::PLAIN:
        // for a non-bounds page every string in the page is output, so the size is simply the
        // page data size minus the 4-byte length prefix carried by each valid value
        dict_size = static_cast<int32_t>(end - data);
        str_bytes = is_bounds_pg ? totalPlainEntriesSize(data, dict_size, start_value, end_value)
                                 : dict_size - sizeof(int) * pp->num_valids;
        break;
    }
  }

  if (t == 0) {
    // TODO check for overflow
    pp->str_bytes = str_bytes;

    // only need temp space for delta
    pp->temp_string_size = 0;
  }
}

/**
 * @brief Kernel for computing the string column data stored in the pages
 *
 * This function will write the page data and the page data's validity to the
 * output specified in the page's column chunk.
 *
 * This version uses a single warp to do the string copies.
 *
 * @param pages List of pages
 * @param chunks List of column chunks
 * @param min_row Row index to start reading at
 * @param num_rows Maximum number of rows to read
 * @param error_code Output location for any error encountered while decoding
 * @tparam level_t Type used to store decoded repetition and definition levels
 */
template <typename level_t>
__global__ void __launch_bounds__(decode_block_size)
  gpuDecodeStringPageData(PageInfo* pages,
                          device_span<ColumnChunkDesc const> chunks,
                          size_t min_row,
                          size_t num_rows,
                          int32_t* error_code)
{
  using cudf::detail::warp_size;
  __shared__ __align__(16) page_state_s state_g;
  __shared__ __align__(4) size_type last_offset;
  __shared__ __align__(16)
    page_state_buffers_s<rolling_buf_size, rolling_buf_size, rolling_buf_size> state_buffers;

  page_state_s* const s = &state_g;
  auto* const sb        = &state_buffers;
  int const page_idx    = blockIdx.x;
  int const t           = threadIdx.x;
  int const lane_id     = t % warp_size;
  [[maybe_unused]] null_count_back_copier _{s, t};

  auto const mask = decode_kernel_mask::STRING;
  if (!setupLocalPageInfo(
        s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{mask}, true)) {
    return;
  }

  bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0;

  // offsets are local to the page
  if (t == 0) { last_offset = 0; }
  __syncthreads();

  // first thread id of the value-writing group: threads below out_thread0 decode levels
  // (warp 0) and, when a dictionary index decode is needed, dictionary indices (warp 1)
  int const out_thread0                          = s->dict_base && s->dict_bits == 0 ? 32 : 64;
  int const leaf_level_index                     = s->col.max_nesting_depth - 1;
  PageNestingDecodeInfo* const nesting_info_base = s->nesting_info;

  __shared__ level_t rep[rolling_buf_size];  // circular buffer of repetition level values
  __shared__ level_t def[rolling_buf_size];  // circular buffer of definition level values

  // skipped_leaf_values will always be 0 for flat hierarchies.
  uint32_t skipped_leaf_values = s->page.skipped_leaf_values;
  while (s->error == 0 &&
         (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) {
    int target_pos;
    int src_pos = s->src_pos;

    if (t < out_thread0) {
      // level/index decoding warps run ahead of the value-writing warps
      target_pos = min(src_pos + 2 * (decode_block_size - out_thread0),
                       s->nz_count + (decode_block_size - out_thread0));
    } else {
      target_pos = min(s->nz_count, src_pos + decode_block_size - out_thread0);
      if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); }
    }
    // TODO(ets): see if this sync can be removed
    __syncthreads();
    if (t < 32) {
      // decode repetition and definition levels.
      // - update validity vectors
      // - updates offsets (for nested columns)
      // - produces non-NULL value indices in s->nz_idx for subsequent decoding
      gpuDecodeLevels<rolling_buf_size, level_t>(s, sb, target_pos, rep, def, t);
    } else if (t < out_thread0) {
      // skipped_leaf_values will always be 0 for flat hierarchies.
      uint32_t src_target_pos = target_pos + skipped_leaf_values;

      // WARP1: Decode dictionary indices, booleans or string positions
      if (s->dict_base) {
        src_target_pos = gpuDecodeDictionaryIndices<false>(s, sb, src_target_pos, lane_id).first;
      } else {
        gpuInitStringDescriptors<false>(s, sb, src_target_pos, lane_id);
      }
      if (t == 32) { s->dict_pos = src_target_pos; }
    } else {
      int const me = t - out_thread0;

      // WARP1..WARP3: Decode values
      src_pos += t - out_thread0;

      // the position in the output column/buffer
      int dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos)];

      // for the flat hierarchy case we will be reading from the beginning of the value stream,
      // regardless of the value of first_row. so adjust our destination offset accordingly.
      // example:
      // - user has passed skip_rows = 2, so our first_row to output is 2
      // - the row values we get from nz_idx will be
      //   0, 1, 2, 3, 4 ....
      // - by shifting these values by first_row, the sequence becomes
      //   -1, -2, 0, 1, 2 ...
      // - so we will end up ignoring the first two input rows, and input rows 2..n will
      //   get written to the output starting at position 0.
      //
      if (!has_repetition) { dst_pos -= s->first_row; }

      // need to do this before we branch on src_pos/dst_pos so we don't deadlock
      // choose a character parallel string copy when the average string is longer than a warp
      using cudf::detail::warp_size;
      auto const use_char_ll =
        s->page.num_valids > 0 && (s->page.str_bytes / s->page.num_valids) >= warp_size;

      if (me < warp_size) {
        for (int i = 0; i < decode_block_size - out_thread0; i += warp_size) {
          dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos + i)];
          if (!has_repetition) { dst_pos -= s->first_row; }

          // out-of-range lanes still participate in the warp scan below with a zero length
          auto [ptr, len] = src_pos + i < target_pos && dst_pos >= 0
                              ? gpuGetStringData(s, sb, src_pos + skipped_leaf_values + i)
                              : cuda::std::pair<char const*, size_t>{nullptr, 0};

          __shared__ cub::WarpScan<size_type>::TempStorage temp_storage;
          size_type offset;
          cub::WarpScan<size_type>(temp_storage).ExclusiveSum(len, offset);
          offset += last_offset;

          if (use_char_ll) {
            // stage per-lane results in shared memory so the whole warp can cooperate on
            // each string copy (one string at a time, character-parallel)
            __shared__ __align__(8) uint8_t const* pointers[warp_size];
            __shared__ __align__(4) size_type offsets[warp_size];
            __shared__ __align__(4) int dsts[warp_size];
            __shared__ __align__(4) int lengths[warp_size];

            offsets[me]  = offset;
            pointers[me] = reinterpret_cast<uint8_t const*>(ptr);
            dsts[me]     = dst_pos;
            lengths[me]  = len;
            __syncwarp();

            for (int ss = 0; ss < warp_size && ss + i + s->src_pos < target_pos; ss++) {
              if (dsts[ss] >= 0) {
                auto offptr =
                  reinterpret_cast<int32_t*>(nesting_info_base[leaf_level_index].data_out) +
                  dsts[ss];
                *offptr      = lengths[ss];
                auto str_ptr = nesting_info_base[leaf_level_index].string_out + offsets[ss];
                ll_strcpy(str_ptr, pointers[ss], lengths[ss], me);
              }
            }
          } else {
            // short strings: each lane copies its own string
            if (src_pos + i < target_pos && dst_pos >= 0) {
              auto offptr =
                reinterpret_cast<int32_t*>(nesting_info_base[leaf_level_index].data_out) + dst_pos;
              *offptr      = len;
              auto str_ptr = nesting_info_base[leaf_level_index].string_out + offset;
              memcpy(str_ptr, ptr, len);
            }
            __syncwarp();
          }

          // last thread in warp updates last_offset
          if (me == warp_size - 1) { last_offset = offset + len; }
          __syncwarp();
        }
      }

      if (t == out_thread0) { s->src_pos = target_pos; }
    }
    __syncthreads();
  }

  // now turn array of lengths into offsets
  int value_count = nesting_info_base[leaf_level_index].value_count;

  // if no repetition we haven't calculated start/end bounds and instead just skipped
  // values until we reach first_row. account for that here.
  if (!has_repetition) { value_count -= s->first_row; }

  auto const offptr = reinterpret_cast<size_type*>(nesting_info_base[leaf_level_index].data_out);
  block_excl_sum<decode_block_size>(offptr, value_count, s->page.str_offset);

  if (t == 0 and s->error != 0) { set_error(s->error, error_code); }
}

// Functor used to set the `temp_string_buf` pointer for each page. `data` points to a buffer
// to be used when skipping rows in the delta_byte_array decoder. Given a page and an offset,
// set the page's `temp_string_buf` to be `data + offset`.
struct page_tform_functor {
  uint8_t* const data;

  __device__ PageInfo operator()(PageInfo& page, int64_t offset)
  {
    if (page.temp_string_size != 0) { page.temp_string_buf = data + offset; }
    return page;
  }
};

}  // anonymous namespace

/**
 * @copydoc cudf::io::parquet::detail::ComputePageStringSizes
 */
void ComputePageStringSizes(cudf::detail::hostdevice_vector<PageInfo>& pages,
                            cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                            rmm::device_uvector<uint8_t>& temp_string_buf,
                            size_t min_row,
                            size_t num_rows,
                            int level_type_size,
                            uint32_t kernel_mask,
                            rmm::cuda_stream_view stream)
{
  dim3 const dim_block(preprocess_block_size, 1);
  dim3 const dim_grid(pages.size(), 1);  // 1 threadblock per page
  if (level_type_size == 1) {
    gpuComputeStringPageBounds<uint8_t>
      <<<dim_grid, dim_block, 0, stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows);
  } else {
    gpuComputeStringPageBounds<uint16_t>
      <<<dim_grid, dim_block, 0, stream.value()>>>(pages.device_ptr(), chunks, min_row, num_rows);
  }

  // kernel mask may contain other kernels we don't need to count
  int const count_mask =
    kernel_mask & BitOr(decode_kernel_mask::DELTA_BYTE_ARRAY, decode_kernel_mask::STRING);
  int const nkernels = std::bitset<32>(count_mask).count();
  // run the size kernels for the two encodings on separate forked streams so they can overlap
  auto const streams = cudf::detail::fork_streams(stream, nkernels);

  int s_idx = 0;
  if (BitAnd(kernel_mask, decode_kernel_mask::DELTA_BYTE_ARRAY) != 0) {
    dim3 dim_delta(delta_preproc_block_size, 1);
    gpuComputeDeltaPageStringSizes<<<dim_grid, dim_delta, 0, streams[s_idx++].value()>>>(
      pages.device_ptr(), chunks, min_row, num_rows);
  }
  if (BitAnd(kernel_mask, decode_kernel_mask::STRING) != 0) {
    gpuComputePageStringSizes<<<dim_grid, dim_block, 0, streams[s_idx++].value()>>>(
      pages.device_ptr(), chunks, min_row, num_rows);
  }

  // synchronize the streams
  cudf::detail::join_streams(streams, stream);

  // check for needed temp space for DELTA_BYTE_ARRAY
  auto const need_sizes = thrust::any_of(
    rmm::exec_policy(stream), pages.d_begin(), pages.d_end(), [] __device__(auto& page) {
      return page.temp_string_size != 0;
    });

  if (need_sizes) {
    // sum up all of the temp_string_sizes
    auto const page_sizes = [] __device__(PageInfo const& page) { return page.temp_string_size; };
    auto const total_size = thrust::transform_reduce(rmm::exec_policy(stream),
                                                     pages.d_begin(),
                                                     pages.d_end(),
                                                     page_sizes,
                                                     0L,
                                                     thrust::plus<int64_t>{});

    // now do an exclusive scan over the temp_string_sizes to get offsets for each
    // page's chunk of the temp buffer
    rmm::device_uvector<int64_t> page_string_offsets(pages.size(), stream);
    thrust::transform_exclusive_scan(rmm::exec_policy_nosync(stream),
                                     pages.d_begin(),
                                     pages.d_end(),
                                     page_string_offsets.begin(),
                                     page_sizes,
                                     0L,
                                     thrust::plus<int64_t>{});

    // allocate the temp space
    temp_string_buf.resize(total_size, stream);

    // now use the offsets array to set each page's temp_string_buf pointers
    thrust::transform(rmm::exec_policy_nosync(stream),
                      pages.d_begin(),
                      pages.d_end(),
                      page_string_offsets.begin(),
                      pages.d_begin(),
                      page_tform_functor{temp_string_buf.data()});
  }
}

/**
 * @copydoc cudf::io::parquet::detail::DecodeStringPageData
 */
void __host__ DecodeStringPageData(cudf::detail::hostdevice_vector<PageInfo>& pages,
                                   cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                                   size_t num_rows,
                                   size_t min_row,
                                   int level_type_size,
                                   int32_t* error_code,
                                   rmm::cuda_stream_view stream)
{
  CUDF_EXPECTS(pages.size() > 0, "There is no page to decode");

  dim3 dim_block(decode_block_size, 1);
  dim3 dim_grid(pages.size(), 1);  // 1 threadblock per page

  if (level_type_size == 1) {
    gpuDecodeStringPageData<uint8_t><<<dim_grid, dim_block, 0, stream.value()>>>(
      pages.device_ptr(), chunks, min_row, num_rows, error_code);
  } else {
    gpuDecodeStringPageData<uint16_t><<<dim_grid, dim_block, 0, stream.value()>>>(
      pages.device_ptr(), chunks, min_row, num_rows, error_code);
  }
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/decode_preprocess.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "page_decode.cuh"

#include <io/utilities/column_buffer.hpp>

#include <cudf/hashing/detail/default_hash.cuh>

#include <rmm/exec_policy.hpp>

#include <thrust/reduce.h>

namespace cudf::io::parquet::detail {

namespace {

// # of threads we're decoding with
constexpr int preprocess_block_size = 512;

// the required number of runs in shared memory we will need to provide the
// rle_stream object
constexpr int rle_run_buffer_size = rle_stream_required_run_buffer_size<preprocess_block_size>();

// the size of the rolling batch buffer
constexpr int rolling_buf_size = LEVEL_DECODE_BUF_SIZE;

using unused_state_buf = page_state_buffers_s<0, 0, 0>;

/**
 * @brief Computes the total string size, in bytes, for a page.
 *
 * This function expects the dictionary position to be at 0 and will traverse
 * the entire thing.
 *
 * Operates on a single warp only. Expects t < 32
 *
 * @param s The local page info
 * @param t Thread index
 * @return Total size of the page's string data in bytes
 */
__device__ size_type gpuDecodeTotalPageStringSize(page_state_s* s, int t)
{
  size_type target_pos = s->num_input_values;
  size_type str_len    = 0;
  if (s->dict_base) {
    auto const [new_target_pos, len] =
      gpuDecodeDictionaryIndices<true, unused_state_buf>(s, nullptr, target_pos, t);
    target_pos = new_target_pos;
    str_len    = len;
  } else if ((s->col.data_type & 7) == BYTE_ARRAY) {
    str_len = gpuInitStringDescriptors<true, unused_state_buf>(s, nullptr, target_pos, t);
  }
  if (!t) { s->dict_pos = target_pos; }
  return str_len;
}

/**
 * @brief Update output column sizes for every nesting level based on a batch
 * of incoming decoded definition and repetition level values.
 *
 * If bounds_set is true, computes skipped_values and skipped_leaf_values for the
 * page to indicate where we need to skip to based on min/max row.
 *
 * Operates at the block level.
 *
 * @param s The local page info
 * @param target_value_count The target value count to process up to
 * @param rep Repetition level buffer
 * @param def Definition level buffer
 * @param t Thread index
 * @param bounds_set A boolean indicating whether or not min/max row bounds have been set
 */
template <typename level_t>
static __device__ void gpuUpdatePageSizes(page_state_s* s,
                                          int target_value_count,
                                          level_t const* const rep,
                                          level_t const* const def,
                                          int t,
                                          bool bounds_set)
{
  // max nesting depth of the column
  int const max_depth = s->col.max_nesting_depth;

  constexpr int num_warps      = preprocess_block_size / 32;
  constexpr int max_batch_size = num_warps * 32;

  using block_reduce = cub::BlockReduce<int, preprocess_block_size>;
  using block_scan   = cub::BlockScan<int, preprocess_block_size>;
  // union is safe here because the scan and reduce uses below are separated by __syncthreads()
  __shared__ union {
    typename block_reduce::TempStorage reduce_storage;
    typename block_scan::TempStorage scan_storage;
  } temp_storage;

  // how many input level values we've processed in the page so far
  int value_count = s->input_value_count;
  // how many rows we've processed in the page so far
  int row_count = s->input_row_count;
  // how many leaf values we've processed in the page so far
  int leaf_count = s->input_leaf_count;
  // whether or not we need to continue checking for the first row
  bool skipped_values_set = s->page.skipped_values >= 0;

  while (value_count < target_value_count) {
    int const batch_size = min(max_batch_size, target_value_count - value_count);

    // start/end depth
    int start_depth, end_depth, d;
    get_nesting_bounds<rolling_buf_size, level_t>(
      start_depth, end_depth, d, s, rep, def, value_count, value_count + batch_size, t);

    // is this thread within row bounds? in the non skip_rows/num_rows case this will always
    // be true.
    int in_row_bounds = 1;

    // if we are in the skip_rows/num_rows case, we need to check against these limits
    if (bounds_set) {
      // get absolute thread row index
      int const is_new_row = start_depth == 0;
      int thread_row_count, block_row_count;
      block_scan(temp_storage.scan_storage)
        .InclusiveSum(is_new_row, thread_row_count, block_row_count);
      __syncthreads();

      // get absolute thread leaf index
      int const is_new_leaf = (d >= s->nesting_info[max_depth - 1].max_def_level);
      int thread_leaf_count, block_leaf_count;
      block_scan(temp_storage.scan_storage)
        .InclusiveSum(is_new_leaf, thread_leaf_count, block_leaf_count);
      __syncthreads();

      // if this thread is in row bounds
      int const row_index = (thread_row_count + row_count) - 1;
      in_row_bounds =
        (row_index >= s->row_index_lower_bound) && (row_index < (s->first_row + s->num_rows));

      // if we have not set skipped values yet, see if we found the first in-bounds row
      if (!skipped_values_set) {
        int local_count, global_count;
        block_scan(temp_storage.scan_storage)
          .InclusiveSum(in_row_bounds, local_count, global_count);
        __syncthreads();

        // we found it
        if (global_count > 0) {
          // this is the thread that represents the first row.
          if (local_count == 1 && in_row_bounds) {
            s->page.skipped_values = value_count + t;
            s->page.skipped_leaf_values =
              leaf_count + (is_new_leaf ? thread_leaf_count - 1 : thread_leaf_count);
          }
          skipped_values_set = true;
        }
      }

      row_count += block_row_count;
      leaf_count += block_leaf_count;
    }

    // increment value counts across all nesting depths
    for (int s_idx = 0; s_idx < max_depth; s_idx++) {
      int const in_nesting_bounds = (s_idx >= start_depth && s_idx <= end_depth && in_row_bounds);
      int const count = block_reduce(temp_storage.reduce_storage).Sum(in_nesting_bounds);
      __syncthreads();
      if (!t) {
        PageNestingInfo* pni = &s->page.nesting[s_idx];
        pni->batch_size += count;
      }
    }

    value_count += batch_size;
  }

  // update final outputs
  if (!t) {
    s->input_value_count = value_count;

    // only used in the skip_rows/num_rows case
    s->input_leaf_count = leaf_count;
    s->input_row_count  = row_count;
  }
}

/**
 * @brief Kernel for computing per-page column size information for all nesting levels.
 *
 * This function will write out the size field for each level of nesting.
 *
 * @param pages List of pages
 * @param chunks List of column chunks
 * @param min_row Row index to start reading at
 * @param num_rows Maximum number of rows to read. Pass as INT_MAX to guarantee reading all rows
 * @param is_base_pass Whether or not this is the base pass. We first have to compute
 * the full size information of every page before we come through in a second (trim) pass
 * to determine what subset of rows in this page we should be reading
 * @param compute_string_sizes Whether or not we should be computing string sizes
 * (PageInfo::str_bytes) as part of the pass
 */
template <typename level_t>
__global__ void __launch_bounds__(preprocess_block_size)
  gpuComputePageSizes(PageInfo* pages,
                      device_span<ColumnChunkDesc const> chunks,
                      size_t min_row,
                      size_t num_rows,
                      bool is_base_pass,
                      bool compute_string_sizes)
{
  __shared__ __align__(16) page_state_s state_g;

  page_state_s* const s = &state_g;
  int page_idx          = blockIdx.x;
  int t                 = threadIdx.x;
  PageInfo* pp          = &pages[page_idx];

  // whether or not we have repetition levels (lists)
  bool has_repetition = chunks[pp->chunk_idx].max_level[level_type::REPETITION] > 0;

  // the level stream decoders
  __shared__ rle_run<level_t> def_runs[rle_run_buffer_size];
  __shared__ rle_run<level_t> rep_runs[rle_run_buffer_size];
  rle_stream<level_t, preprocess_block_size> decoders[level_type::NUM_LEVEL_TYPES] = {{def_runs},
                                                                                     {rep_runs}};

  // setup page info
  if (!setupLocalPageInfo(s, pp, chunks, min_row, num_rows, all_types_filter{}, false)) { return; }

  // initialize the stream decoders (requires values computed in setupLocalPageInfo)
  // the size of the rolling batch buffer
  int const max_batch_size = rolling_buf_size;
  level_t* rep = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::REPETITION]);
  level_t* def = reinterpret_cast<level_t*>(pp->lvl_decode_buf[level_type::DEFINITION]);
  decoders[level_type::DEFINITION].init(s->col.level_bits[level_type::DEFINITION],
                                        s->abs_lvl_start[level_type::DEFINITION],
                                        s->abs_lvl_end[level_type::DEFINITION],
                                        max_batch_size,
                                        def,
                                        s->page.num_input_values);
  if (has_repetition) {
    decoders[level_type::REPETITION].init(s->col.level_bits[level_type::REPETITION],
                                          s->abs_lvl_start[level_type::REPETITION],
                                          s->abs_lvl_end[level_type::REPETITION],
                                          max_batch_size,
                                          rep,
                                          s->page.num_input_values);
  }
  __syncthreads();

  if (!t) {
    s->page.skipped_values      = -1;
    s->page.skipped_leaf_values = 0;
    s->page.str_bytes           = 0;
    s->input_row_count          = 0;
    s->input_value_count        = 0;

    // in the base pass, we're computing the number of rows, make sure we visit absolutely
    // everything
    if (is_base_pass) {
      s->first_row             = 0;
      s->num_rows              = INT_MAX;
      s->row_index_lower_bound = -1;
    }
  }

  // we only need to preprocess hierarchies with repetition in them (ie, hierarchies
  // containing lists anywhere within).
  compute_string_sizes =
    compute_string_sizes && ((s->col.data_type & 7) == BYTE_ARRAY && s->dtype_len != 4);

  // early out optimizations:

  // - if this is a flat hierarchy (no lists) and is not a string column. in this case we don't
  //   need to do the expensive work of traversing the level data to determine sizes. we can
  //   just compute it directly.
  if (!has_repetition && !compute_string_sizes) {
    int depth = 0;
    while (depth < s->page.num_output_nesting_levels) {
      auto const thread_depth = depth + t;
      if (thread_depth < s->page.num_output_nesting_levels) {
        if (is_base_pass) { pp->nesting[thread_depth].size = pp->num_input_values; }
        pp->nesting[thread_depth].batch_size = pp->num_input_values;
      }
      depth += blockDim.x;
    }
    return;
  }

  // in the trim pass, for anything with lists, we only need to fully process bounding pages (those
  // at the beginning or the end of the row bounds)
  if (!is_base_pass && !is_bounds_page(s, min_row, num_rows, has_repetition)) {
    int depth = 0;
    while (depth < s->page.num_output_nesting_levels) {
      auto const thread_depth = depth + t;
      if (thread_depth < s->page.num_output_nesting_levels) {
        // if we are not a bounding page (as checked above) then we are either
        // returning all rows/values from this page, or 0 of them
        pp->nesting[thread_depth].batch_size =
          (s->num_rows == 0 && !is_page_contained(s, min_row, num_rows))
            ? 0
            : pp->nesting[thread_depth].size;
      }
      depth += blockDim.x;
    }
    return;
  }

  // zero sizes
  int depth = 0;
  while (depth < s->page.num_output_nesting_levels) {
    auto const thread_depth = depth + t;
    if (thread_depth < s->page.num_output_nesting_levels) {
      s->page.nesting[thread_depth].batch_size = 0;
    }
    depth += blockDim.x;
  }
  __syncthreads();

  // the core loop. decode batches of level stream data using rle_stream objects
  // and pass the results to gpuUpdatePageSizes
  int processed = 0;
  while (processed < s->page.num_input_values) {
    // TODO: it would not take much more work to make it so that we could run both of these
    // decodes concurrently. there are a couple of shared variables internally that would have to
    // get dealt with but that's about it.
    if (has_repetition) {
      decoders[level_type::REPETITION].decode_next(t);
      __syncthreads();
    }
    // the # of rep/def levels will always be the same size
    processed += decoders[level_type::DEFINITION].decode_next(t);
    __syncthreads();

    // update page sizes
    gpuUpdatePageSizes<level_t>(s, processed, rep, def, t, !is_base_pass);
    __syncthreads();
  }

  // retrieve total string size.
  // TODO: make this block-based instead of just 1 warp
  if (compute_string_sizes) {
    if (t < 32) { s->page.str_bytes = gpuDecodeTotalPageStringSize(s, t); }
  }

  // update output results:
  // - real number of rows for the whole page
  // - nesting sizes for the whole page
  // - skipped value information for trimmed pages
  // - string bytes
  if (is_base_pass) {
    // nesting level 0 is the root column, so the size is also the # of rows
    if (!t) { pp->num_rows = s->page.nesting[0].batch_size; }

    // store off this batch size as the "full" size
    int depth = 0;
    while (depth < s->page.num_output_nesting_levels) {
      auto const thread_depth = depth + t;
      if (thread_depth < s->page.num_output_nesting_levels) {
        pp->nesting[thread_depth].size = pp->nesting[thread_depth].batch_size;
      }
      depth += blockDim.x;
    }
  }

  if (!t) {
    pp->skipped_values      = s->page.skipped_values;
    pp->skipped_leaf_values = s->page.skipped_leaf_values;
    pp->str_bytes           = s->page.str_bytes;
  }
}

}  // anonymous namespace

/**
 * @copydoc cudf::io::parquet::gpu::ComputePageSizes
 */
void ComputePageSizes(cudf::detail::hostdevice_vector<PageInfo>& pages,
                      cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                      size_t min_row,
                      size_t num_rows,
                      bool compute_num_rows,
                      bool compute_string_sizes,
                      int level_type_size,
                      rmm::cuda_stream_view stream)
{
  dim3 dim_block(preprocess_block_size, 1);
  dim3 dim_grid(pages.size(), 1);  // 1 threadblock per page

  // computes:
  // PageNestingInfo::size for each level of nesting, for each page.
  // This computes the size for the entire page, not taking row bounds into account.
  // If uses_custom_row_bounds is set to true, we have to do a second pass later that "trims"
  // the starting and ending read values to account for these bounds.
  if (level_type_size == 1) {
    gpuComputePageSizes<uint8_t><<<dim_grid, dim_block, 0, stream.value()>>>(
      pages.device_ptr(), chunks, min_row, num_rows, compute_num_rows, compute_string_sizes);
  } else {
    gpuComputePageSizes<uint16_t><<<dim_grid, dim_block, 0, stream.value()>>>(
      pages.device_ptr(), chunks, min_row, num_rows, compute_num_rows, compute_string_sizes);
  }
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/page_delta_decode.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "delta_binary.cuh" #include "page_string_utils.cuh" #include "parquet_gpu.hpp" #include <cudf/detail/utilities/cuda.cuh> #include <rmm/exec_policy.hpp> #include <thrust/transform_scan.h> namespace cudf::io::parquet::detail { namespace { constexpr int decode_block_size = 128; // DELTA_BYTE_ARRAY encoding (incremental encoding or front compression), is used for BYTE_ARRAY // columns. For each element in a sequence of strings, a prefix length from the preceding string // and a suffix is stored. The prefix lengths are DELTA_BINARY_PACKED encoded. The suffixes are // encoded with DELTA_LENGTH_BYTE_ARRAY encoding, which is a DELTA_BINARY_PACKED list of suffix // lengths, followed by the concatenated suffix data. 
struct delta_byte_array_decoder { uint8_t const* last_string; // pointer to last decoded string...needed for its prefix uint8_t const* suffix_char_data; // pointer to the start of character data uint8_t* temp_buf; // buffer used when skipping values uint32_t start_val; // decoded strings up to this index will be dumped to temp_buf uint32_t last_string_len; // length of the last decoded string delta_binary_decoder prefixes; // state of decoder for prefix lengths delta_binary_decoder suffixes; // state of decoder for suffix lengths // initialize the prefixes and suffixes blocks __device__ void init(uint8_t const* start, uint8_t const* end, uint32_t start_idx, uint8_t* temp) { auto const* suffix_start = prefixes.find_end_of_block(start, end); suffix_char_data = suffixes.find_end_of_block(suffix_start, end); last_string = nullptr; temp_buf = temp; start_val = start_idx; } // kind of like an inclusive scan for strings. takes prefix_len bytes from preceding // string and prepends to the suffix we've already copied into place. called from // within loop over values_in_mb, so this only needs to handle a single warp worth of data // at a time. __device__ void string_scan(uint8_t* strings_out, uint8_t const* last_string, uint32_t start_idx, uint32_t end_idx, uint32_t offset, uint32_t lane_id) { using cudf::detail::warp_size; // let p(n) === length(prefix(string_n)) // // if p(n-1) > p(n), then string_n can be completed when string_n-2 is completed. likewise if // p(m) > p(n), then string_n can be completed with string_m-1. however, if p(m) < p(n), then m // is a "blocker" for string_n; string_n can be completed only after string_m is. // // we will calculate the nearest blocking position for each lane, and then fill in string_0. we // then iterate, finding all lanes that have had their "blocker" filled in and completing them. // when all lanes are filled in, we return. 
this will still hit the worst case if p(n-1) < p(n) // for all n __shared__ __align__(8) int64_t prefix_lens[warp_size]; __shared__ __align__(8) uint8_t const* offsets[warp_size]; uint32_t const ln_idx = start_idx + lane_id; uint64_t prefix_len = ln_idx < end_idx ? prefixes.value_at(ln_idx) : 0; uint8_t* const lane_out = ln_idx < end_idx ? strings_out + offset : nullptr; prefix_lens[lane_id] = prefix_len; offsets[lane_id] = lane_out; // if all prefix_len's are zero, then there's nothing to do if (__all_sync(0xffff'ffff, prefix_len == 0)) { return; } // find a neighbor to the left that has a prefix length less than this lane. once that // neighbor is complete, this lane can be completed. int blocker = lane_id - 1; while (blocker > 0 && prefix_lens[blocker] != 0 && prefix_len <= prefix_lens[blocker]) { blocker--; } // fill in lane 0 (if necessary) if (lane_id == 0 && prefix_len > 0) { memcpy(lane_out, last_string, prefix_len); prefix_lens[0] = prefix_len = 0; } __syncwarp(); // now fill in blockers until done for (uint32_t i = 1; i < warp_size && i + start_idx < end_idx; i++) { if (prefix_len != 0 && prefix_lens[blocker] == 0 && lane_out != nullptr) { memcpy(lane_out, offsets[blocker], prefix_len); prefix_lens[lane_id] = prefix_len = 0; } // check for finished if (__all_sync(0xffff'ffff, prefix_len == 0)) { return; } } } // calculate a mini-batch of string values, writing the results to // `strings_out`. starting at global index `start_idx` and decoding // up to `num_values` strings. // called by all threads in a warp. used for strings <= 32 chars. 
// returns number of bytes written __device__ size_t calculate_string_values(uint8_t* strings_out, uint32_t start_idx, uint32_t num_values, uint32_t lane_id) { using cudf::detail::warp_size; using WarpScan = cub::WarpScan<uint64_t>; __shared__ WarpScan::TempStorage scan_temp; if (start_idx >= suffixes.value_count) { return 0; } auto end_idx = start_idx + min(suffixes.values_per_mb, num_values); end_idx = min(end_idx, static_cast<uint32_t>(suffixes.value_count)); auto p_strings_out = strings_out; auto p_temp_out = temp_buf; auto copy_batch = [&](uint8_t* out, uint32_t idx, uint32_t end) { uint32_t const ln_idx = idx + lane_id; // calculate offsets into suffix data uint64_t const suffix_len = ln_idx < end ? suffixes.value_at(ln_idx) : 0; uint64_t suffix_off = 0; WarpScan(scan_temp).ExclusiveSum(suffix_len, suffix_off); // calculate offsets into string data uint64_t const prefix_len = ln_idx < end ? prefixes.value_at(ln_idx) : 0; uint64_t const string_len = prefix_len + suffix_len; // get offset into output for each lane uint64_t string_off, warp_total; WarpScan(scan_temp).ExclusiveSum(string_len, string_off, warp_total); auto const so_ptr = out + string_off; // copy suffixes into string data if (ln_idx < end) { memcpy(so_ptr + prefix_len, suffix_char_data + suffix_off, suffix_len); } __syncwarp(); // copy prefixes into string data. string_scan(out, last_string, idx, end, string_off, lane_id); // save the position of the last computed string. this will be used in // the next iteration to reconstruct the string in lane 0. 
if (ln_idx == end - 1 || (ln_idx < end && lane_id == 31)) { // set last_string to this lane's string last_string = out + string_off; last_string_len = string_len; // and consume used suffix_char_data suffix_char_data += suffix_off + suffix_len; } return warp_total; }; uint64_t string_total = 0; for (int idx = start_idx; idx < end_idx; idx += warp_size) { auto const n_in_batch = min(warp_size, end_idx - idx); // account for the case where start_val occurs in the middle of this batch if (idx < start_val && idx + n_in_batch > start_val) { // dump idx...start_val into temp_buf copy_batch(p_temp_out, idx, start_val); __syncwarp(); // start_val...idx + n_in_batch into strings_out auto nbytes = copy_batch(p_strings_out, start_val, idx + n_in_batch); p_strings_out += nbytes; string_total = nbytes; } else { if (idx < start_val) { p_temp_out += copy_batch(p_temp_out, idx, end_idx); } else { auto nbytes = copy_batch(p_strings_out, idx, end_idx); p_strings_out += nbytes; string_total += nbytes; } } __syncwarp(); } return string_total; } // character parallel version of CalculateStringValues(). This is faster for strings longer than // 32 chars. __device__ size_t calculate_string_values_cp(uint8_t* strings_out, uint32_t start_idx, uint32_t num_values, uint32_t lane_id) { using cudf::detail::warp_size; __shared__ __align__(8) uint8_t* so_ptr; if (start_idx >= suffixes.value_count) { return; } auto end_idx = start_idx + min(suffixes.values_per_mb, num_values); end_idx = min(end_idx, static_cast<uint32_t>(suffixes.value_count)); if (lane_id == 0) { so_ptr = start_idx < start_val ? 
temp_buf : strings_out; } __syncwarp(); uint64_t string_total = 0; for (int idx = start_idx; idx < end_idx; idx++) { uint64_t const suffix_len = suffixes.value_at(idx); uint64_t const prefix_len = prefixes.value_at(idx); uint64_t const string_len = prefix_len + suffix_len; // copy prefix and suffix data into current strings_out position // for longer strings use a 4-byte version stolen from gather_chars_fn_string_parallel. if (string_len > 64) { if (prefix_len > 0) { wideStrcpy(so_ptr, last_string, prefix_len, lane_id); } if (suffix_len > 0) { wideStrcpy(so_ptr + prefix_len, suffix_char_data, suffix_len, lane_id); } } else { for (int i = lane_id; i < string_len; i += warp_size) { so_ptr[i] = i < prefix_len ? last_string[i] : suffix_char_data[i - prefix_len]; } } __syncwarp(); if (idx >= start_val) { string_total += string_len; } if (lane_id == 0) { last_string = so_ptr; last_string_len = string_len; suffix_char_data += suffix_len; if (idx == start_val - 1) { so_ptr = strings_out; } else { so_ptr += string_len; } } __syncwarp(); } return string_total; } // dump strings before start_val to temp buf __device__ void skip(bool use_char_ll) { using cudf::detail::warp_size; int const t = threadIdx.x; int const lane_id = t % warp_size; // is this even necessary? return if asking to skip the whole block. if (start_val >= prefixes.num_encoded_values(true)) { return; } // prefixes and suffixes will have the same parameters (it's checked earlier) auto const batch_size = prefixes.values_per_mb; uint32_t skip_pos = 0; while (prefixes.current_value_idx < start_val) { // warp 0 gets prefixes and warp 1 gets suffixes auto* const db = t < 32 ? &prefixes : &suffixes; // this will potentially decode past start_val, but that's ok if (t < 64) { db->decode_batch(); } __syncthreads(); // warp 0 decodes the batch. if (t < 32) { auto const num_to_decode = min(batch_size, start_val - skip_pos); auto const bytes_written = use_char_ll ? 
calculate_string_values_cp(temp_buf, skip_pos, num_to_decode, lane_id) : calculate_string_values(temp_buf, skip_pos, num_to_decode, lane_id); // store last_string someplace safe in temp buffer if (t == 0) { memcpy(temp_buf + bytes_written, last_string, last_string_len); last_string = temp_buf + bytes_written; } } skip_pos += prefixes.values_per_mb; __syncthreads(); } } }; // Decode page data that is DELTA_BINARY_PACKED encoded. This encoding is // only used for int32 and int64 physical types (and appears to only be used // with V2 page headers; see https://www.mail-archive.com/dev@parquet.apache.org/msg11826.html). // this kernel only needs 96 threads (3 warps)(for now). template <typename level_t> __global__ void __launch_bounds__(96) gpuDecodeDeltaBinary(PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows, int32_t* error_code) { using cudf::detail::warp_size; __shared__ __align__(16) delta_binary_decoder db_state; __shared__ __align__(16) page_state_s state_g; __shared__ __align__(16) page_state_buffers_s<delta_rolling_buf_size, 0, 0> state_buffers; page_state_s* const s = &state_g; auto* const sb = &state_buffers; int const page_idx = blockIdx.x; int const t = threadIdx.x; int const lane_id = t % warp_size; auto* const db = &db_state; [[maybe_unused]] null_count_back_copier _{s, t}; auto const mask = decode_kernel_mask::DELTA_BINARY; if (!setupLocalPageInfo( s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{mask}, true)) { return; } bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; // copying logic from gpuDecodePageData. PageNestingDecodeInfo const* nesting_info_base = s->nesting_info; __shared__ level_t rep[delta_rolling_buf_size]; // circular buffer of repetition level values __shared__ level_t def[delta_rolling_buf_size]; // circular buffer of definition level values // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t const skipped_leaf_values = s->page.skipped_leaf_values; // initialize delta state if (t == 0) { db->init_binary_block(s->data_start, s->data_end); } __syncthreads(); auto const batch_size = db->values_per_mb; if (batch_size > max_delta_mini_block_size) { set_error(static_cast<int32_t>(decode_error::DELTA_PARAMS_UNSUPPORTED), error_code); return; } // if skipped_leaf_values is non-zero, then we need to decode up to the first mini-block // that has a value we need. if (skipped_leaf_values > 0) { db->skip_values(skipped_leaf_values); } while (s->error == 0 && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { uint32_t target_pos; uint32_t const src_pos = s->src_pos; if (t < 2 * warp_size) { // warp0..1 target_pos = min(src_pos + 2 * batch_size, s->nz_count + batch_size); } else { // warp2 target_pos = min(s->nz_count, src_pos + batch_size); } // TODO(ets): see if this sync can be removed __syncthreads(); // warp0 will decode the rep/def levels, warp1 will unpack a mini-batch of deltas. // warp2 waits one cycle for warps 0/1 to produce a batch, and then stuffs values // into the proper location in the output. if (t < warp_size) { // warp 0 // decode repetition and definition levels. // - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels<delta_rolling_buf_size, level_t>(s, sb, target_pos, rep, def, t); } else if (t < 2 * warp_size) { // warp 1 db->decode_batch(); } else if (src_pos < target_pos) { // warp 2 // nesting level that is storing actual leaf values int const leaf_level_index = s->col.max_nesting_depth - 1; // process the mini-block in batches of 32 for (uint32_t sp = src_pos + lane_id; sp < src_pos + batch_size; sp += 32) { // the position in the output column/buffer int32_t dst_pos = sb->nz_idx[rolling_index<delta_rolling_buf_size>(sp)]; // handle skip_rows here. flat hierarchies can just skip up to first_row. 
if (!has_repetition) { dst_pos -= s->first_row; } // place value for this thread if (dst_pos >= 0 && sp < target_pos) { void* const dst = nesting_info_base[leaf_level_index].data_out + dst_pos * s->dtype_len; auto const val = db->value_at(sp + skipped_leaf_values); switch (s->dtype_len) { case 1: *static_cast<int8_t*>(dst) = val; break; case 2: *static_cast<int16_t*>(dst) = val; break; case 4: *static_cast<int32_t*>(dst) = val; break; case 8: *static_cast<int64_t*>(dst) = val; break; } } } if (lane_id == 0) { s->src_pos = src_pos + batch_size; } } __syncthreads(); } if (t == 0 and s->error != 0) { set_error(s->error, error_code); } } // Decode page data that is DELTA_BYTE_ARRAY packed. This encoding consists of a DELTA_BINARY_PACKED // array of prefix lengths, followed by a DELTA_BINARY_PACKED array of suffix lengths, followed by // the suffixes (technically the suffixes are DELTA_LENGTH_BYTE_ARRAY encoded). The latter two can // be used to create an offsets array for the suffix data, but then this needs to be combined with // the prefix lengths to do the final decode for each value. Because the lengths of the prefixes and // suffixes are not encoded in the header, we're going to have to first do a quick pass through them // to find the start/end of each structure. 
template <typename level_t> __global__ void __launch_bounds__(decode_block_size) gpuDecodeDeltaByteArray(PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows, int32_t* error_code) { using cudf::detail::warp_size; __shared__ __align__(16) delta_byte_array_decoder db_state; __shared__ __align__(16) page_state_s state_g; __shared__ __align__(16) page_state_buffers_s<delta_rolling_buf_size, 0, 0> state_buffers; page_state_s* const s = &state_g; auto* const sb = &state_buffers; int const page_idx = blockIdx.x; int const t = threadIdx.x; int const lane_id = t % warp_size; auto* const prefix_db = &db_state.prefixes; auto* const suffix_db = &db_state.suffixes; auto* const dba = &db_state; [[maybe_unused]] null_count_back_copier _{s, t}; auto const mask = decode_kernel_mask::DELTA_BYTE_ARRAY; if (!setupLocalPageInfo( s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{mask}, true)) { return; } bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; // choose a character parallel string copy when the average string is longer than a warp auto const use_char_ll = (s->page.str_bytes / s->page.num_valids) > cudf::detail::warp_size; // copying logic from gpuDecodePageData. PageNestingDecodeInfo const* nesting_info_base = s->nesting_info; __shared__ level_t rep[delta_rolling_buf_size]; // circular buffer of repetition level values __shared__ level_t def[delta_rolling_buf_size]; // circular buffer of definition level values // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t const skipped_leaf_values = s->page.skipped_leaf_values; if (t == 0) { // initialize the prefixes and suffixes blocks dba->init(s->data_start, s->data_end, s->page.start_val, s->page.temp_string_buf); } __syncthreads(); // assert that prefix and suffix have same mini-block size if (prefix_db->values_per_mb != suffix_db->values_per_mb or prefix_db->block_size != suffix_db->block_size or prefix_db->value_count != suffix_db->value_count) { set_error(static_cast<int32_t>(decode_error::DELTA_PARAM_MISMATCH), error_code); return; } // pointer to location to output final strings int const leaf_level_index = s->col.max_nesting_depth - 1; auto strings_data = nesting_info_base[leaf_level_index].string_out; auto const batch_size = prefix_db->values_per_mb; if (batch_size > max_delta_mini_block_size) { set_error(static_cast<int32_t>(decode_error::DELTA_PARAMS_UNSUPPORTED), error_code); return; } // if this is a bounds page and nested, then we need to skip up front. non-nested will work // its way through the page. int string_pos = has_repetition ? s->page.start_val : 0; auto const is_bounds_pg = is_bounds_page(s, min_row, num_rows, has_repetition); if (is_bounds_pg && string_pos > 0) { dba->skip(use_char_ll); } while (!s->error && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { uint32_t target_pos; uint32_t const src_pos = s->src_pos; if (t < 3 * warp_size) { // warp 0..2 target_pos = min(src_pos + 2 * batch_size, s->nz_count + s->first_row + batch_size); } else { // warp 3 target_pos = min(s->nz_count, src_pos + batch_size); } // TODO(ets): see if this sync can be removed __syncthreads(); // warp0 will decode the rep/def levels, warp1 will unpack a mini-batch of prefixes, warp 2 will // unpack a mini-batch of suffixes. warp3 waits one cycle for warps 0-2 to produce a batch, and // then stuffs values into the proper location in the output. if (t < warp_size) { // decode repetition and definition levels. 
// - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels<delta_rolling_buf_size, level_t>(s, sb, target_pos, rep, def, t); } else if (t < 2 * warp_size) { // warp 1 prefix_db->decode_batch(); } else if (t < 3 * warp_size) { // warp 2 suffix_db->decode_batch(); } else if (src_pos < target_pos) { // warp 3 int const nproc = min(batch_size, s->page.end_val - string_pos); strings_data += use_char_ll ? dba->calculate_string_values_cp(strings_data, string_pos, nproc, lane_id) : dba->calculate_string_values(strings_data, string_pos, nproc, lane_id); string_pos += nproc; // process the mini-block in batches of 32 for (uint32_t sp = src_pos + lane_id; sp < src_pos + batch_size; sp += 32) { // the position in the output column/buffer int dst_pos = sb->nz_idx[rolling_index<delta_rolling_buf_size>(sp)]; // handle skip_rows here. flat hierarchies can just skip up to first_row. if (!has_repetition) { dst_pos -= s->first_row; } if (dst_pos >= 0 && sp < target_pos) { auto const offptr = reinterpret_cast<size_type*>(nesting_info_base[leaf_level_index].data_out) + dst_pos; auto const src_idx = sp + skipped_leaf_values; *offptr = prefix_db->value_at(src_idx) + suffix_db->value_at(src_idx); } __syncwarp(); } if (lane_id == 0) { s->src_pos = src_pos + batch_size; } } __syncthreads(); } // now turn array of lengths into offsets int value_count = nesting_info_base[leaf_level_index].value_count; // if no repetition we haven't calculated start/end bounds and instead just skipped // values until we reach first_row. account for that here. 
if (!has_repetition) { value_count -= s->first_row; } auto const offptr = reinterpret_cast<size_type*>(nesting_info_base[leaf_level_index].data_out); block_excl_sum<decode_block_size>(offptr, value_count, s->page.str_offset); if (t == 0 and s->error != 0) { cuda::atomic_ref<int32_t, cuda::thread_scope_device> ref{*error_code}; ref.fetch_or(s->error, cuda::std::memory_order_relaxed); } } } // anonymous namespace /** * @copydoc cudf::io::parquet::detail::DecodeDeltaBinary */ void __host__ DecodeDeltaBinary(cudf::detail::hostdevice_vector<PageInfo>& pages, cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks, size_t num_rows, size_t min_row, int level_type_size, int32_t* error_code, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); dim3 dim_block(96, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { gpuDecodeDeltaBinary<uint8_t><<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows, error_code); } else { gpuDecodeDeltaBinary<uint16_t><<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows, error_code); } } /** * @copydoc cudf::io::parquet::gpu::DecodeDeltaByteArray */ void __host__ DecodeDeltaByteArray(cudf::detail::hostdevice_vector<PageInfo>& pages, cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks, size_t num_rows, size_t min_row, int level_type_size, int32_t* error_code, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); dim3 const dim_block(decode_block_size, 1); dim3 const dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { gpuDecodeDeltaByteArray<uint8_t><<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows, error_code); } else { gpuDecodeDeltaByteArray<uint16_t><<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows, error_code); } } } // 
namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/writer_impl.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO parquet writer class implementation */ #include "compact_protocol_reader.hpp" #include "compact_protocol_writer.hpp" #include "parquet_common.hpp" #include "parquet_gpu.cuh" #include "writer_impl.hpp" #include <io/comp/nvcomp_adapter.hpp> #include <io/statistics/column_statistics.cuh> #include <io/utilities/column_utils.cuh> #include <io/utilities/config_utils.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/detail/get_value.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/linked_column.hpp> #include <cudf/detail/utilities/pinned_host_vector.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/lists/detail/dremel.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <thrust/fill.h> #include <thrust/for_each.h> #include <algorithm> #include <cstring> #include <numeric> #include <utility> namespace cudf::io::parquet::detail { using namespace cudf::io::detail; struct aggregate_writer_metadata { aggregate_writer_metadata(host_span<partition_info const> partitions, host_span<std::map<std::string, std::string> const> kv_md, 
host_span<SchemaElement const> tbl_schema, size_type num_columns, statistics_freq stats_granularity) : version(1), schema(std::vector<SchemaElement>(tbl_schema.begin(), tbl_schema.end())), files(partitions.size()) { for (size_t i = 0; i < partitions.size(); ++i) { this->files[i].num_rows = partitions[i].num_rows; } if (stats_granularity != statistics_freq::STATISTICS_NONE) { ColumnOrder default_order = {ColumnOrder::TYPE_ORDER}; this->column_orders = std::vector<ColumnOrder>(num_columns, default_order); } for (size_t p = 0; p < kv_md.size(); ++p) { std::transform(kv_md[p].begin(), kv_md[p].end(), std::back_inserter(this->files[p].key_value_metadata), [](auto const& kv) { return KeyValue{kv.first, kv.second}; }); } } aggregate_writer_metadata(aggregate_writer_metadata const&) = default; void update_files(host_span<partition_info const> partitions) { CUDF_EXPECTS(partitions.size() == this->files.size(), "New partitions must be same size as previously passed number of partitions"); for (size_t i = 0; i < partitions.size(); ++i) { this->files[i].num_rows += partitions[i].num_rows; } } FileMetaData get_metadata(size_t part) { CUDF_EXPECTS(part < files.size(), "Invalid part index queried"); FileMetaData meta{}; meta.version = this->version; meta.schema = this->schema; meta.num_rows = this->files[part].num_rows; meta.row_groups = this->files[part].row_groups; meta.key_value_metadata = this->files[part].key_value_metadata; meta.created_by = this->created_by; meta.column_orders = this->column_orders; return meta; } void set_file_paths(host_span<std::string const> column_chunks_file_path) { for (size_t p = 0; p < this->files.size(); ++p) { auto& file = this->files[p]; auto const& file_path = column_chunks_file_path[p]; for (auto& rowgroup : file.row_groups) { for (auto& col : rowgroup.columns) { col.file_path = file_path; } } } } FileMetaData get_merged_metadata() { FileMetaData merged_md; for (size_t p = 0; p < this->files.size(); ++p) { auto& file = this->files[p]; if (p 
== 0) { merged_md = this->get_metadata(0); } else { merged_md.row_groups.insert(merged_md.row_groups.end(), std::make_move_iterator(file.row_groups.begin()), std::make_move_iterator(file.row_groups.end())); merged_md.num_rows += file.num_rows; } } return merged_md; } std::vector<size_t> num_row_groups_per_file() { std::vector<size_t> global_rowgroup_base; std::transform(this->files.begin(), this->files.end(), std::back_inserter(global_rowgroup_base), [](auto const& part) { return part.row_groups.size(); }); return global_rowgroup_base; } [[nodiscard]] bool schema_matches(std::vector<SchemaElement> const& schema) const { return this->schema == schema; } auto& file(size_t p) { return files[p]; } [[nodiscard]] size_t num_files() const { return files.size(); } private: int32_t version = 0; std::vector<SchemaElement> schema; struct per_file_metadata { int64_t num_rows = 0; std::vector<RowGroup> row_groups; std::vector<KeyValue> key_value_metadata; std::vector<OffsetIndex> offset_indexes; std::vector<std::vector<uint8_t>> column_indexes; }; std::vector<per_file_metadata> files; std::string created_by = ""; thrust::optional<std::vector<ColumnOrder>> column_orders = thrust::nullopt; }; namespace { /** * @brief Function that translates GDF compression to parquet compression. * * @param compression The compression type * @return The supported Parquet compression */ Compression to_parquet_compression(compression_type compression) { switch (compression) { case compression_type::AUTO: case compression_type::SNAPPY: return Compression::SNAPPY; case compression_type::ZSTD: return Compression::ZSTD; case compression_type::NONE: return Compression::UNCOMPRESSED; default: CUDF_FAIL("Unsupported compression type"); } } /** * @brief Convert a mask of encodings to a vector. 
* * @param encodings Vector of `Encoding`s to populate * @param enc_mask Mask of encodings used */ void update_chunk_encodings(std::vector<Encoding>& encodings, uint32_t enc_mask) { for (uint8_t enc = 0; enc < static_cast<uint8_t>(Encoding::NUM_ENCODINGS); enc++) { auto const enc_enum = static_cast<Encoding>(enc); if ((enc_mask & encoding_to_mask(enc_enum)) != 0) { encodings.push_back(enc_enum); } } } /** * @brief Compute size (in bytes) of the data stored in the given column. * * @param column The input column * @param stream CUDA stream used for device memory operations and kernel launches * @return The data size of the input */ size_t column_size(column_view const& column, rmm::cuda_stream_view stream) { if (column.is_empty()) { return 0; } if (is_fixed_width(column.type())) { return size_of(column.type()) * column.size(); } else if (column.type().id() == type_id::STRING) { auto const scol = strings_column_view(column); return cudf::detail::get_value<size_type>(scol.offsets(), column.size(), stream) - cudf::detail::get_value<size_type>(scol.offsets(), 0, stream); } else if (column.type().id() == type_id::STRUCT) { auto const scol = structs_column_view(column); size_t ret = 0; for (int i = 0; i < scol.num_children(); i++) { ret += column_size(scol.get_sliced_child(i), stream); } return ret; } else if (column.type().id() == type_id::LIST) { auto const lcol = lists_column_view(column); return column_size(lcol.get_sliced_child(stream), stream); } CUDF_FAIL("Unexpected compound type"); } // checks to see if the given column has a fixed size. This doesn't // check every row, so assumes string and list columns are not fixed, even // if each row is the same width. // TODO: update this if FIXED_LEN_BYTE_ARRAY is ever supported for writes. 
bool is_col_fixed_width(column_view const& column) { if (column.type().id() == type_id::STRUCT) { return std::all_of(column.child_begin(), column.child_end(), is_col_fixed_width); } return is_fixed_width(column.type()); } /** * @brief Extends SchemaElement to add members required in constructing parquet_column_view * * Added members are: * 1. leaf_column: Pointer to leaf linked_column_view which points to the corresponding data stream * of a leaf schema node. For non-leaf struct node, this is nullptr. * 2. stats_dtype: datatype for statistics calculation required for the data stream of a leaf node. * 3. ts_scale: scale to multiply or divide timestamp by in order to convert timestamp to parquet * supported types */ struct schema_tree_node : public SchemaElement { cudf::detail::LinkedColPtr leaf_column; statistics_dtype stats_dtype; int32_t ts_scale; // TODO(fut): Think about making schema a class that holds a vector of schema_tree_nodes. The // function construct_schema_tree could be its constructor. It can have method to get the per // column nullability given a schema node index corresponding to a leaf schema. 
Much easier than // that is a method to get path in schema, given a leaf node }; struct leaf_schema_fn { schema_tree_node& col_schema; cudf::detail::LinkedColPtr const& col; column_in_metadata const& col_meta; bool timestamp_is_int96; bool timestamp_is_utc; template <typename T> std::enable_if_t<std::is_same_v<T, bool>, void> operator()() { col_schema.type = Type::BOOLEAN; col_schema.stats_dtype = statistics_dtype::dtype_bool; // BOOLEAN needs no converted or logical type } template <typename T> std::enable_if_t<std::is_same_v<T, int8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; col_schema.logical_type = LogicalType{IntType{8, true}}; } template <typename T> std::enable_if_t<std::is_same_v<T, int16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_16; col_schema.stats_dtype = statistics_dtype::dtype_int16; col_schema.logical_type = LogicalType{IntType{16, true}}; } template <typename T> std::enable_if_t<std::is_same_v<T, int32_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.stats_dtype = statistics_dtype::dtype_int32; // INT32 needs no converted or logical type } template <typename T> std::enable_if_t<std::is_same_v<T, int64_t>, void> operator()() { col_schema.type = Type::INT64; col_schema.stats_dtype = statistics_dtype::dtype_int64; // INT64 needs no converted or logical type } template <typename T> std::enable_if_t<std::is_same_v<T, uint8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; col_schema.logical_type = LogicalType{IntType{8, false}}; } template <typename T> std::enable_if_t<std::is_same_v<T, uint16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_16; col_schema.stats_dtype = 
statistics_dtype::dtype_int16;
    col_schema.logical_type = LogicalType{IntType{16, false}};
  }

  // uint32: stored as physical INT32 with an unsigned logical/converted annotation
  template <typename T>
  std::enable_if_t<std::is_same_v<T, uint32_t>, void> operator()()
  {
    col_schema.type           = Type::INT32;
    col_schema.converted_type = ConvertedType::UINT_32;
    col_schema.stats_dtype    = statistics_dtype::dtype_int32;
    col_schema.logical_type   = LogicalType{IntType{32, false}};
  }

  // uint64: stored as physical INT64 with an unsigned logical/converted annotation
  template <typename T>
  std::enable_if_t<std::is_same_v<T, uint64_t>, void> operator()()
  {
    col_schema.type           = Type::INT64;
    col_schema.converted_type = ConvertedType::UINT_64;
    col_schema.stats_dtype    = statistics_dtype::dtype_int64;
    col_schema.logical_type   = LogicalType{IntType{64, false}};
  }

  template <typename T>
  std::enable_if_t<std::is_same_v<T, float>, void> operator()()
  {
    col_schema.type        = Type::FLOAT;
    col_schema.stats_dtype = statistics_dtype::dtype_float32;
    // FLOAT needs no converted or logical type
  }

  template <typename T>
  std::enable_if_t<std::is_same_v<T, double>, void> operator()()
  {
    col_schema.type        = Type::DOUBLE;
    col_schema.stats_dtype = statistics_dtype::dtype_float64;
    // DOUBLE needs no converted or logical type
  }

  // strings: BYTE_ARRAY; either raw binary (no annotation) or UTF8-annotated,
  // depending on the user-provided output-as-binary metadata flag
  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::string_view>, void> operator()()
  {
    col_schema.type = Type::BYTE_ARRAY;
    if (col_meta.is_enabled_output_as_binary()) {
      col_schema.stats_dtype = statistics_dtype::dtype_byte_array;
      // BYTE_ARRAY needs no converted or logical type
    } else {
      col_schema.converted_type = ConvertedType::UTF8;
      col_schema.stats_dtype    = statistics_dtype::dtype_string;
      col_schema.logical_type   = LogicalType{LogicalType::STRING};
    }
  }

  // timestamp_D: Parquet DATE is a 32-bit day count, matching cudf's day-resolution timestamp
  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::timestamp_D>, void> operator()()
  {
    col_schema.type           = Type::INT32;
    col_schema.converted_type = ConvertedType::DATE;
    col_schema.stats_dtype    = statistics_dtype::dtype_int32;
    col_schema.logical_type   = LogicalType{LogicalType::DATE};
  }

  // timestamp_s: seconds are scaled to millis (ts_scale = 1000) since Parquet has no
  // seconds unit; INT96 output (deprecated) carries no logical/converted annotation
  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::timestamp_s>, void> operator()()
  {
    col_schema.type        = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
    col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
    col_schema.ts_scale    = 1000;
    if (not timestamp_is_int96) {
      col_schema.converted_type = ConvertedType::TIMESTAMP_MILLIS;
      col_schema.logical_type   = LogicalType{TimestampType{timestamp_is_utc, TimeUnit::MILLIS}};
    }
  }

  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::timestamp_ms>, void> operator()()
  {
    col_schema.type        = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
    col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
    if (not timestamp_is_int96) {
      col_schema.converted_type = ConvertedType::TIMESTAMP_MILLIS;
      col_schema.logical_type   = LogicalType{TimestampType{timestamp_is_utc, TimeUnit::MILLIS}};
    }
  }

  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::timestamp_us>, void> operator()()
  {
    col_schema.type        = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
    col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
    if (not timestamp_is_int96) {
      col_schema.converted_type = ConvertedType::TIMESTAMP_MICROS;
      col_schema.logical_type   = LogicalType{TimestampType{timestamp_is_utc, TimeUnit::MICROS}};
    }
  }

  // timestamp_ns: nanos have a logical type but no converted type (pre-logical-type
  // readers cannot represent nanos); INT96 needs nanos scaled down to micros
  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::timestamp_ns>, void> operator()()
  {
    col_schema.type           = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
    col_schema.converted_type = thrust::nullopt;
    col_schema.stats_dtype    = statistics_dtype::dtype_timestamp64;
    if (timestamp_is_int96) {
      col_schema.ts_scale = -1000;  // negative value indicates division by absolute value
    }
    // set logical type if it's not int96
    else {
      col_schema.logical_type = LogicalType{TimestampType{timestamp_is_utc, TimeUnit::NANOS}};
    }
  }

  // unsupported outside cudf for parquet 1.0.
  // duration_D: days are scaled to milliseconds (24 * 60 * 60 * 1000) and written as TIME_MILLIS
  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::duration_D>, void> operator()()
  {
    col_schema.type           = Type::INT32;
    col_schema.converted_type = ConvertedType::TIME_MILLIS;
    col_schema.stats_dtype    = statistics_dtype::dtype_int32;
    col_schema.ts_scale       = 24 * 60 * 60 * 1000;
    col_schema.logical_type   = LogicalType{TimeType{timestamp_is_utc, TimeUnit::MILLIS}};
  }

  // duration_s: seconds scaled to millis, as Parquet TIME has no seconds unit
  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::duration_s>, void> operator()()
  {
    col_schema.type           = Type::INT32;
    col_schema.converted_type = ConvertedType::TIME_MILLIS;
    col_schema.stats_dtype    = statistics_dtype::dtype_int32;
    col_schema.ts_scale       = 1000;
    col_schema.logical_type   = LogicalType{TimeType{timestamp_is_utc, TimeUnit::MILLIS}};
  }

  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::duration_ms>, void> operator()()
  {
    col_schema.type           = Type::INT32;
    col_schema.converted_type = ConvertedType::TIME_MILLIS;
    col_schema.stats_dtype    = statistics_dtype::dtype_int32;
    col_schema.logical_type   = LogicalType{TimeType{timestamp_is_utc, TimeUnit::MILLIS}};
  }

  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::duration_us>, void> operator()()
  {
    col_schema.type           = Type::INT64;
    col_schema.converted_type = ConvertedType::TIME_MICROS;
    col_schema.stats_dtype    = statistics_dtype::dtype_int64;
    col_schema.logical_type   = LogicalType{TimeType{timestamp_is_utc, TimeUnit::MICROS}};
  }

  // unsupported outside cudf for parquet 1.0.
  // duration_ns: logical type only (nanos have no converted-type equivalent)
  template <typename T>
  std::enable_if_t<std::is_same_v<T, cudf::duration_ns>, void> operator()()
  {
    col_schema.type         = Type::INT64;
    col_schema.stats_dtype  = statistics_dtype::dtype_int64;
    col_schema.logical_type = LogicalType{TimeType{timestamp_is_utc, TimeUnit::NANOS}};
  }

  // fixed point: physical type depends on width (INT32/INT64/FIXED_LEN_BYTE_ARRAY for
  // decimal32/64/128); precision defaults to the max for the width unless user-set
  template <typename T>
  std::enable_if_t<cudf::is_fixed_point<T>(), void> operator()()
  {
    if (std::is_same_v<T, numeric::decimal32>) {
      col_schema.type              = Type::INT32;
      col_schema.stats_dtype       = statistics_dtype::dtype_int32;
      col_schema.decimal_precision = MAX_DECIMAL32_PRECISION;
      col_schema.logical_type      = LogicalType{DecimalType{0, MAX_DECIMAL32_PRECISION}};
    } else if (std::is_same_v<T, numeric::decimal64>) {
      col_schema.type              = Type::INT64;
      col_schema.stats_dtype       = statistics_dtype::dtype_decimal64;
      col_schema.decimal_precision = MAX_DECIMAL64_PRECISION;
      col_schema.logical_type      = LogicalType{DecimalType{0, MAX_DECIMAL64_PRECISION}};
    } else if (std::is_same_v<T, numeric::decimal128>) {
      col_schema.type              = Type::FIXED_LEN_BYTE_ARRAY;
      col_schema.type_length       = sizeof(__int128_t);
      col_schema.stats_dtype       = statistics_dtype::dtype_decimal128;
      col_schema.decimal_precision = MAX_DECIMAL128_PRECISION;
      col_schema.logical_type      = LogicalType{DecimalType{0, MAX_DECIMAL128_PRECISION}};
    } else {
      CUDF_FAIL("Unsupported fixed point type for parquet writer");
    }
    col_schema.converted_type = ConvertedType::DECIMAL;
    col_schema.decimal_scale = -col->type().scale();  // parquet and cudf disagree about scale signs
    col_schema.logical_type->decimal_type->scale = -col->type().scale();
    if (col_meta.is_decimal_precision_set()) {
      CUDF_EXPECTS(col_meta.get_decimal_precision() >= col_schema.decimal_scale,
                   "Precision must be equal to or greater than scale!");
      if (col_schema.type == Type::INT64 and col_meta.get_decimal_precision() < 10) {
        CUDF_LOG_WARN("Parquet writer: writing a decimal column with precision < 10 as int64");
      }
      col_schema.decimal_precision                     = col_meta.get_decimal_precision();
      col_schema.logical_type->decimal_type->precision = col_meta.get_decimal_precision();
    }
  }

  // nested types never reach this functor; they are handled in construct_schema_tree
  template <typename T>
  std::enable_if_t<cudf::is_nested<T>(), void> operator()()
  {
    CUDF_FAIL("This functor is only meant for physical data types");
  }

  template <typename T>
  std::enable_if_t<cudf::is_dictionary<T>(), void> operator()()
  {
    CUDF_FAIL("Dictionary columns are not supported for writing");
  }
};

/**
 * @brief Decide whether a column is written as OPTIONAL (nullable) or REQUIRED.
 *
 * User-provided metadata wins when it defines nullability, but it may not claim
 * non-nullable for a column that actually contains nulls. Without metadata, chunked
 * writes (write_mode == NO) conservatively assume nullable, since later chunks may
 * contain nulls even if this one does not.
 *
 * @param col Input column (linked-column wrapper)
 * @param col_meta User-provided metadata for this column
 * @param write_mode YES for a single write, NO for chunked writes
 * @return true if the column should be written as OPTIONAL
 */
inline bool is_col_nullable(cudf::detail::LinkedColPtr const& col,
                            column_in_metadata const& col_meta,
                            single_write_mode write_mode)
{
  if (col_meta.is_nullability_defined()) {
    CUDF_EXPECTS(col_meta.nullable() or col->null_count() == 0,
                 "Mismatch in metadata prescribed nullability and input column. "
                 "Metadata for input column with nulls cannot prescribe nullability = false");
    return col_meta.nullable();
  }
  // For chunked write, when not provided nullability, we assume the worst case scenario
  // that all columns are nullable.
  return write_mode == single_write_mode::NO or col->nullable();
}

/**
 * @brief Construct schema from input columns and per-column input options
 *
 * Recursively traverses through linked_columns and corresponding metadata to construct schema tree.
 * The resulting schema tree is stored in a vector in pre-order traversal order.
 */
std::vector<schema_tree_node> construct_schema_tree(
  cudf::detail::LinkedColVector const& linked_columns,
  table_input_metadata& metadata,
  single_write_mode write_mode,
  bool int96_timestamps,
  bool utc_timestamps)
{
  std::vector<schema_tree_node> schema;
  // Node 0 is always the synthetic "schema" root; all top-level columns hang off it.
  schema_tree_node root{};
  root.type            = UNDEFINED_TYPE;
  root.repetition_type = NO_REPETITION_TYPE;
  root.name            = "schema";
  root.num_children    = linked_columns.size();
  root.parent_idx      = -1;  // root schema has no parent
  schema.push_back(std::move(root));

  // Recursive visitor: appends the schema node(s) for `col` under `parent_idx`.
  // Declared as std::function so it can call itself for nested children.
  std::function<void(cudf::detail::LinkedColPtr const&, column_in_metadata&, size_t)> add_schema =
    [&](cudf::detail::LinkedColPtr const& col, column_in_metadata& col_meta, size_t parent_idx) {
      bool col_nullable = is_col_nullable(col, col_meta, write_mode);

      // Field ids are only attached to named fields, never to the synthetic "list" level.
      auto set_field_id = [&schema, parent_idx](schema_tree_node& s,
                                                column_in_metadata const& col_meta) {
        if (schema[parent_idx].name != "list" and col_meta.is_parquet_field_id_set()) {
          s.field_id = col_meta.get_parquet_field_id();
        }
      };

      // True for a LIST column whose leaf child is UINT8 (i.e. list<int8> candidate for
      // being written as raw BYTE_ARRAY instead of a list).
      auto is_last_list_child = [](cudf::detail::LinkedColPtr col) {
        if (col->type().id() != type_id::LIST) { return false; }
        auto const child_col_type =
          col->children[lists_column_view::child_column_index]->type().id();
        return child_col_type == type_id::UINT8;
      };

      // There is a special case for a list<int8> column with one byte column child. This column can
      // have a special flag that indicates we write this out as binary instead of a list. This is a
      // more efficient storage mechanism for a single-depth list of bytes, but is a departure from
      // original cuIO behavior so it is locked behind the option. If the option is selected on a
      // column that isn't a single-depth list<int8> the code will throw.
      if (col_meta.is_enabled_output_as_binary() && is_last_list_child(col)) {
        CUDF_EXPECTS(col_meta.num_children() == 2 or col_meta.num_children() == 0,
                     "Binary column's corresponding metadata should have zero or two children!");
        if (col_meta.num_children() > 0) {
          CUDF_EXPECTS(col->children[lists_column_view::child_column_index]->children.empty(),
                       "Binary column must not be nested!");
        }

        schema_tree_node col_schema{};
        col_schema.type            = Type::BYTE_ARRAY;
        col_schema.converted_type  = thrust::nullopt;
        col_schema.stats_dtype     = statistics_dtype::dtype_byte_array;
        col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED;
        col_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
        col_schema.parent_idx  = parent_idx;
        col_schema.leaf_column = col;
        set_field_id(col_schema, col_meta);
        col_schema.output_as_byte_array = col_meta.is_enabled_output_as_binary();
        schema.push_back(col_schema);
      } else if (col->type().id() == type_id::STRUCT) {
        // if struct, add current and recursively call for all children
        schema_tree_node struct_schema{};
        struct_schema.repetition_type =
          col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;

        struct_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
        struct_schema.num_children = col->children.size();
        struct_schema.parent_idx   = parent_idx;
        set_field_id(struct_schema, col_meta);
        schema.push_back(std::move(struct_schema));

        auto struct_node_index = schema.size() - 1;
        // for (auto child_it = col->children.begin(); child_it < col->children.end(); child_it++) {
        //   add_schema(*child_it, struct_node_index);
        // }
        CUDF_EXPECTS(col->children.size() == static_cast<size_t>(col_meta.num_children()),
                     "Mismatch in number of child columns between input table and metadata");
        for (size_t i = 0; i < col->children.size(); ++i) {
          add_schema(col->children[i], col_meta.child(i), struct_node_index);
        }
      } else if (col->type().id() == type_id::LIST && !col_meta.is_map()) {
        // List schema is denoted by two levels for each nesting level and one final level for leaf.
        // The top level is the same name as the column name.
        // So e.g. List<List<int>> is denoted in the schema by
        // "col_name" : { "list" : { "element" : { "list" : { "element" } } } }
        schema_tree_node list_schema_1{};
        list_schema_1.converted_type = ConvertedType::LIST;
        list_schema_1.repetition_type =
          col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
        list_schema_1.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
        list_schema_1.num_children = 1;
        list_schema_1.parent_idx   = parent_idx;
        set_field_id(list_schema_1, col_meta);
        schema.push_back(std::move(list_schema_1));

        schema_tree_node list_schema_2{};
        list_schema_2.repetition_type = FieldRepetitionType::REPEATED;
        list_schema_2.name            = "list";
        list_schema_2.num_children    = 1;
        list_schema_2.parent_idx      = schema.size() - 1;  // Parent is list_schema_1, last added.
        schema.push_back(std::move(list_schema_2));

        CUDF_EXPECTS(col_meta.num_children() == 2,
                     "List column's metadata should have exactly two children");

        add_schema(col->children[lists_column_view::child_column_index],
                   col_meta.child(lists_column_view::child_column_index),
                   schema.size() - 1);
      } else if (col->type().id() == type_id::LIST && col_meta.is_map()) {
        // Map schema is denoted by a list of struct
        // e.g. List<Struct<String,String>> will be
        // "col_name" : { "key_value" : { "key", "value" } }

        // verify the List child structure is a struct<left_child, right_child>
        column_view struct_col = *col->children[lists_column_view::child_column_index];
        CUDF_EXPECTS(struct_col.type().id() == type_id::STRUCT, "Map should be a List of struct");
        CUDF_EXPECTS(struct_col.num_children() == 2,
                     "Map should be a List of struct with two children only but found " +
                       std::to_string(struct_col.num_children()));

        schema_tree_node map_schema{};
        map_schema.converted_type = ConvertedType::MAP;
        map_schema.repetition_type =
          col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
        map_schema.name = col_meta.get_name();
        if (col_meta.is_parquet_field_id_set()) {
          map_schema.field_id = col_meta.get_parquet_field_id();
        }
        map_schema.num_children = 1;
        map_schema.parent_idx   = parent_idx;
        schema.push_back(std::move(map_schema));

        schema_tree_node repeat_group{};
        repeat_group.repetition_type = FieldRepetitionType::REPEATED;
        repeat_group.name            = "key_value";
        repeat_group.num_children    = 2;
        repeat_group.parent_idx      = schema.size() - 1;  // Parent is map_schema, last added.
        schema.push_back(std::move(repeat_group));

        CUDF_EXPECTS(col_meta.num_children() == 2,
                     "List column's metadata should have exactly two children");
        CUDF_EXPECTS(col_meta.child(lists_column_view::child_column_index).num_children() == 2,
                     "Map struct column should have exactly two children");
        // verify the col meta of children of the struct have name key and value
        auto& left_child_meta = col_meta.child(lists_column_view::child_column_index).child(0);
        left_child_meta.set_name("key");
        left_child_meta.set_nullability(false);

        auto& right_child_meta = col_meta.child(lists_column_view::child_column_index).child(1);
        right_child_meta.set_name("value");

        // check the repetition type of key is required i.e. the col should be non-nullable
        auto key_col = col->children[lists_column_view::child_column_index]->children[0];
        CUDF_EXPECTS(!is_col_nullable(key_col, left_child_meta, write_mode),
                     "key column cannot be nullable. For chunked writing, explicitly set the "
                     "nullability to false in metadata");
        // process key
        size_type struct_col_index = schema.size() - 1;
        add_schema(key_col, left_child_meta, struct_col_index);
        // process value
        add_schema(col->children[lists_column_view::child_column_index]->children[1],
                   right_child_meta,
                   struct_col_index);

      } else {
        // if leaf, add current
        if (col->type().id() == type_id::STRING) {
          CUDF_EXPECTS(col_meta.num_children() == 2 or col_meta.num_children() == 0,
                       "String column's corresponding metadata should have zero or two children");
        } else {
          CUDF_EXPECTS(col_meta.num_children() == 0,
                       "Leaf column's corresponding metadata cannot have children");
        }

        schema_tree_node col_schema{};

        // per-column option overrides the writer-wide int96 flag
        bool timestamp_is_int96 = int96_timestamps or col_meta.is_enabled_int96_timestamps();

        cudf::type_dispatcher(
          col->type(),
          leaf_schema_fn{col_schema, col, col_meta, timestamp_is_int96, utc_timestamps});

        col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED;
        col_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
        col_schema.parent_idx  = parent_idx;
        col_schema.leaf_column = col;
        set_field_id(col_schema, col_meta);
        schema.push_back(col_schema);
      }
    };

  CUDF_EXPECTS(metadata.column_metadata.size() == linked_columns.size(),
               "Mismatch in the number of columns and the corresponding metadata elements");
  // Add all linked_columns to schema using parent_idx = 0 (root)
  for (size_t i = 0; i < linked_columns.size(); ++i) {
    add_schema(linked_columns[i], metadata.column_metadata[i], 0);
  }

  return schema;
}

/**
 * @brief Class to store parquet specific information for one data stream.
 *
 * Contains information about a single data stream. In case of struct columns, a data stream is one
 * of the child leaf columns that contains data.
 * e.g. A column Struct<int, List<float>> contains 2 data streams:
 * - Struct<int>
 * - Struct<List<float>>
 *
 */
struct parquet_column_view {
  parquet_column_view(schema_tree_node const& schema_node,
                      std::vector<schema_tree_node> const& schema_tree,
                      rmm::cuda_stream_view stream);

  [[nodiscard]] parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const;

  [[nodiscard]] column_view cudf_column_view() const { return cudf_col; }
  [[nodiscard]] Type physical_type() const { return schema_node.type; }
  [[nodiscard]] ConvertedType converted_type() const
  {
    return schema_node.converted_type.value_or(UNKNOWN);
  }

  std::vector<std::string> const& get_path_in_schema() { return path_in_schema; }

  // LIST related member functions
  [[nodiscard]] uint8_t max_def_level() const noexcept { return _max_def_level; }
  [[nodiscard]] uint8_t max_rep_level() const noexcept { return _max_rep_level; }
  [[nodiscard]] bool is_list() const noexcept { return _is_list; }

 private:
  // Schema related members
  schema_tree_node schema_node;
  std::vector<std::string> path_in_schema;
  uint8_t _max_def_level = 0;
  uint8_t _max_rep_level = 0;
  rmm::device_uvector<uint8_t> _d_nullability;

  column_view cudf_col;

  // List-related members
  bool _is_list;
rmm::device_uvector<size_type> _dremel_offsets; ///< For each row, the absolute offset into the repetition and definition ///< level vectors. O(num rows) rmm::device_uvector<uint8_t> _rep_level; rmm::device_uvector<uint8_t> _def_level; std::vector<uint8_t> _nullability; size_type _data_count = 0; }; parquet_column_view::parquet_column_view(schema_tree_node const& schema_node, std::vector<schema_tree_node> const& schema_tree, rmm::cuda_stream_view stream) : schema_node(schema_node), _d_nullability(0, stream), _dremel_offsets(0, stream), _rep_level(0, stream), _def_level(0, stream) { // Construct single inheritance column_view from linked_column_view auto curr_col = schema_node.leaf_column.get(); column_view single_inheritance_cudf_col = *curr_col; while (curr_col->parent) { auto const& parent = *curr_col->parent; // For list columns, we still need to retain the offset child column. auto children = (parent.type().id() == type_id::LIST) ? std::vector<column_view>{*parent.children[lists_column_view::offsets_column_index], single_inheritance_cudf_col} : std::vector<column_view>{single_inheritance_cudf_col}; single_inheritance_cudf_col = column_view(parent.type(), parent.size(), parent.head(), parent.null_mask(), parent.null_count(), parent.offset(), children); curr_col = curr_col->parent; } cudf_col = single_inheritance_cudf_col; // Construct path_in_schema by travelling up in the schema_tree std::vector<std::string> path; auto curr_schema_node = schema_node; do { path.push_back(curr_schema_node.name); if (curr_schema_node.parent_idx != -1) { curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } } while (curr_schema_node.parent_idx != -1); path_in_schema = std::vector<std::string>(path.crbegin(), path.crend()); // Calculate max definition level by counting the number of levels that are optional (nullable) // and max repetition level by counting the number of REPEATED levels in this column's hierarchy uint16_t max_def_level = 0; uint16_t max_rep_level = 0; 
curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (curr_schema_node.repetition_type == REPEATED or curr_schema_node.repetition_type == OPTIONAL) { ++max_def_level; } if (curr_schema_node.repetition_type == REPEATED) { ++max_rep_level; } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } CUDF_EXPECTS(max_def_level < 256, "Definition levels above 255 are not supported"); CUDF_EXPECTS(max_rep_level < 256, "Definition levels above 255 are not supported"); _max_def_level = max_def_level; _max_rep_level = max_rep_level; // Construct nullability vector using repetition_type from schema. std::vector<uint8_t> r_nullability; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (not curr_schema_node.is_stub()) { r_nullability.push_back(curr_schema_node.repetition_type == FieldRepetitionType::OPTIONAL); } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } _nullability = std::vector<uint8_t>(r_nullability.crbegin(), r_nullability.crend()); // TODO(cp): Explore doing this for all columns in a single go outside this ctor. Maybe using // hostdevice_vector. Currently this involves a cudaMemcpyAsync for each column. _d_nullability = cudf::detail::make_device_uvector_async( _nullability, stream, rmm::mr::get_current_device_resource()); _is_list = (_max_rep_level > 0); if (cudf_col.is_empty()) { return; } if (_is_list) { // Top level column's offsets are not applied to all children. 
Get the effective offset and // size of the leaf column // Calculate row offset into dremel data (repetition/definition values) and the respective // definition and repetition levels cudf::detail::dremel_data dremel = get_dremel_data(cudf_col, _nullability, schema_node.output_as_byte_array, stream); _dremel_offsets = std::move(dremel.dremel_offsets); _rep_level = std::move(dremel.rep_level); _def_level = std::move(dremel.def_level); _data_count = dremel.leaf_data_size; // Needed for knowing what size dictionary to allocate stream.synchronize(); } else { // For non-list struct, the size of the root column is the same as the size of the leaf column _data_count = cudf_col.size(); } } parquet_column_device_view parquet_column_view::get_device_view(rmm::cuda_stream_view) const { auto desc = parquet_column_device_view{}; // Zero out all fields desc.stats_dtype = schema_node.stats_dtype; desc.ts_scale = schema_node.ts_scale; if (is_list()) { desc.level_offsets = _dremel_offsets.data(); desc.rep_values = _rep_level.data(); desc.def_values = _def_level.data(); } desc.num_rows = cudf_col.size(); desc.physical_type = physical_type(); desc.converted_type = converted_type(); desc.output_as_byte_array = schema_node.output_as_byte_array; desc.level_bits = CompactProtocolReader::NumRequiredBits(max_rep_level()) << 4 | CompactProtocolReader::NumRequiredBits(max_def_level()); desc.nullability = _d_nullability.data(); return desc; } /** * @brief Gather row group fragments * * This calculates fragments to be used in determining row group boundaries. 
 *
 * @param frag Destination row group fragments
 * @param col_desc column description array
 * @param partitions Information about partitioning of table
 * @param part_frag_offset A Partition's offset into fragment array
 * @param fragment_size Number of rows per fragment
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
void init_row_group_fragments(cudf::detail::hostdevice_2dvector<PageFragment>& frag,
                              device_span<parquet_column_device_view const> col_desc,
                              host_span<partition_info const> partitions,
                              device_span<int const> part_frag_offset,
                              uint32_t fragment_size,
                              rmm::cuda_stream_view stream)
{
  auto d_partitions = cudf::detail::make_device_uvector_async(
    partitions, stream, rmm::mr::get_current_device_resource());
  InitRowGroupFragments(frag, col_desc, d_partitions, part_frag_offset, fragment_size, stream);
  // Results are consumed on the host to decide row group boundaries.
  frag.device_to_host_sync(stream);
}

/**
 * @brief Recalculate page fragments
 *
 * This calculates fragments to be used to determine page boundaries within
 * column chunks.
 *
 * @param frag Destination page fragments
 * @param frag_sizes Array of fragment sizes for each column
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
void calculate_page_fragments(device_span<PageFragment> frag,
                              host_span<size_type const> frag_sizes,
                              rmm::cuda_stream_view stream)
{
  auto d_frag_sz = cudf::detail::make_device_uvector_async(
    frag_sizes, stream, rmm::mr::get_current_device_resource());
  CalculatePageFragments(frag, d_frag_sz, stream);
}

/**
 * @brief Gather per-fragment statistics
 *
 * @param frag_stats output statistics
 * @param frags Input page fragments
 * @param int96_timestamps Flag to indicate if timestamps will be written as INT96
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
void gather_fragment_statistics(device_span<statistics_chunk> frag_stats,
                                device_span<PageFragment const> frags,
                                bool int96_timestamps,
                                rmm::cuda_stream_view stream)
{
  rmm::device_uvector<statistics_group> frag_stats_group(frag_stats.size(), stream);

  InitFragmentStatistics(frag_stats_group, frags, stream);
  detail::calculate_group_statistics<detail::io_file_format::PARQUET>(
    frag_stats.data(), frag_stats_group.data(), frag_stats.size(), stream, int96_timestamps);
  // frag_stats_group is freed at scope exit; ensure the kernel has finished reading it.
  stream.synchronize();
}

// Map a Parquet compression codec to the corresponding nvcomp codec; throws for
// codecs nvcomp does not handle here.
auto to_nvcomp_compression_type(Compression codec)
{
  if (codec == Compression::SNAPPY) return nvcomp::compression_type::SNAPPY;
  if (codec == Compression::ZSTD) return nvcomp::compression_type::ZSTD;
  CUDF_FAIL("Unsupported compression type");
}

// Required input alignment (in bytes) for page buffers handed to the compressor;
// 1 (no alignment) when pages are not device-compressed.
auto page_alignment(Compression codec)
{
  if (codec == Compression::UNCOMPRESSED or
      nvcomp::is_compression_disabled(to_nvcomp_compression_type(codec))) {
    return 1u;
  }

  return 1u << nvcomp::compress_input_alignment_bits(to_nvcomp_compression_type(codec));
}

// Worst-case compressed size for a page of the given uncompressed size (0 if uncompressed).
size_t max_compression_output_size(Compression codec, uint32_t compression_blocksize)
{
  if (codec == Compression::UNCOMPRESSED) return 0;

  return compress_max_output_chunk_size(to_nvcomp_compression_type(codec), compression_blocksize);
}

// Computes per-chunk page counts and per-page (uncompressed and max-compressed) sizes.
// Runs InitEncoderPages three times: first to count pages, then to fill page sizes,
// then to fold per-page max compressed sizes into chunk.compressed_size.
auto init_page_sizes(hostdevice_2dvector<EncColumnChunk>& chunks,
                     device_span<parquet_column_device_view const> col_desc,
                     uint32_t num_columns,
                     size_t max_page_size_bytes,
                     size_type max_page_size_rows,
                     bool write_v2_headers,
                     Compression compression_codec,
                     rmm::cuda_stream_view stream)
{
  if (chunks.is_empty()) { return cudf::detail::hostdevice_vector<size_type>{}; }

  chunks.host_to_device_async(stream);
  // Calculate number of pages and store in respective chunks
  InitEncoderPages(chunks,
                   {},
                   {},
                   {},
                   col_desc,
                   num_columns,
                   max_page_size_bytes,
                   max_page_size_rows,
                   page_alignment(compression_codec),
                   write_v2_headers,
                   nullptr,
                   nullptr,
                   stream);
  chunks.device_to_host_sync(stream);

  // Assign each chunk its starting index into the global page array.
  int num_pages = 0;
  for (auto& chunk : chunks.host_view().flat_view()) {
    chunk.first_page = num_pages;
    num_pages += chunk.num_pages;
  }
  chunks.host_to_device_async(stream);

  // Now that we know the number of pages, allocate an array to hold per page size and get it
  // populated
  cudf::detail::hostdevice_vector<size_type> page_sizes(num_pages, stream);
  InitEncoderPages(chunks,
                   {},
                   page_sizes,
                   {},
                   col_desc,
                   num_columns,
                   max_page_size_bytes,
                   max_page_size_rows,
                   page_alignment(compression_codec),
                   write_v2_headers,
                   nullptr,
                   nullptr,
                   stream);
  page_sizes.device_to_host_sync(stream);

  // Get per-page max compressed size
  cudf::detail::hostdevice_vector<size_type> comp_page_sizes(num_pages, stream);
  std::transform(page_sizes.begin(),
                 page_sizes.end(),
                 comp_page_sizes.begin(),
                 [compression_codec](auto page_size) {
                   return max_compression_output_size(compression_codec, page_size);
                 });
  comp_page_sizes.host_to_device_async(stream);

  // Use per-page max compressed size to calculate chunk.compressed_size
  InitEncoderPages(chunks,
                   {},
                   {},
                   comp_page_sizes,
                   col_desc,
                   num_columns,
                   max_page_size_bytes,
                   max_page_size_rows,
                   page_alignment(compression_codec),
                   write_v2_headers,
                   nullptr,
                   nullptr,
                   stream);
  chunks.device_to_host_sync(stream);
  return comp_page_sizes;
}

// Effective maximum page size, clamped by the codec's max allowed chunk size (when
// device compression is enabled) and by INT32_MAX since page sizes are 32-bit signed.
size_t max_page_bytes(Compression
 compression, size_t max_page_size_bytes)
{
  if (compression == Compression::UNCOMPRESSED) { return max_page_size_bytes; }

  auto const ncomp_type   = to_nvcomp_compression_type(compression);
  auto const nvcomp_limit = nvcomp::is_compression_disabled(ncomp_type)
                              ? std::nullopt
                              : nvcomp::compress_max_allowed_chunk_size(ncomp_type);

  auto max_size = std::min(nvcomp_limit.value_or(max_page_size_bytes), max_page_size_bytes);
  // page size must fit in a 32-bit signed integer
  return std::min<size_t>(max_size, std::numeric_limits<int32_t>::max());
}

// Builds per-chunk dictionaries: allocates hash maps, populates them from fragments,
// decides per chunk whether dictionary encoding pays off (RLE index size vs plain size,
// and codec size limits under ADAPTIVE policy), then materializes dictionary data and
// index buffers for the chunks that keep dictionaries.
// Returns the owning (dict_data, dict_index) device buffers; raw pointers into them are
// stored in the chunks.
std::pair<std::vector<rmm::device_uvector<size_type>>, std::vector<rmm::device_uvector<size_type>>>
build_chunk_dictionaries(hostdevice_2dvector<EncColumnChunk>& chunks,
                         host_span<parquet_column_device_view const> col_desc,
                         device_2dspan<PageFragment const> frags,
                         Compression compression,
                         dictionary_policy dict_policy,
                         size_t max_dict_size,
                         rmm::cuda_stream_view stream)
{
  // At this point, we know all chunks and their sizes. We want to allocate dictionaries for each
  // chunk that can have dictionary

  auto h_chunks = chunks.host_view().flat_view();

  std::vector<rmm::device_uvector<size_type>> dict_data;
  std::vector<rmm::device_uvector<size_type>> dict_index;

  if (h_chunks.empty()) { return std::pair(std::move(dict_data), std::move(dict_index)); }

  if (dict_policy == dictionary_policy::NEVER) {
    thrust::for_each(
      h_chunks.begin(), h_chunks.end(), [](auto& chunk) { chunk.use_dictionary = false; });
    chunks.host_to_device_async(stream);
    return std::pair(std::move(dict_data), std::move(dict_index));
  }

  // Allocate slots for each chunk
  std::vector<rmm::device_uvector<slot_type>> hash_maps_storage;
  hash_maps_storage.reserve(h_chunks.size());
  for (auto& chunk : h_chunks) {
    // BOOLEAN and byte-array-output BYTE_ARRAY chunks never use dictionaries
    if (col_desc[chunk.col_desc_id].physical_type == Type::BOOLEAN ||
        (col_desc[chunk.col_desc_id].output_as_byte_array &&
         col_desc[chunk.col_desc_id].physical_type == Type::BYTE_ARRAY)) {
      chunk.use_dictionary = false;
    } else {
      chunk.use_dictionary = true;
      // cuCollections suggests using a hash map of size N * (1/0.7) = num_values * 1.43
      // https://github.com/NVIDIA/cuCollections/blob/3a49fc71/include/cuco/static_map.cuh#L190-L193
      auto& inserted_map   = hash_maps_storage.emplace_back(chunk.num_values * 1.43, stream);
      chunk.dict_map_slots = inserted_map.data();
      chunk.dict_map_size  = inserted_map.size();
    }
  }

  chunks.host_to_device_async(stream);

  initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream);
  populate_chunk_hash_maps(frags, stream);

  chunks.device_to_host_sync(stream);

  // Make decision about which chunks have dictionary
  for (auto& ck : h_chunks) {
    if (not ck.use_dictionary) { continue; }
    std::tie(ck.use_dictionary, ck.dict_rle_bits) = [&]() -> std::pair<bool, uint8_t> {
      // calculate size of chunk if dictionary is used

      // If we have N unique values then the idx for the last value is N - 1 and nbits is the number
      // of bits required to encode indices into the dictionary
      auto max_dict_index = (ck.num_dict_entries > 0) ? ck.num_dict_entries - 1 : 0;
      auto nbits          = std::max(CompactProtocolReader::NumRequiredBits(max_dict_index), 1);

      // We don't use dictionary if the indices are > MAX_DICT_BITS bits because that's the maximum
      // bitpacking bitsize we efficiently support
      if (nbits > MAX_DICT_BITS) { return {false, 0}; }

      auto rle_byte_size = util::div_rounding_up_safe(ck.num_values * nbits, 8);
      auto dict_enc_size = ck.uniq_data_size + rle_byte_size;
      if (ck.plain_data_size <= dict_enc_size) { return {false, 0}; }

      // don't use dictionary if it gets too large for the given compression codec
      if (dict_policy == dictionary_policy::ADAPTIVE) {
        auto const unique_size = static_cast<size_t>(ck.uniq_data_size);
        if (unique_size > max_page_bytes(compression, max_dict_size)) { return {false, 0}; }
      }

      return {true, nbits};
    }();
  }

  // TODO: (enh) Deallocate hash map storage for chunks that don't use dict and clear pointers.

  dict_data.reserve(h_chunks.size());
  dict_index.reserve(h_chunks.size());
  for (auto& chunk : h_chunks) {
    if (not chunk.use_dictionary) { continue; }

    size_t dict_data_size     = std::min(MAX_DICT_SIZE, chunk.dict_map_size);
    auto& inserted_dict_data  = dict_data.emplace_back(dict_data_size, stream);
    auto& inserted_dict_index = dict_index.emplace_back(chunk.num_values, stream);
    chunk.dict_data           = inserted_dict_data.data();
    chunk.dict_index          = inserted_dict_index.data();
  }
  chunks.host_to_device_async(stream);
  collect_map_entries(chunks.device_view().flat_view(), stream);
  get_dictionary_indices(frags, stream);

  return std::pair(std::move(dict_data), std::move(dict_index));
}

/**
 * @brief Initialize encoder pages.
 *
 * @param chunks Column chunk array
 * @param col_desc Column description array
 * @param pages Encoder pages array
 * @param comp_page_sizes Per-page max compressed size
 * @param page_stats Page statistics array
 * @param frag_stats Fragment statistics array
 * @param num_columns Total number of columns
 * @param num_pages Total number of pages
 * @param num_stats_bfr Number of statistics buffers
 * @param compression Compression format
 * @param max_page_size_bytes Maximum uncompressed page size, in bytes
 * @param max_page_size_rows Maximum page size, in rows
 * @param write_v2_headers True if version 2 page headers are to be written
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
void init_encoder_pages(hostdevice_2dvector<EncColumnChunk>& chunks,
                        device_span<parquet_column_device_view const> col_desc,
                        device_span<EncPage> pages,
                        cudf::detail::hostdevice_vector<size_type>& comp_page_sizes,
                        statistics_chunk* page_stats,
                        statistics_chunk* frag_stats,
                        uint32_t num_columns,
                        uint32_t num_pages,
                        uint32_t num_stats_bfr,
                        Compression compression,
                        size_t max_page_size_bytes,
                        size_type max_page_size_rows,
                        bool write_v2_headers,
                        rmm::cuda_stream_view stream)
{
  rmm::device_uvector<statistics_merge_group> page_stats_mrg(num_stats_bfr, stream);
chunks.host_to_device_async(stream); InitEncoderPages(chunks, pages, {}, comp_page_sizes, col_desc, num_columns, max_page_size_bytes, max_page_size_rows, page_alignment(compression), write_v2_headers, (num_stats_bfr) ? page_stats_mrg.data() : nullptr, (num_stats_bfr > num_pages) ? page_stats_mrg.data() + num_pages : nullptr, stream); if (num_stats_bfr > 0) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats, frag_stats, page_stats_mrg.data(), num_pages, stream); if (num_stats_bfr > num_pages) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats + num_pages, page_stats, page_stats_mrg.data() + num_pages, num_stats_bfr - num_pages, stream); } } stream.synchronize(); } /** * @brief Encode a batch of pages. * * @throws rmm::bad_alloc if there is insufficient space for temporary buffers * * @param chunks column chunk array * @param pages encoder pages array * @param pages_in_batch number of pages in this batch * @param first_page_in_batch first page in batch * @param rowgroups_in_batch number of rowgroups in this batch * @param first_rowgroup first rowgroup in batch * @param page_stats optional page-level statistics (nullptr if none) * @param chunk_stats optional chunk-level statistics (nullptr if none) * @param column_stats optional page-level statistics for column index (nullptr if none) * @param comp_stats optional compression statistics (nullopt if none) * @param compression compression format * @param column_index_truncate_length maximum length of min or max values in column index, in bytes * @param write_v2_headers True if V2 page headers should be written * @param stream CUDA stream used for device memory operations and kernel launches */ void encode_pages(hostdevice_2dvector<EncColumnChunk>& chunks, device_span<EncPage> pages, uint32_t pages_in_batch, uint32_t first_page_in_batch, uint32_t rowgroups_in_batch, uint32_t first_rowgroup, statistics_chunk const* page_stats, statistics_chunk const* chunk_stats, 
                  statistics_chunk const* column_stats,
                  std::optional<writer_compression_statistics>& comp_stats,
                  Compression compression,
                  int32_t column_index_truncate_length,
                  bool write_v2_headers,
                  rmm::cuda_stream_view stream)
{
  auto batch_pages = pages.subspan(first_page_in_batch, pages_in_batch);

  auto batch_pages_stats =
    (page_stats != nullptr)
      ? device_span<statistics_chunk const>(page_stats + first_page_in_batch, pages_in_batch)
      : device_span<statistics_chunk const>();

  // No compression scratch needed when the output is uncompressed.
  uint32_t max_comp_pages = (compression != Compression::UNCOMPRESSED) ? pages_in_batch : 0;

  rmm::device_uvector<device_span<uint8_t const>> comp_in(max_comp_pages, stream);
  rmm::device_uvector<device_span<uint8_t>> comp_out(max_comp_pages, stream);
  rmm::device_uvector<compression_result> comp_res(max_comp_pages, stream);
  // Pre-mark every page as FAILED so pages the compressor never touches are treated as
  // uncompressed later (DecideCompression inspects these results).
  thrust::fill(rmm::exec_policy(stream),
               comp_res.begin(),
               comp_res.end(),
               compression_result{0, compression_status::FAILURE});

  EncodePages(batch_pages, write_v2_headers, comp_in, comp_out, comp_res, stream);
  switch (compression) {
    case Compression::SNAPPY:
      // Fall back to the in-house device Snappy encoder when nvCOMP's is disabled.
      if (nvcomp::is_compression_disabled(nvcomp::compression_type::SNAPPY)) {
        gpu_snap(comp_in, comp_out, comp_res, stream);
      } else {
        nvcomp::batched_compress(
          nvcomp::compression_type::SNAPPY, comp_in, comp_out, comp_res, stream);
      }
      break;
    case Compression::ZSTD: {
      // ZSTD has no fallback implementation; error out if nvCOMP ZSTD is unavailable.
      if (auto const reason = nvcomp::is_compression_disabled(nvcomp::compression_type::ZSTD);
          reason) {
        CUDF_FAIL("Compression error: " + reason.value());
      }
      nvcomp::batched_compress(nvcomp::compression_type::ZSTD, comp_in, comp_out, comp_res, stream);
      break;
    }
    case Compression::UNCOMPRESSED: break;
    default: CUDF_FAIL("invalid compression type");
  }

  // TBD: Not clear if the official spec actually allows dynamically turning off compression at the
  // chunk-level

  auto d_chunks_in_batch = chunks.device_view().subspan(first_rowgroup, rowgroups_in_batch);
  DecideCompression(d_chunks_in_batch.flat_view(), stream);
  EncodePageHeaders(batch_pages, comp_res, batch_pages_stats, chunk_stats, stream);
  GatherPages(d_chunks_in_batch.flat_view(), pages, stream);

  if (column_stats != nullptr) {
    EncodeColumnIndexes(d_chunks_in_batch.flat_view(),
                        {column_stats, pages.size()},
                        column_index_truncate_length,
                        stream);
  }

  // Copy updated chunk descriptors (compressed sizes, compression decision, etc.) back to host.
  auto h_chunks_in_batch = chunks.host_view().subspan(first_rowgroup, rowgroups_in_batch);
  CUDF_CUDA_TRY(cudaMemcpyAsync(h_chunks_in_batch.data(),
                                d_chunks_in_batch.data(),
                                d_chunks_in_batch.flat_view().size_bytes(),
                                cudaMemcpyDefault,
                                stream.value()));

  if (comp_stats.has_value()) {
    comp_stats.value() += collect_compression_statistics(comp_in, comp_res, stream);
  }
  // The caller reads the host-side chunk view right after this returns; also keeps the
  // function-local comp_* buffers alive until the async work using them has completed.
  stream.synchronize();
}

/**
 * @brief Function to calculate the memory needed to encode the column index of the given
 * column chunk.
 *
 * @param ck pointer to column chunk
 * @param column_index_truncate_length maximum length of min or max values in column index, in bytes
 * @return Computed buffer size needed to encode the column index
 */
size_t column_index_buffer_size(EncColumnChunk* ck, int32_t column_index_truncate_length)
{
  // encoding the column index for a given chunk requires:
  //   each list (4 of them) requires 6 bytes of overhead
  //     (1 byte field header, 1 byte type, 4 bytes length)
  //   1 byte overhead for boundary_order
  //   1 byte overhead for termination
  //   sizeof(char) for boundary_order
  //   sizeof(bool) * num_pages for null_pages
  //   (ck_max_stats_len + 4) * num_pages * 2 for min/max values
  //     (each binary requires 4 bytes length + ck_max_stats_len)
  //   sizeof(int64_t) * num_pages for null_counts
  //
  // so 26 bytes overhead + sizeof(char) +
  //    (sizeof(bool) + sizeof(int64_t) + 2 * (4 + ck_max_stats_len)) * num_pages
  //
  // we already have ck->ck_stat_size = 48 + 2 * ck_max_stats_len
  // all of the overhead and non-stats data can fit in under 48 bytes
  //
  // so we can simply use ck_stat_size * num_pages
  //
  // add on some extra padding at the end (plus extra 7 bytes of alignment padding)
  // for scratch space to do stats truncation.
  //
  // calculating this per-chunk because the sizes can be wildly different.
  constexpr size_t padding = 7;
  return ck->ck_stat_size * ck->num_pages + column_index_truncate_length + padding;
}

/**
 * @brief Fill the table metadata with default column names.
 *
 * Columns left unnamed by the user get "_colN" at the top level and "<parent>_<i>" for
 * nested children; already-named columns are left untouched.
 *
 * @param table_meta The table metadata to fill
 */
void fill_table_meta(std::unique_ptr<table_input_metadata> const& table_meta)
{
  // Fill unnamed columns' names in table_meta
  // std::function (rather than a plain lambda) is required so the lambda can call itself
  // recursively to walk nested children.
  std::function<void(column_in_metadata&, std::string)> add_default_name =
    [&](column_in_metadata& col_meta, std::string default_name) {
      if (col_meta.get_name().empty()) col_meta.set_name(default_name);
      for (size_type i = 0; i < col_meta.num_children(); ++i) {
        add_default_name(col_meta.child(i), col_meta.get_name() + "_" + std::to_string(i));
      }
    };
  for (size_t i = 0; i < table_meta->column_metadata.size(); ++i) {
    add_default_name(table_meta->column_metadata[i], "_col" + std::to_string(i));
  }
}

/**
 * @brief Perform the processing steps needed to convert the input table into the output Parquet
 * data for writing, such as compression and encoding.
 *
 * @param[in,out] table_meta The table metadata
 * @param input The input table
 * @param partitions Optional partitions to divide the table into, if specified then must be same
 * size as number of sinks
 * @param kv_meta Optional user metadata
 * @param curr_agg_meta The current aggregate writer metadata
 * @param max_page_fragment_size_opt Optional maximum number of rows in a page fragment
 * @param max_row_group_size Maximum row group size, in bytes
 * @param max_page_size_bytes Maximum uncompressed page size, in bytes
 * @param max_row_group_rows Maximum row group size, in rows
 * @param max_page_size_rows Maximum page size, in rows
 * @param column_index_truncate_length maximum length of min or max values in column index, in bytes
 * @param stats_granularity Level of statistics requested in output file
 * @param compression Compression format
 * @param collect_compression_statistics Flag to indicate if compression statistics should be
 * collected
 * @param dict_policy Policy for dictionary use
 * @param max_dictionary_size Maximum dictionary size, in bytes
 * @param write_mode Flag to indicate that we are guaranteeing a single table write
 * @param int96_timestamps Flag to indicate if timestamps will be written as INT96
 * @param utc_timestamps Flag to indicate if timestamps are UTC
 * @param write_v2_headers True if V2 page headers are to be written
 * @param out_sink Sink for checking if device write is supported, should not be used to write any
 * data in this function
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return A tuple of the intermediate results containing the processed data
 */
auto convert_table_to_parquet_data(table_input_metadata& table_meta,
                                   table_view const& input,
                                   host_span<partition_info const> partitions,
                                   host_span<std::map<std::string, std::string> const> kv_meta,
                                   std::unique_ptr<aggregate_writer_metadata> const& curr_agg_meta,
                                   std::optional<size_type> max_page_fragment_size_opt,
                                   size_t max_row_group_size,
                                   size_t max_page_size_bytes,
                                   size_type max_row_group_rows,
                                   size_type max_page_size_rows,
                                   int32_t column_index_truncate_length,
                                   statistics_freq stats_granularity,
                                   Compression compression,
                                   bool collect_compression_statistics,
                                   dictionary_policy dict_policy,
                                   size_t max_dictionary_size,
                                   single_write_mode write_mode,
                                   bool int96_timestamps,
                                   bool utc_timestamps,
                                   bool write_v2_headers,
                                   host_span<std::unique_ptr<data_sink> const> out_sink,
                                   rmm::cuda_stream_view stream)
{
  auto vec = table_to_linked_columns(input);
  auto schema_tree =
    construct_schema_tree(vec, table_meta, write_mode, int96_timestamps, utc_timestamps);
  // Construct parquet_column_views from the schema tree leaf nodes.
  std::vector<parquet_column_view> parquet_columns;

  for (schema_tree_node const& schema_node : schema_tree) {
    if (schema_node.leaf_column) { parquet_columns.emplace_back(schema_node, schema_tree, stream); }
  }

  // Mass allocation of column_device_views for each parquet_column_view
  std::vector<column_view> cudf_cols;
  cudf_cols.reserve(parquet_columns.size());
  for (auto const& parq_col : parquet_columns) {
    cudf_cols.push_back(parq_col.cudf_column_view());
  }
  table_view single_streams_table(cudf_cols);
  size_type num_columns = single_streams_table.num_columns();

  std::vector<SchemaElement> this_table_schema(schema_tree.begin(), schema_tree.end());

  // Initialize column description
  cudf::detail::hostdevice_vector<parquet_column_device_view> col_desc(parquet_columns.size(),
                                                                       stream);
  std::transform(
    parquet_columns.begin(), parquet_columns.end(), col_desc.host_ptr(), [&](auto const& pcol) {
      return pcol.get_device_view(stream);
    });

  // Init page fragments
  // 5000 is good enough for up to ~200-character strings. Longer strings and deeply nested columns
  // will start producing fragments larger than the desired page size, so calculate fragment sizes
  // for each leaf column.  Skip if the fragment size is not the default.
  size_type max_page_fragment_size =
    max_page_fragment_size_opt.value_or(default_max_page_fragment_size);

  std::vector<size_type> column_frag_size(num_columns, max_page_fragment_size);

  if (input.num_rows() > 0 && not max_page_fragment_size_opt.has_value()) {
    std::vector<size_t> column_sizes;
    std::transform(single_streams_table.begin(),
                   single_streams_table.end(),
                   std::back_inserter(column_sizes),
                   [&](auto const& column) { return column_size(column, stream); });

    // adjust global fragment size if a single fragment will overrun a rowgroup
    auto const table_size  = std::reduce(column_sizes.begin(), column_sizes.end());
    auto const avg_row_len = util::div_rounding_up_safe<size_t>(table_size, input.num_rows());
    if (avg_row_len > 0) {
      auto const rg_frag_size = util::div_rounding_up_safe(max_row_group_size, avg_row_len);
      max_page_fragment_size  = std::min<size_type>(rg_frag_size, max_page_fragment_size);
    }

    // dividing page size by average row length will tend to overshoot the desired
    // page size when there's high variability in the row lengths. instead, shoot
    // for multiple fragments per page to smooth things out. using 2 was too
    // unbalanced in final page sizes, so using 4 which seems to be a good
    // compromise at smoothing things out without getting fragment sizes too small.
    auto frag_size_fn = [&](auto const& col, size_type col_size) {
      int const target_frags_per_page = is_col_fixed_width(col) ? 1 : 4;
      auto const avg_len =
        target_frags_per_page * util::div_rounding_up_safe<size_type>(col_size, input.num_rows());
      if (avg_len > 0) {
        auto const frag_size = util::div_rounding_up_safe<size_type>(max_page_size_bytes, avg_len);
        return std::min<size_type>(max_page_fragment_size, frag_size);
      } else {
        return max_page_fragment_size;
      }
    };

    std::transform(single_streams_table.begin(),
                   single_streams_table.end(),
                   column_sizes.begin(),
                   column_frag_size.begin(),
                   frag_size_fn);
  }

  // Fragments are calculated in two passes. In the first pass, a uniform number of fragments
  // per column is used. This is done to satisfy the requirement that each column chunk within
  // a row group has the same number of rows. After the row group (and thus column chunk)
  // boundaries are known, a second pass is done to calculate fragments to be used in determining
  // page boundaries within each column chunk.
  std::vector<int> num_frag_in_part;
  std::transform(partitions.begin(),
                 partitions.end(),
                 std::back_inserter(num_frag_in_part),
                 [max_page_fragment_size](auto const& part) {
                   return util::div_rounding_up_unsafe(part.num_rows, max_page_fragment_size);
                 });

  size_type num_fragments = std::reduce(num_frag_in_part.begin(), num_frag_in_part.end());

  std::vector<int> part_frag_offset;  // Store the idx of the first fragment in each partition
  std::exclusive_scan(
    num_frag_in_part.begin(), num_frag_in_part.end(), std::back_inserter(part_frag_offset), 0);
  part_frag_offset.push_back(part_frag_offset.back() + num_frag_in_part.back());

  auto d_part_frag_offset = cudf::detail::make_device_uvector_async(
    part_frag_offset, stream, rmm::mr::get_current_device_resource());
  cudf::detail::hostdevice_2dvector<PageFragment> row_group_fragments(
    num_columns, num_fragments, stream);

  // Create table_device_view so that corresponding column_device_view data
  // can be written into col_desc members
  // These are unused but needs to be kept alive.
  auto parent_column_table_device_view = table_device_view::create(single_streams_table, stream);
  rmm::device_uvector<column_device_view> leaf_column_views(0, stream);

  if (num_fragments != 0) {
    // Move column info to device
    col_desc.host_to_device_async(stream);
    leaf_column_views = create_leaf_column_device_views<parquet_column_device_view>(
      col_desc, *parent_column_table_device_view, stream);

    init_row_group_fragments(row_group_fragments,
                             col_desc,
                             partitions,
                             d_part_frag_offset,
                             max_page_fragment_size,
                             stream);
  }

  std::unique_ptr<aggregate_writer_metadata> agg_meta;
  if (!curr_agg_meta) {
    agg_meta = std::make_unique<aggregate_writer_metadata>(
      partitions, kv_meta, this_table_schema, num_columns, stats_granularity);
  } else {
    // Chunked-write continuation: copy the existing metadata so a failure here leaves the
    // caller's aggregate metadata untouched.
    agg_meta = std::make_unique<aggregate_writer_metadata>(*curr_agg_meta);

    // verify the user isn't passing mismatched tables
    CUDF_EXPECTS(agg_meta->schema_matches(this_table_schema),
                 "Mismatch in schema between multiple calls to write_chunk");

    agg_meta->update_files(partitions);
  }

  auto global_rowgroup_base = agg_meta->num_row_groups_per_file();

  // Decide row group boundaries based on uncompressed data size
  size_type num_rowgroups = 0;

  std::vector<int> num_rg_in_part(partitions.size());
  for (size_t p = 0; p < partitions.size(); ++p) {
    size_type curr_rg_num_rows = 0;
    size_t curr_rg_data_size   = 0;
    int first_frag_in_rg       = part_frag_offset[p];
    int last_frag_in_part      = part_frag_offset[p + 1] - 1;
    for (auto f = first_frag_in_rg; f <= last_frag_in_part; ++f) {
      size_t fragment_data_size = 0;
      for (auto c = 0; c < num_columns; c++) {
        fragment_data_size += row_group_fragments[c][f].fragment_data_size;
      }
      // All columns have the same number of rows per fragment, so column 0 is representative.
      size_type fragment_num_rows = row_group_fragments[0][f].num_rows;

      // If the fragment size gets larger than rg limit then break off a rg
      if (f > first_frag_in_rg &&  // There has to be at least one fragment in row group
          (curr_rg_data_size + fragment_data_size > max_row_group_size ||
           curr_rg_num_rows + fragment_num_rows > max_row_group_rows)) {
        auto& rg    = agg_meta->file(p).row_groups.emplace_back();
        rg.num_rows = curr_rg_num_rows;
        num_rowgroups++;
        num_rg_in_part[p]++;
        curr_rg_num_rows  = 0;
        curr_rg_data_size = 0;
        first_frag_in_rg  = f;
      }
      curr_rg_num_rows += fragment_num_rows;
      curr_rg_data_size += fragment_data_size;

      // TODO: (wishful) refactor to consolidate with above if block
      if (f == last_frag_in_part) {
        auto& rg    = agg_meta->file(p).row_groups.emplace_back();
        rg.num_rows = curr_rg_num_rows;
        num_rowgroups++;
        num_rg_in_part[p]++;
      }
    }
  }

  std::vector<int> first_rg_in_part;
  std::exclusive_scan(
    num_rg_in_part.begin(), num_rg_in_part.end(), std::back_inserter(first_rg_in_part), 0);

  // Initialize row groups and column chunks
  auto const num_chunks = num_rowgroups * num_columns;
  hostdevice_2dvector<EncColumnChunk> chunks(num_rowgroups, num_columns, stream);

  // total fragments per column (in case they are non-uniform)
  std::vector<size_type> frags_per_column(num_columns, 0);

  for (size_t p = 0; p < partitions.size(); ++p) {
    int f               = part_frag_offset[p];
    size_type start_row = partitions[p].start_row;
    for (int r = 0; r < num_rg_in_part[p]; r++) {
      size_t global_r = global_rowgroup_base[p] + r;  // Number of rowgroups already in file/part
      auto& row_group = agg_meta->file(p).row_groups[global_r];
      uint32_t fragments_in_chunk =
        util::div_rounding_up_unsafe(row_group.num_rows, max_page_fragment_size);
      row_group.total_byte_size = 0;
      row_group.columns.resize(num_columns);
      for (int c = 0; c < num_columns; c++) {
        EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c];
        ck                 = {};
        ck.col_desc        = col_desc.device_ptr() + c;
        ck.col_desc_id     = c;
        ck.fragments       = &row_group_fragments.device_view()[c][f];
        ck.stats           = nullptr;
        ck.start_row       = start_row;
        ck.num_rows        = (uint32_t)row_group.num_rows;
        ck.first_fragment  = c * num_fragments + f;
        ck.encodings       = 0;
        auto chunk_fragments = row_group_fragments[c].subspan(f, fragments_in_chunk);
        // In fragment struct, add a pointer to the chunk it belongs to
        // In each fragment in chunk_fragments, update the chunk pointer here.
        for (auto& frag : chunk_fragments) {
          frag.chunk = &chunks.device_view()[r + first_rg_in_part[p]][c];
        }
        // NOTE(review): both accumulators below use an int-typed init value (0), so very large
        // chunks could overflow a 32-bit sum — confirm against expected per-chunk limits.
        ck.num_values = std::accumulate(
          chunk_fragments.begin(), chunk_fragments.end(), 0, [](uint32_t l, auto r) {
            return l + r.num_values;
          });
        ck.plain_data_size = std::accumulate(
          chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, PageFragment frag) {
            return sum + frag.fragment_data_size;
          });
        auto& column_chunk_meta          = row_group.columns[c].meta_data;
        column_chunk_meta.type           = parquet_columns[c].physical_type();
        column_chunk_meta.path_in_schema = parquet_columns[c].get_path_in_schema();
        column_chunk_meta.codec          = UNCOMPRESSED;
        column_chunk_meta.num_values     = ck.num_values;

        frags_per_column[c] += util::div_rounding_up_unsafe(
          row_group.num_rows, std::min(column_frag_size[c], max_page_fragment_size));
      }
      f += fragments_in_chunk;
      start_row += (uint32_t)row_group.num_rows;
    }
  }

  row_group_fragments.host_to_device_async(stream);
  // The returned buffers must stay alive while chunk descriptors point into them.
  [[maybe_unused]] auto dict_info_owner = build_chunk_dictionaries(
    chunks, col_desc, row_group_fragments, compression, dict_policy, max_dictionary_size, stream);

  // The code preceding this used a uniform fragment size for all columns. Now recompute
  // fragments with a (potentially) varying number of fragments per column.

  // first figure out the total number of fragments and calculate the start offset for each column
  std::vector<size_type> frag_offsets(num_columns, 0);
  std::exclusive_scan(frags_per_column.begin(), frags_per_column.end(), frag_offsets.begin(), 0);
  size_type const total_frags =
    frags_per_column.empty() ? 0 : frag_offsets.back() + frags_per_column.back();

  rmm::device_uvector<statistics_chunk> frag_stats(0, stream);
  cudf::detail::hostdevice_vector<PageFragment> page_fragments(total_frags, stream);

  // update fragments and/or prepare for fragment statistics calculation if necessary
  if (total_frags != 0) {
    if (stats_granularity != statistics_freq::STATISTICS_NONE) {
      frag_stats.resize(total_frags, stream);
    }

    for (int c = 0; c < num_columns; c++) {
      auto frag_offset     = frag_offsets[c];
      auto const frag_size = column_frag_size[c];

      for (size_t p = 0; p < partitions.size(); ++p) {
        for (int r = 0; r < num_rg_in_part[p]; r++) {
          auto const global_r   = global_rowgroup_base[p] + r;
          auto const& row_group = agg_meta->file(p).row_groups[global_r];
          uint32_t const fragments_in_chunk =
            util::div_rounding_up_unsafe(row_group.num_rows, frag_size);
          EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c];
          // Re-point the chunk at the per-column (non-uniform) fragment array.
          ck.fragments      = page_fragments.device_ptr(frag_offset);
          ck.first_fragment = frag_offset;

          // update the chunk pointer here for each fragment in chunk.fragments
          for (uint32_t i = 0; i < fragments_in_chunk; i++) {
            page_fragments[frag_offset + i].chunk =
              &chunks.device_view()[r + first_rg_in_part[p]][c];
          }

          if (not frag_stats.is_empty()) { ck.stats = frag_stats.data() + frag_offset; }
          frag_offset += fragments_in_chunk;
        }
      }
    }

    chunks.host_to_device_async(stream);

    // re-initialize page fragments
    page_fragments.host_to_device_async(stream);
    calculate_page_fragments(page_fragments, column_frag_size, stream);

    // and gather fragment statistics
    if (not frag_stats.is_empty()) {
      gather_fragment_statistics(frag_stats,
                                 {page_fragments.device_ptr(), static_cast<size_t>(total_frags)},
                                 int96_timestamps,
                                 stream);
    }
  }

  // Build chunk dictionaries and count pages. Sends chunks to device.
  cudf::detail::hostdevice_vector<size_type> comp_page_sizes = init_page_sizes(chunks,
                                                                               col_desc,
                                                                               num_columns,
                                                                               max_page_size_bytes,
                                                                               max_page_size_rows,
                                                                               write_v2_headers,
                                                                               compression,
                                                                               stream);

  // Find which partition a rg belongs to
  std::vector<int> rg_to_part;
  for (size_t p = 0; p < num_rg_in_part.size(); ++p) {
    std::fill_n(std::back_inserter(rg_to_part), num_rg_in_part[p], p);
  }

  // Batch processing is no longer supported.
  // This line disables batch processing (so batch size will no longer be limited at 1GB as before).
  // TODO: All the relevant code will be removed in the follow-up work:
  // https://github.com/rapidsai/cudf/issues/13440
  auto const max_bytes_in_batch = std::numeric_limits<size_t>::max();

  // Initialize batches of rowgroups to encode (mainly to limit peak memory usage)
  std::vector<size_type> batch_list;
  size_type num_pages          = 0;
  size_t max_uncomp_bfr_size   = 0;
  size_t max_comp_bfr_size     = 0;
  size_t max_chunk_bfr_size    = 0;
  size_type max_pages_in_batch = 0;
  size_t bytes_in_batch        = 0;
  size_t comp_bytes_in_batch   = 0;
  size_t column_index_bfr_size = 0;
  // Iterates one past num_rowgroups so the final (possibly partial) batch is flushed.
  for (size_type r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) {
    size_t rowgroup_size      = 0;
    size_t comp_rowgroup_size = 0;
    if (r < num_rowgroups) {
      for (int i = 0; i < num_columns; i++) {
        EncColumnChunk* ck = &chunks[r][i];
        ck->first_page     = num_pages;
        num_pages += ck->num_pages;
        pages_in_batch += ck->num_pages;
        rowgroup_size += ck->bfr_size;
        comp_rowgroup_size += ck->compressed_size;
        max_chunk_bfr_size =
          std::max(max_chunk_bfr_size, (size_t)std::max(ck->bfr_size, ck->compressed_size));
        if (stats_granularity == statistics_freq::STATISTICS_COLUMN) {
          column_index_bfr_size += column_index_buffer_size(ck, column_index_truncate_length);
        }
      }
    }
    // TBD: We may want to also shorten the batch if we have enough pages (not just based on size)
    if ((r == num_rowgroups) ||
        (groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) {
      max_uncomp_bfr_size = std::max(max_uncomp_bfr_size, bytes_in_batch);
      max_comp_bfr_size   = std::max(max_comp_bfr_size, comp_bytes_in_batch);
      max_pages_in_batch  = std::max(max_pages_in_batch, pages_in_batch);
      if (groups_in_batch != 0) {
        batch_list.push_back(groups_in_batch);
        groups_in_batch = 0;
      }
      bytes_in_batch      = 0;
      comp_bytes_in_batch = 0;
      pages_in_batch      = 0;
    }
    bytes_in_batch += rowgroup_size;
    comp_bytes_in_batch += comp_rowgroup_size;
    groups_in_batch++;
  }

  // Clear compressed buffer size if compression has been turned off
  if (compression == Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; }

  // Initialize data pointers in batch
  uint32_t const num_stats_bfr =
    (stats_granularity != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0;

  // Buffers need to be padded.
  // Required by `gpuGatherPages`.
  rmm::device_buffer uncomp_bfr(
    cudf::util::round_up_safe(max_uncomp_bfr_size, BUFFER_PADDING_MULTIPLE), stream);
  rmm::device_buffer comp_bfr(cudf::util::round_up_safe(max_comp_bfr_size, BUFFER_PADDING_MULTIPLE),
                              stream);
  rmm::device_buffer col_idx_bfr(column_index_bfr_size, stream);
  rmm::device_uvector<EncPage> pages(num_pages, stream);

  // This contains stats for both the pages and the rowgroups. TODO: make them separate.
  rmm::device_uvector<statistics_chunk> page_stats(num_stats_bfr, stream);
  auto bfr_i = static_cast<uint8_t*>(col_idx_bfr.data());
  for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) {
    // Uncompressed/compressed buffers are reused across batches (sized for the largest batch);
    // the column index buffer (bfr_i) is NOT reset — each chunk gets its own slice.
    auto bfr   = static_cast<uint8_t*>(uncomp_bfr.data());
    auto bfr_c = static_cast<uint8_t*>(comp_bfr.data());
    for (auto j = 0; j < batch_list[b]; j++, r++) {
      for (auto i = 0; i < num_columns; i++) {
        EncColumnChunk& ck   = chunks[r][i];
        ck.uncompressed_bfr  = bfr;
        ck.compressed_bfr    = bfr_c;
        ck.column_index_blob = bfr_i;
        bfr += ck.bfr_size;
        bfr_c += ck.compressed_size;
        if (stats_granularity == statistics_freq::STATISTICS_COLUMN) {
          ck.column_index_size = column_index_buffer_size(&ck, column_index_truncate_length);
          bfr_i += ck.column_index_size;
        }
      }
    }
  }

  if (num_pages != 0) {
    init_encoder_pages(chunks,
                       col_desc,
                       {pages.data(), pages.size()},
                       comp_page_sizes,
                       (num_stats_bfr) ? page_stats.data() : nullptr,
                       (num_stats_bfr) ? frag_stats.data() : nullptr,
                       num_columns,
                       num_pages,
                       num_stats_bfr,
                       compression,
                       max_page_size_bytes,
                       max_page_size_rows,
                       write_v2_headers,
                       stream);
  }

  // Check device write support for all chunks and initialize bounce_buffer.
  bool all_device_write   = true;
  uint32_t max_write_size = 0;
  std::optional<writer_compression_statistics> comp_stats;
  if (collect_compression_statistics) { comp_stats = writer_compression_statistics{}; }

  // Encode row groups in batches
  for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) {
    // Count pages in this batch
    auto const rnext               = r + batch_list[b];
    auto const first_page_in_batch = chunks[r][0].first_page;
    auto const first_page_in_next_batch =
      (rnext < num_rowgroups) ? chunks[rnext][0].first_page : num_pages;
    auto const pages_in_batch = first_page_in_next_batch - first_page_in_batch;

    encode_pages(
      chunks,
      {pages.data(), pages.size()},
      pages_in_batch,
      first_page_in_batch,
      batch_list[b],
      r,
      (stats_granularity == statistics_freq::STATISTICS_PAGE) ? page_stats.data() : nullptr,
      (stats_granularity != statistics_freq::STATISTICS_NONE) ? page_stats.data() + num_pages
                                                              : nullptr,
      (stats_granularity == statistics_freq::STATISTICS_COLUMN) ? page_stats.data() : nullptr,
      comp_stats,
      compression,
      column_index_truncate_length,
      write_v2_headers,
      stream);

    bool need_sync{false};

    // Gather per-chunk results (sizes, encodings, serialized statistics) into file metadata.
    for (; r < rnext; r++) {
      int p           = rg_to_part[r];
      int global_r    = global_rowgroup_base[p] + r - first_rg_in_part[p];
      auto& row_group = agg_meta->file(p).row_groups[global_r];

      for (auto i = 0; i < num_columns; i++) {
        auto const& ck          = chunks[r][i];
        auto const dev_bfr      = ck.is_compressed ? ck.compressed_bfr : ck.uncompressed_bfr;
        auto& column_chunk_meta = row_group.columns[i].meta_data;

        if (ck.is_compressed) { column_chunk_meta.codec = compression; }
        if (!out_sink[p]->is_device_write_preferred(ck.compressed_size)) {
          all_device_write = false;
        }
        max_write_size = std::max(max_write_size, ck.compressed_size);

        update_chunk_encodings(column_chunk_meta.encodings, ck.encodings);

        if (ck.ck_stat_size != 0) {
          std::vector<uint8_t> const stats_blob = cudf::detail::make_std_vector_sync(
            device_span<uint8_t const>(dev_bfr, ck.ck_stat_size), stream);
          CompactProtocolReader cp(stats_blob.data(), stats_blob.size());
          cp.read(&column_chunk_meta.statistics);
          need_sync = true;
        }

        row_group.total_byte_size += ck.compressed_size;
        column_chunk_meta.total_uncompressed_size = ck.bfr_size;
        column_chunk_meta.total_compressed_size   = ck.compressed_size;
      }
    }

    // Sync before calling the next `encode_pages` which may alter the stats data.
    if (need_sync) { stream.synchronize(); }
  }

  // Pinned staging buffer for sinks that cannot accept device writes directly.
  auto bounce_buffer =
    cudf::detail::pinned_host_vector<uint8_t>(all_device_write ? 0 : max_write_size);

  return std::tuple{std::move(agg_meta),
                    std::move(pages),
                    std::move(chunks),
                    std::move(global_rowgroup_base),
                    std::move(first_rg_in_part),
                    std::move(batch_list),
                    std::move(rg_to_part),
                    std::move(comp_stats),
                    std::move(uncomp_bfr),
                    std::move(comp_bfr),
                    std::move(col_idx_bfr),
                    std::move(bounce_buffer)};
}

}  // namespace

writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks,
                   parquet_writer_options const& options,
                   single_write_mode mode,
                   rmm::cuda_stream_view stream)
  : _stream(stream),
    _compression(to_parquet_compression(options.get_compression())),
    _max_row_group_size{options.get_row_group_size_bytes()},
    _max_row_group_rows{options.get_row_group_size_rows()},
    _max_page_size_bytes(max_page_bytes(_compression, options.get_max_page_size_bytes())),
    _max_page_size_rows(options.get_max_page_size_rows()),
    _stats_granularity(options.get_stats_level()),
    _dict_policy(options.get_dictionary_policy()),
    _max_dictionary_size(options.get_max_dictionary_size()),
    _max_page_fragment_size(options.get_max_page_fragment_size()),
    _int96_timestamps(options.is_enabled_int96_timestamps()),
    _utc_timestamps(options.is_enabled_utc_timestamps()),
    _write_v2_headers(options.is_enabled_write_v2_headers()),
    _column_index_truncate_length(options.get_column_index_truncate_length()),
    _kv_meta(options.get_key_value_metadata()),
    _single_write_mode(mode),
    _out_sink(std::move(sinks)),
    _compression_statistics{options.get_compression_statistics()}
{
  if (options.get_metadata()) {
    _table_meta = std::make_unique<table_input_metadata>(*options.get_metadata());
  }
  // Writes the Parquet magic bytes to every sink and records the initial chunk offsets.
  init_state();
}

writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks,
                   chunked_parquet_writer_options const& options,
                   single_write_mode mode,
                   rmm::cuda_stream_view stream)
  : _stream(stream),
    _compression(to_parquet_compression(options.get_compression())),
    _max_row_group_size{options.get_row_group_size_bytes()},
    _max_row_group_rows{options.get_row_group_size_rows()},
    _max_page_size_bytes(max_page_bytes(_compression, options.get_max_page_size_bytes())),
    _max_page_size_rows(options.get_max_page_size_rows()),
    _stats_granularity(options.get_stats_level()),
    _dict_policy(options.get_dictionary_policy()),
    _max_dictionary_size(options.get_max_dictionary_size()),
    _max_page_fragment_size(options.get_max_page_fragment_size()),
    _int96_timestamps(options.is_enabled_int96_timestamps()),
    _utc_timestamps(options.is_enabled_utc_timestamps()),
    _write_v2_headers(options.is_enabled_write_v2_headers()),
    _column_index_truncate_length(options.get_column_index_truncate_length()),
    _kv_meta(options.get_key_value_metadata()),
    _single_write_mode(mode),
    _out_sink(std::move(sinks)),
    _compression_statistics{options.get_compression_statistics()}
{
  if (options.get_metadata()) {
    _table_meta = std::make_unique<table_input_metadata>(*options.get_metadata());
  }
  // Writes the Parquet magic bytes to every sink and records the initial chunk offsets.
  init_state();
}

// Flushes the file footer (if not already closed) before destruction.
writer::impl::~impl() { close(); }

void writer::impl::init_state()
{
  _current_chunk_offset.resize(_out_sink.size());
  // Write file header
  file_header_s fhdr;
  fhdr.magic = parquet_magic;
  for (auto& sink : _out_sink) {
    sink->host_write(&fhdr, sizeof(fhdr));
  }
  // Every sink starts its first chunk right after the magic-bytes header.
  std::fill_n(_current_chunk_offset.begin(), _current_chunk_offset.size(), sizeof(file_header_s));
}

void writer::impl::update_compression_statistics(
  std::optional<writer_compression_statistics> const& compression_stats)
{
  // Accumulate into the user-provided statistics object, if one was requested.
  if (compression_stats.has_value() and _compression_statistics != nullptr) {
    *_compression_statistics += compression_stats.value();
  }
}

void writer::impl::write(table_view const& input, std::vector<partition_info> const& partitions)
{
  _last_write_successful = false;
  CUDF_EXPECTS(not _closed, "Data has already been flushed to out and closed");

  if (not _table_meta) { _table_meta = std::make_unique<table_input_metadata>(input); }
  fill_table_meta(_table_meta);

  // All kinds of memory allocation and data compressions/encoding are performed here.
  // If any error occurs, such as out-of-memory exception, the internal state of the current
  // writer is still intact.
  [[maybe_unused]] auto [updated_agg_meta,
                         pages,
                         chunks,
                         global_rowgroup_base,
                         first_rg_in_part,
                         batch_list,
                         rg_to_part,
                         comp_stats,
                         uncomp_bfr,   // unused, but contains data for later write to sink
                         comp_bfr,     // unused, but contains data for later write to sink
                         col_idx_bfr,  // unused, but contains data for later write to sink
                         bounce_buffer] = [&] {
    try {
      return convert_table_to_parquet_data(*_table_meta,
                                           input,
                                           partitions,
                                           _kv_meta,
                                           _agg_meta,
                                           _max_page_fragment_size,
                                           _max_row_group_size,
                                           _max_page_size_bytes,
                                           _max_row_group_rows,
                                           _max_page_size_rows,
                                           _column_index_truncate_length,
                                           _stats_granularity,
                                           _compression,
                                           _compression_statistics != nullptr,
                                           _dict_policy,
                                           _max_dictionary_size,
                                           _single_write_mode,
                                           _int96_timestamps,
                                           _utc_timestamps,
                                           _write_v2_headers,
                                           _out_sink,
                                           _stream);
    } catch (...) {  // catch any exception type
      CUDF_LOG_ERROR(
        "Parquet writer encountered exception during processing. "
        "No data has been written to the sink.");
      throw;  // this throws the same exception
    }
  }();

  // Compression/encoding were all successful. Now write the intermediate results.
  write_parquet_data_to_sink(updated_agg_meta,
                             pages,
                             chunks,
                             global_rowgroup_base,
                             first_rg_in_part,
                             batch_list,
                             rg_to_part,
                             bounce_buffer);

  update_compression_statistics(comp_stats);

  _last_write_successful = true;
}

void writer::impl::write_parquet_data_to_sink(
  std::unique_ptr<aggregate_writer_metadata>& updated_agg_meta,
  device_span<EncPage const> pages,
  host_2dspan<EncColumnChunk const> chunks,
  host_span<size_t const> global_rowgroup_base,
  host_span<int const> first_rg_in_part,
  host_span<size_type const> batch_list,
  host_span<int const> rg_to_part,
  host_span<uint8_t> bounce_buffer)
{
  // Conversion succeeded; only now commit the updated metadata to the writer's state.
  _agg_meta              = std::move(updated_agg_meta);
  auto const num_columns = chunks.size().second;

  for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) {
    auto const rnext = r + batch_list[b];
    std::vector<std::future<void>> write_tasks;

    for (; r < rnext; r++) {
      int const p        = rg_to_part[r];
      int const global_r = global_rowgroup_base[p] + r - first_rg_in_part[p];
      auto& row_group    = _agg_meta->file(p).row_groups[global_r];

      for (std::size_t i = 0; i < num_columns; i++) {
        auto const& ck     = chunks[r][i];
        auto const dev_bfr = ck.is_compressed ? ck.compressed_bfr : ck.uncompressed_bfr;

        // Skip the range [0, ck.ck_stat_size) since it has already been copied to host
        // and stored in _agg_meta before.
        if (_out_sink[p]->is_device_write_preferred(ck.compressed_size)) {
          write_tasks.push_back(_out_sink[p]->device_write_async(
            dev_bfr + ck.ck_stat_size, ck.compressed_size, _stream));
        } else {
          CUDF_EXPECTS(bounce_buffer.size() >= ck.compressed_size,
                       "Bounce buffer was not properly initialized.");
          CUDF_CUDA_TRY(cudaMemcpyAsync(bounce_buffer.data(),
                                        dev_bfr + ck.ck_stat_size,
                                        ck.compressed_size,
                                        cudaMemcpyDefault,
                                        _stream.value()));
          // Synchronize so the staged bytes are valid before the host-side write.
          _stream.synchronize();
          _out_sink[p]->host_write(bounce_buffer.data(), ck.compressed_size);
        }

        auto& column_chunk_meta = row_group.columns[i].meta_data;
        column_chunk_meta.data_page_offset =
          _current_chunk_offset[p] + ((ck.use_dictionary) ?
ck.dictionary_size : 0); column_chunk_meta.dictionary_page_offset = (ck.use_dictionary) ? _current_chunk_offset[p] : 0; _current_chunk_offset[p] += ck.compressed_size; } } for (auto const& task : write_tasks) { task.wait(); } } if (_stats_granularity == statistics_freq::STATISTICS_COLUMN) { // need pages on host to create offset_indexes auto const h_pages = cudf::detail::make_host_vector_sync(pages, _stream); // add column and offset indexes to metadata for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) { auto const rnext = r + batch_list[b]; auto curr_page_idx = chunks[r][0].first_page; for (; r < rnext; r++) { int const p = rg_to_part[r]; int const global_r = global_rowgroup_base[p] + r - first_rg_in_part[p]; auto const& row_group = _agg_meta->file(p).row_groups[global_r]; for (std::size_t i = 0; i < num_columns; i++) { EncColumnChunk const& ck = chunks[r][i]; auto const& column_chunk_meta = row_group.columns[i].meta_data; // start transfer of the column index std::vector<uint8_t> column_idx; column_idx.resize(ck.column_index_size); CUDF_CUDA_TRY(cudaMemcpyAsync(column_idx.data(), ck.column_index_blob, ck.column_index_size, cudaMemcpyDefault, _stream.value())); // calculate offsets while the column index is transferring int64_t curr_pg_offset = column_chunk_meta.data_page_offset; OffsetIndex offset_idx; for (uint32_t pg = 0; pg < ck.num_pages; pg++) { auto const& enc_page = h_pages[curr_page_idx++]; // skip dict pages if (enc_page.page_type == PageType::DICTIONARY_PAGE) { continue; } int32_t this_page_size = enc_page.hdr_size + enc_page.max_data_size; // first_row_idx is relative to start of row group PageLocation loc{curr_pg_offset, this_page_size, enc_page.start_row - ck.start_row}; offset_idx.page_locations.push_back(loc); curr_pg_offset += this_page_size; } _stream.synchronize(); _agg_meta->file(p).offset_indexes.emplace_back(std::move(offset_idx)); _agg_meta->file(p).column_indexes.emplace_back(std::move(column_idx)); } } } } } 
std::unique_ptr<std::vector<uint8_t>> writer::impl::close( std::vector<std::string> const& column_chunks_file_path) { if (_closed) { return nullptr; } _closed = true; if (not _last_write_successful) { return nullptr; } for (size_t p = 0; p < _out_sink.size(); p++) { std::vector<uint8_t> buffer; CompactProtocolWriter cpw(&buffer); file_ender_s fendr; if (_stats_granularity == statistics_freq::STATISTICS_COLUMN) { auto& fmd = _agg_meta->file(p); // write column indices, updating column metadata along the way int chunkidx = 0; for (auto& r : fmd.row_groups) { for (auto& c : r.columns) { auto const& index = fmd.column_indexes[chunkidx++]; c.column_index_offset = _out_sink[p]->bytes_written(); c.column_index_length = index.size(); _out_sink[p]->host_write(index.data(), index.size()); } } // write offset indices, updating column metadata along the way chunkidx = 0; for (auto& r : fmd.row_groups) { for (auto& c : r.columns) { auto const& offsets = fmd.offset_indexes[chunkidx++]; buffer.resize(0); int32_t len = cpw.write(offsets); c.offset_index_offset = _out_sink[p]->bytes_written(); c.offset_index_length = len; _out_sink[p]->host_write(buffer.data(), buffer.size()); } } } buffer.resize(0); fendr.footer_len = static_cast<uint32_t>(cpw.write(_agg_meta->get_metadata(p))); fendr.magic = parquet_magic; _out_sink[p]->host_write(buffer.data(), buffer.size()); _out_sink[p]->host_write(&fendr, sizeof(fendr)); _out_sink[p]->flush(); } // Optionally output raw file metadata with the specified column chunk file path if (column_chunks_file_path.size() > 0) { CUDF_EXPECTS(column_chunks_file_path.size() == _agg_meta->num_files(), "Expected one column chunk path per output file"); _agg_meta->set_file_paths(column_chunks_file_path); file_header_s fhdr = {parquet_magic}; std::vector<uint8_t> buffer; CompactProtocolWriter cpw(&buffer); buffer.insert(buffer.end(), reinterpret_cast<uint8_t const*>(&fhdr), reinterpret_cast<uint8_t const*>(&fhdr) + sizeof(fhdr)); file_ender_s fendr; 
fendr.magic = parquet_magic; fendr.footer_len = static_cast<uint32_t>(cpw.write(_agg_meta->get_merged_metadata())); buffer.insert(buffer.end(), reinterpret_cast<uint8_t const*>(&fendr), reinterpret_cast<uint8_t const*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(buffer)); } else { return {nullptr}; } return nullptr; } // Forward to implementation writer::writer(std::vector<std::unique_ptr<data_sink>> sinks, parquet_writer_options const& options, single_write_mode mode, rmm::cuda_stream_view stream) : _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream)) { } writer::writer(std::vector<std::unique_ptr<data_sink>> sinks, chunked_parquet_writer_options const& options, single_write_mode mode, rmm::cuda_stream_view stream) : _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream)) { } // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write(table_view const& table, std::vector<partition_info> const& partitions) { _impl->write( table, partitions.empty() ? 
std::vector<partition_info>{{0, table.num_rows()}} : partitions); } // Forward to implementation std::unique_ptr<std::vector<uint8_t>> writer::close( std::vector<std::string> const& column_chunks_file_path) { return _impl->close(column_chunks_file_path); } std::unique_ptr<std::vector<uint8_t>> writer::merge_row_group_metadata( std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list) { std::vector<uint8_t> output; CompactProtocolWriter cpw(&output); FileMetaData md; md.row_groups.reserve(metadata_list.size()); for (auto const& blob : metadata_list) { CompactProtocolReader cpreader( blob.get()->data(), std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s)); cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header if (md.num_rows == 0) { cpreader.read(&md); } else { FileMetaData tmp; cpreader.read(&tmp); md.row_groups.insert(md.row_groups.end(), std::make_move_iterator(tmp.row_groups.begin()), std::make_move_iterator(tmp.row_groups.end())); md.num_rows += tmp.num_rows; } } // Remove any LogicalType::UNKNOWN annotations that were passed in as they can confuse // column type inferencing. // See https://github.com/rapidsai/cudf/pull/14264#issuecomment-1778311615 for (auto& se : md.schema) { if (se.logical_type.has_value() && se.logical_type.value().type == LogicalType::UNKNOWN) { se.logical_type = thrust::nullopt; } } // Thrift-encode the resulting output file_header_s fhdr; file_ender_s fendr; fhdr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<uint8_t const*>(&fhdr), reinterpret_cast<uint8_t const*>(&fhdr) + sizeof(fhdr)); fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); fendr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<uint8_t const*>(&fendr), reinterpret_cast<uint8_t const*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(output)); } } // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/writer_impl.hpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file writer_impl.hpp
 * @brief cuDF-IO Parquet writer class implementation header
 */

#pragma once

#include "parquet.hpp"
#include "parquet_gpu.hpp"

#include <cudf/io/data_sink.hpp>

#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/io/detail/parquet.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>

#include <rmm/cuda_stream_view.hpp>

#include <memory>
#include <string>
#include <vector>

namespace cudf::io::parquet::detail {

// Forward internal classes
struct aggregate_writer_metadata;

using cudf::detail::device_2dspan;
using cudf::detail::host_2dspan;
using cudf::detail::hostdevice_2dvector;

/**
 * @brief Implementation for parquet writer
 */
class writer::impl {
 public:
  /**
   * @brief Constructor with writer options.
   *
   * @param sink data_sink's for storing dataset
   * @param options Settings for controlling behavior
   * @param mode Option to write at once or in chunks
   * @param stream CUDA stream used for device memory operations and kernel launches
   */
  explicit impl(std::vector<std::unique_ptr<data_sink>> sinks,
                parquet_writer_options const& options,
                cudf::io::detail::single_write_mode mode,
                rmm::cuda_stream_view stream);

  /**
   * @brief Constructor with chunked writer options.
   *
   * @param sink data_sink's for storing dataset
   * @param options Settings for controlling behavior
   * @param mode Option to write at once or in chunks
   * @param stream CUDA stream used for device memory operations and kernel launches
   */
  explicit impl(std::vector<std::unique_ptr<data_sink>> sinks,
                chunked_parquet_writer_options const& options,
                cudf::io::detail::single_write_mode mode,
                rmm::cuda_stream_view stream);

  /**
   * @brief Destructor to complete any incomplete write and release resources.
   */
  ~impl();

  /**
   * @brief Initializes the states before writing.
   */
  void init_state();

  /**
   * @brief Updates writer-level statistics with data from the current table.
   *
   * @param compression_stats Optional compression statistics from the current table
   */
  void update_compression_statistics(
    std::optional<writer_compression_statistics> const& compression_stats);

  /**
   * @brief Writes a single subtable as part of a larger parquet file/table write,
   * normally used for chunked writing.
   *
   * @throws rmm::bad_alloc if there is insufficient space for temporary buffers
   *
   * @param[in] table The table information to be written
   * @param[in] partitions Optional partitions to divide the table into. If specified, must be same
   * size as number of sinks.
   */
  void write(table_view const& table, std::vector<partition_info> const& partitions);

  /**
   * @brief Finishes the chunked/streamed write process.
   *
   * @param[in] column_chunks_file_path Column chunks file path to be set in the raw output metadata
   * @return A parquet-compatible blob that contains the data for all rowgroups in the list only if
   * `column_chunks_file_path` is provided, else null.
   */
  std::unique_ptr<std::vector<uint8_t>> close(
    std::vector<std::string> const& column_chunks_file_path = {});

 private:
  /**
   * @brief Write the intermediate Parquet data into the data sink.
   *
   * The intermediate data is generated from processing (compressing/encoding) a cuDF input table
   * by `convert_table_to_parquet_data` called in the `write()` function.
   *
   * @param updated_agg_meta The updated aggregate data after processing the input
   * @param pages Encoded pages
   * @param chunks Column chunks
   * @param global_rowgroup_base Numbers of rowgroups in each file/partition
   * @param first_rg_in_part The first rowgroup in each partition
   * @param batch_list The batches of rowgroups to encode
   * @param rg_to_part A map from rowgroup to partition
   * @param[out] bounce_buffer Temporary host output buffer
   */
  void write_parquet_data_to_sink(std::unique_ptr<aggregate_writer_metadata>& updated_agg_meta,
                                  device_span<EncPage const> pages,
                                  host_2dspan<EncColumnChunk const> chunks,
                                  host_span<size_t const> global_rowgroup_base,
                                  host_span<int const> first_rg_in_part,
                                  host_span<size_type const> batch_list,
                                  host_span<int const> rg_to_part,
                                  host_span<uint8_t> bounce_buffer);

  // Cuda stream to be used
  rmm::cuda_stream_view _stream;

  // Writer options (immutable for the lifetime of the writer).
  Compression const _compression;
  size_t const _max_row_group_size;
  size_type const _max_row_group_rows;
  size_t const _max_page_size_bytes;
  size_type const _max_page_size_rows;
  statistics_freq const _stats_granularity;
  dictionary_policy const _dict_policy;
  size_t const _max_dictionary_size;
  std::optional<size_type> const _max_page_fragment_size;
  bool const _int96_timestamps;
  bool const _utc_timestamps;
  bool const _write_v2_headers;
  int32_t const _column_index_truncate_length;
  // Optional user metadata.
  std::vector<std::map<std::string, std::string>> const _kv_meta;
  // Special parameter only used by `write()` to
  // indicate that we are guaranteeing a single table
  // write. This enables some internal optimizations.
  cudf::io::detail::single_write_mode const _single_write_mode;
  // One sink per output file/partition.
  std::vector<std::unique_ptr<data_sink>> const _out_sink;

  // Internal states, filled during `write()` and written to sink during `write` and `close()`.
  std::unique_ptr<table_input_metadata> _table_meta;
  std::unique_ptr<aggregate_writer_metadata> _agg_meta;
  // Per-sink current write position for rowgroups/chunks.
  std::vector<std::size_t> _current_chunk_offset;
  // Optional output for per-write compression statistics.
  std::shared_ptr<writer_compression_statistics> _compression_statistics;
  // To track if the last write(table) call completed successfully.
  bool _last_write_successful = false;
  // To track if the output has been written to sink.
  bool _closed = false;
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader_impl_chunking.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reader_impl.hpp"
#include "reader_impl_chunking.hpp"

#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>

#include <io/utilities/time_utils.cuh>

#include <rmm/exec_policy.hpp>

#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sort.h>

namespace cudf::io::parquet::detail {

namespace {

// Per-page running totals used to decide where to split the output into chunks.
struct cumulative_row_info {
  size_t row_count;   // cumulative row count
  size_t size_bytes;  // cumulative size in bytes
  int key;            // schema index
};

#if defined(CHUNKING_DEBUG)
// Debug-only: dumps per-page cumulative {row_count, size} grouped by schema.
void print_cumulative_page_info(cudf::detail::hostdevice_vector<PageInfo>& pages,
                                rmm::device_uvector<int32_t> const& page_index,
                                rmm::device_uvector<cumulative_row_info> const& c_info,
                                rmm::cuda_stream_view stream)
{
  pages.device_to_host_sync(stream);

  printf("------------\nCumulative sizes by page\n");

  std::vector<int> schemas(pages.size());
  std::vector<int> h_page_index(pages.size());
  CUDF_CUDA_TRY(cudaMemcpy(
    h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), cudaMemcpyDefault));
  std::vector<cumulative_row_info> h_cinfo(pages.size());
  CUDF_CUDA_TRY(cudaMemcpy(
    h_cinfo.data(), c_info.data(), sizeof(cumulative_row_info) * pages.size(), cudaMemcpyDefault));
  auto schema_iter = cudf::detail::make_counting_transform_iterator(
    0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; });
  thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin());
  auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end());
  schemas.resize(last - schemas.begin());
  printf("Num schemas: %lu\n", schemas.size());

  for (size_t idx = 0; idx < schemas.size(); idx++) {
    printf("Schema %d\n", schemas[idx]);
    for (size_t pidx = 0; pidx < pages.size(); pidx++) {
      auto const& page = pages[h_page_index[pidx]];
      if (page.flags & PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) {
        continue;
      }
      printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes);
    }
  }
}

// Debug-only: dumps a cumulative-size array and marks where the chosen splits land.
void print_cumulative_row_info(host_span<cumulative_row_info const> sizes,
                               std::string const& label,
                               std::optional<std::vector<chunk_read_info>> splits = std::nullopt)
{
  if (splits.has_value()) {
    printf("------------\nSplits\n");
    for (size_t idx = 0; idx < splits->size(); idx++) {
      printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows);
    }
  }

  printf("------------\nCumulative sizes %s\n", label.c_str());
  for (size_t idx = 0; idx < sizes.size(); idx++) {
    printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key);
    if (splits.has_value()) {
      // if we have a split at this row count and this is the last instance of this row count
      auto start = thrust::make_transform_iterator(
        splits->begin(), [](chunk_read_info const& i) { return i.skip_rows; });
      auto end   = start + splits->size();
      auto split = std::find(start, end, sizes[idx].row_count);
      auto const split_index = [&]() -> int {
        if (split != end &&
            ((idx == sizes.size() - 1) || (sizes[idx + 1].row_count > sizes[idx].row_count))) {
          return static_cast<int>(std::distance(start, split));
        }
        return idx == 0 ? 0 : -1;
      }();
      if (split_index >= 0) {
        printf(" <-- split {%lu, %lu}",
               splits.value()[split_index].skip_rows,
               splits.value()[split_index].num_rows);
      }
    }
    printf("\n");
  }
}
#endif  // CHUNKING_DEBUG

/**
 * @brief Functor which reduces two cumulative_row_info structs of the same key.
 */
struct cumulative_row_sum {
  cumulative_row_info operator()
    __device__(cumulative_row_info const& a, cumulative_row_info const& b) const
  {
    return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key};
  }
};

/**
 * @brief Functor which computes the total data size for a given type of cudf column.
 *
 * In the case of strings, the return size does not include the chars themselves. That
 * information is tracked separately (see PageInfo::str_bytes).
 */
struct row_size_functor {
  // Size of the validity bitmask, rounded up to 4-byte words (0 if not nullable).
  __device__ size_t validity_size(size_t num_rows, bool nullable)
  {
    return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0;
  }

  template <typename T>
  __device__ size_t operator()(size_t num_rows, bool nullable)
  {
    auto const element_size = sizeof(device_storage_type_t<T>);
    return (element_size * num_rows) + validity_size(num_rows, nullable);
  }
};

template <>
__device__ size_t row_size_functor::operator()<list_view>(size_t num_rows, bool nullable)
{
  auto const offset_size = sizeof(size_type);
  // NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset
  // for the entire column, whereas this is adding an extra offset per page.  So we will get a
  // small over-estimate of the real size of the order :  # of pages * 4 bytes. It seems better
  // to overestimate size somewhat than to underestimate it and potentially generate chunks
  // that are too large.
  return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}

template <>
__device__ size_t row_size_functor::operator()<struct_view>(size_t num_rows, bool nullable)
{
  return validity_size(num_rows, nullable);
}

template <>
__device__ size_t row_size_functor::operator()<string_view>(size_t num_rows, bool nullable)
{
  // only returns the size of offsets and validity. the size of the actual string chars
  // is tracked separately.
  auto const offset_size = sizeof(size_type);
  // see note about offsets in the list_view template.
  return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}

/**
 * @brief Functor which computes the total output cudf data size for all of
 * the data in this page.
 *
 * Sums across all nesting levels.
 */
struct get_cumulative_row_info {
  PageInfo const* const pages;

  __device__ cumulative_row_info operator()(size_type index)
  {
    auto const& page = pages[index];
    // dictionary pages contribute no output rows/bytes
    if (page.flags & PAGEINFO_FLAGS_DICTIONARY) {
      return cumulative_row_info{0, 0, page.src_col_schema};
    }

    // total nested size, not counting string data
    auto iter =
      cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) {
        auto const& pni = page.nesting[i];
        return cudf::type_dispatcher(
          data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable);
      });

    size_t const row_count = static_cast<size_t>(page.nesting[0].size);
    return {
      row_count,
      thrust::reduce(thrust::seq, iter, iter + page.num_output_nesting_levels) + page.str_bytes,
      page.src_col_schema};
  }
};

/**
 * @brief Functor which computes the effective size of all input columns by page.
 *
 * For a given row, we want to find the cost of all pages for all columns involved
 * in loading up to that row.  The complication here is that not all pages are the
 * same size between columns. Example:
 *
 *               page row counts
 * Column A:    0 <----> 100 <----> 200
 * Column B:    0 <---------------> 200 <--------> 400
 *                        |
 * if we decide to split at row 100, we don't really know the actual amount of bytes in column B
 * at that point.  So we have to proceed as if we are taking the bytes from all 200 rows of that
 * page. Essentially, a conservative over-estimate of the real size.
 */
struct row_total_size {
  cumulative_row_info const* c_info;
  size_type const* key_offsets;
  size_t num_keys;

  __device__ cumulative_row_info operator()(cumulative_row_info const& i)
  {
    // sum sizes for each input column at this row
    size_t sum = 0;
    for (int idx = 0; idx < num_keys; idx++) {
      auto const start = key_offsets[idx];
      auto const end   = key_offsets[idx + 1];
      auto iter        = cudf::detail::make_counting_transform_iterator(
        0, [&] __device__(size_type i) { return c_info[i].row_count; });
      auto const page_index =
        thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter;
      sum += c_info[page_index].size_bytes;
    }
    return {i.row_count, sum, i.key};
  }
};

/**
 * @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read
 * limit, determine the set of splits.
 *
 * @param sizes Vector of cumulative {row_count, byte_size} pairs
 * @param num_rows Total number of rows to read
 * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
 */
std::vector<chunk_read_info> find_splits(std::vector<cumulative_row_info> const& sizes,
                                         size_t num_rows,
                                         size_t chunk_read_limit)
{
  // now we have an array of {row_count, real output bytes}. just walk through it and generate
  // splits.
  // TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch
  // sizes are reasonably large, this shouldn't iterate too many times
  std::vector<chunk_read_info> splits;
  {
    size_t cur_pos             = 0;
    size_t cur_cumulative_size = 0;
    size_t cur_row_count       = 0;
    // sizes relative to the start of the current chunk (cur_cumulative_size is captured by ref)
    auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) {
      return i.size_bytes - cur_cumulative_size;
    });
    auto end   = start + sizes.size();
    while (cur_row_count < num_rows) {
      int64_t split_pos =
        thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start;

      // if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back
      // one.
      if (static_cast<size_t>(split_pos) >= sizes.size() ||
          (sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) {
        split_pos--;
      }

      // best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in
      // a loop because all of the cumulative sizes for all the pages are sorted into one big list.
      // so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in
      // the list twice. so we have to iterate until we skip past all of them.  The idea is that we
      // either do this, or we have to call unique() on the input first.
      while (split_pos < (static_cast<int64_t>(sizes.size()) - 1) &&
             (split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) {
        split_pos++;
      }

      auto const start_row = cur_row_count;
      cur_row_count        = sizes[split_pos].row_count;
      splits.push_back(chunk_read_info{start_row, cur_row_count - start_row});
      cur_pos             = split_pos;
      cur_cumulative_size = sizes[split_pos].size_bytes;
    }
  }
  // print_cumulative_row_info(sizes, "adjusted", splits);

  return splits;
}

/**
 * @brief Converts cuDF units to Parquet units.
 *
 * @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type.
 */
[[nodiscard]] std::tuple<int32_t, int32_t, int8_t> conversion_info(
  type_id column_type_id,
  type_id timestamp_type_id,
  Type physical,
  thrust::optional<ConvertedType> converted,
  int32_t length)
{
  int32_t type_width = (physical == FIXED_LEN_BYTE_ARRAY) ? length : 0;
  int32_t clock_rate = 0;
  if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) {
    type_width = 1;  // I32 -> I8
  } else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) {
    type_width = 2;  // I32 -> I16
  } else if (column_type_id == type_id::INT32) {
    type_width = 4;  // str -> hash32
  } else if (is_chrono(data_type{column_type_id})) {
    clock_rate = to_clockrate(timestamp_type_id);
  }

  int8_t converted_type = converted.value_or(UNKNOWN);
  if (converted_type == DECIMAL && column_type_id != type_id::FLOAT64 &&
      not cudf::is_fixed_point(data_type{column_type_id})) {
    converted_type = UNKNOWN;  // Not converting to float64 or decimal
  }
  return std::make_tuple(type_width, clock_rate, converted_type);
}

/**
 * @brief Return the required number of bits to store a value.
 */
template <typename T = uint8_t>
[[nodiscard]] T required_bits(uint32_t max_level)
{
  return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}

// Orders cumulative_row_info entries by their row count.
struct row_count_compare {
  __device__ bool operator()(cumulative_row_info const& a, cumulative_row_info const& b)
  {
    return a.row_count < b.row_count;
  }
};

}  // anonymous namespace

// Builds one ColumnChunkDesc per (row group, input column) pair for the whole file,
// capping the last chunk's row count so the total does not exceed global_num_rows.
void reader::impl::create_global_chunk_info()
{
  auto const num_rows         = _file_itm_data.global_num_rows;
  auto const& row_groups_info = _file_itm_data.row_groups;
  auto& chunks                = _file_itm_data.chunks;

  // Descriptors for all the chunks that make up the selected columns
  auto const num_input_columns = _input_columns.size();
  auto const num_chunks        = row_groups_info.size() * num_input_columns;

  // Initialize column chunk information
  auto remaining_rows = num_rows;
  for (auto const& rg : row_groups_info) {
    auto const& row_group      = _metadata->get_row_group(rg.index, rg.source_index);
    auto const row_group_start = rg.start_row;
    auto const row_group_rows  = std::min<int>(remaining_rows, row_group.num_rows);

    // generate ColumnChunkDesc objects for everything to be decoded (all input columns)
    for (size_t i = 0; i < num_input_columns; ++i) {
      auto col = _input_columns[i];
      // look up metadata
      auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx);
      auto& schema   = _metadata->get_schema(col.schema_idx);

      auto [type_width, clock_rate, converted_type] =
        conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()),
                        _timestamp_type.id(),
                        schema.type,
                        schema.converted_type,
                        schema.type_length);

      chunks.push_back(ColumnChunkDesc(col_meta.total_compressed_size,
                                       nullptr,
                                       col_meta.num_values,
                                       schema.type,
                                       type_width,
                                       row_group_start,
                                       row_group_rows,
                                       schema.max_definition_level,
                                       schema.max_repetition_level,
                                       _metadata->get_output_nesting_depth(col.schema_idx),
                                       required_bits(schema.max_definition_level),
                                       required_bits(schema.max_repetition_level),
                                       col_meta.codec,
                                       converted_type,
                                       schema.logical_type,
                                       schema.decimal_precision,
                                       clock_rate,
                                       i,
                                       col.schema_idx));
    }

    remaining_rows -= row_group_rows;
  }
}

// Partitions the selected row groups into input passes such that each pass's compressed size
// stays under _input_pass_read_limit (a pass always contains at least one row group).
void reader::impl::compute_input_passes()
{
  // at this point, row_groups has already been filtered down to just the row groups we need to
  // handle optional skip_rows/num_rows parameters.
  auto const& row_groups_info = _file_itm_data.row_groups;

  // if the user hasn't specified an input size limit, read everything in a single pass.
  if (_input_pass_read_limit == 0) {
    _file_itm_data.input_pass_row_group_offsets.push_back(0);
    _file_itm_data.input_pass_row_group_offsets.push_back(row_groups_info.size());
    return;
  }

  // generate passes. make sure to account for the case where a single row group doesn't fit within
  // the read limit.
  std::size_t const read_limit =
    _input_pass_read_limit > 0 ? _input_pass_read_limit : std::numeric_limits<std::size_t>::max();
  std::size_t cur_pass_byte_size = 0;
  std::size_t cur_rg_start       = 0;
  std::size_t cur_row_count      = 0;
  _file_itm_data.input_pass_row_group_offsets.push_back(0);
  _file_itm_data.input_pass_row_count.push_back(0);

  for (size_t cur_rg_index = 0; cur_rg_index < row_groups_info.size(); cur_rg_index++) {
    auto const& rgi       = row_groups_info[cur_rg_index];
    auto const& row_group = _metadata->get_row_group(rgi.index, rgi.source_index);

    // can we add this row group
    if (cur_pass_byte_size + row_group.total_byte_size >= read_limit) {
      // A single row group (the current one) is larger than the read limit:
      // We always need to include at least one row group, so end the pass at the end of the current
      // row group
      if (cur_rg_start == cur_rg_index) {
        _file_itm_data.input_pass_row_group_offsets.push_back(cur_rg_index + 1);
        _file_itm_data.input_pass_row_count.push_back(cur_row_count + row_group.num_rows);
        cur_rg_start       = cur_rg_index + 1;
        cur_pass_byte_size = 0;
      }
      // End the pass at the end of the previous row group
      else {
        _file_itm_data.input_pass_row_group_offsets.push_back(cur_rg_index);
        _file_itm_data.input_pass_row_count.push_back(cur_row_count);
        cur_rg_start       = cur_rg_index;
        cur_pass_byte_size = row_group.total_byte_size;
      }
    } else {
      cur_pass_byte_size += row_group.total_byte_size;
    }
    cur_row_count += row_group.num_rows;
  }

  // add the last pass if necessary
  if (_file_itm_data.input_pass_row_group_offsets.back() != row_groups_info.size()) {
    _file_itm_data.input_pass_row_group_offsets.push_back(row_groups_info.size());
    _file_itm_data.input_pass_row_count.push_back(cur_row_count);
  }
}

// Resets _pass_itm_data and populates it with the row groups, chunk descriptors, and
// skip_rows/num_rows bounds for the current input pass.
void reader::impl::setup_next_pass()
{
  // this will also cause the previous pass information to be deleted
  _pass_itm_data = std::make_unique<cudf::io::parquet::detail::pass_intermediate_data>();

  // setup row groups to be loaded for this pass
  auto const row_group_start = _file_itm_data.input_pass_row_group_offsets[_current_input_pass];
  auto const row_group_end = _file_itm_data.input_pass_row_group_offsets[_current_input_pass + 1];
  auto const num_row_groups = row_group_end - row_group_start;
  _pass_itm_data->row_groups.resize(num_row_groups);
  std::copy(_file_itm_data.row_groups.begin() + row_group_start,
            _file_itm_data.row_groups.begin() + row_group_end,
            _pass_itm_data->row_groups.begin());

  auto const num_passes = _file_itm_data.input_pass_row_group_offsets.size() - 1;
  CUDF_EXPECTS(_current_input_pass < num_passes, "Encountered an invalid read pass index");

  auto const chunks_per_rowgroup = _input_columns.size();
  auto const num_chunks          = chunks_per_rowgroup * num_row_groups;

  auto chunk_start = _file_itm_data.chunks.begin() + (row_group_start * chunks_per_rowgroup);
  auto chunk_end   = _file_itm_data.chunks.begin() + (row_group_end * chunks_per_rowgroup);

  _pass_itm_data->chunks = cudf::detail::hostdevice_vector<ColumnChunkDesc>(num_chunks, _stream);
  std::copy(chunk_start, chunk_end, _pass_itm_data->chunks.begin());

  // adjust skip_rows and num_rows by what's available in the row groups we are processing
  if (num_passes == 1) {
    _pass_itm_data->skip_rows = _file_itm_data.global_skip_rows;
    _pass_itm_data->num_rows  = _file_itm_data.global_num_rows;
  } else {
    auto const global_start_row = _file_itm_data.global_skip_rows;
    auto const global_end_row   = global_start_row + _file_itm_data.global_num_rows;
    auto const start_row =
      std::max(_file_itm_data.input_pass_row_count[_current_input_pass], global_start_row);
    auto const end_row =
      std::min(_file_itm_data.input_pass_row_count[_current_input_pass + 1], global_end_row);

    // skip_rows is always global in the sense that it is relative to the first row of
    // everything we will be reading, regardless of what pass we are on.
    // num_rows is how many rows we are reading this pass.
    _pass_itm_data->skip_rows =
      global_start_row + _file_itm_data.input_pass_row_count[_current_input_pass];
    _pass_itm_data->num_rows = end_row - start_row;
  }
}

// Computes the output chunk boundaries for the current pass so that each chunk's decoded size
// stays under _output_chunk_read_limit (single chunk covering everything when the limit is 0).
void reader::impl::compute_splits_for_pass()
{
  auto const skip_rows = _pass_itm_data->skip_rows;
  auto const num_rows  = _pass_itm_data->num_rows;

  // simple case : no chunk size, no splits
  if (_output_chunk_read_limit <= 0) {
    _pass_itm_data->output_chunk_read_info = std::vector<chunk_read_info>{{skip_rows, num_rows}};
    return;
  }

  auto& pages = _pass_itm_data->pages_info;

  auto const& page_keys  = _pass_itm_data->page_keys;
  auto const& page_index = _pass_itm_data->page_index;

  // generate cumulative row counts and sizes
  rmm::device_uvector<cumulative_row_info> c_info(page_keys.size(), _stream);
  // convert PageInfo to cumulative_row_info
  auto page_input =
    thrust::make_transform_iterator(page_index.begin(), get_cumulative_row_info{pages.device_ptr()});
  thrust::inclusive_scan_by_key(rmm::exec_policy(_stream),
                                page_keys.begin(),
                                page_keys.end(),
                                page_input,
                                c_info.begin(),
                                thrust::equal_to{},
                                cumulative_row_sum{});
  // print_cumulative_page_info(pages, page_index, c_info, stream);

  // sort by row count
  rmm::device_uvector<cumulative_row_info> c_info_sorted{c_info, _stream};
  thrust::sort(
    rmm::exec_policy(_stream), c_info_sorted.begin(), c_info_sorted.end(), row_count_compare{});

  // std::vector<cumulative_row_info> h_c_info_sorted(c_info_sorted.size());
  // CUDF_CUDA_TRY(cudaMemcpy(h_c_info_sorted.data(),
  //                          c_info_sorted.data(),
  //                          sizeof(cumulative_row_info) * c_info_sorted.size(),
  //                          cudaMemcpyDefault));
  // print_cumulative_row_info(h_c_info_sorted, "raw");

  // generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per
  // key
  rmm::device_uvector<size_type> key_offsets(page_keys.size() + 1, _stream);
  auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(_stream),
                                                     page_keys.begin(),
                                                     page_keys.end(),
                                                     thrust::make_constant_iterator(1),
                                                     thrust::make_discard_iterator(),
                                                     key_offsets.begin())
                                 .second;
  size_t const num_unique_keys = key_offsets_end - key_offsets.begin();
  thrust::exclusive_scan(
    rmm::exec_policy(_stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin());

  // adjust the cumulative info such that for each row count, the size includes any pages that span
  // that row count. this is so that if we have this case:
  //              page row counts
  // Column A:    0 <----> 100 <----> 200
  // Column B:    0 <---------------> 200 <--------> 400
  //                        |
  // if we decide to split at row 100, we don't really know the actual amount of bytes in column B
  // at that point.  So we have to proceed as if we are taking the bytes from all 200 rows of that
  // page.
  //
  rmm::device_uvector<cumulative_row_info> aggregated_info(c_info.size(), _stream);
  thrust::transform(rmm::exec_policy(_stream),
                    c_info_sorted.begin(),
                    c_info_sorted.end(),
                    aggregated_info.begin(),
                    row_total_size{c_info.data(), key_offsets.data(), num_unique_keys});

  // bring back to the cpu
  std::vector<cumulative_row_info> h_aggregated_info(aggregated_info.size());
  CUDF_CUDA_TRY(cudaMemcpyAsync(h_aggregated_info.data(),
                                aggregated_info.data(),
                                sizeof(cumulative_row_info) * c_info.size(),
                                cudaMemcpyDefault,
                                _stream.value()));
  _stream.synchronize();

  // generate the actual splits
  _pass_itm_data->output_chunk_read_info =
    find_splits(h_aggregated_info, num_rows, _output_chunk_read_limit);
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/parquet_common.hpp
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> namespace cudf::io::parquet::detail { // Max decimal precisions according to the parquet spec: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#decimal auto constexpr MAX_DECIMAL32_PRECISION = 9; auto constexpr MAX_DECIMAL64_PRECISION = 18; auto constexpr MAX_DECIMAL128_PRECISION = 38; // log10(2^(sizeof(int128_t) * 8 - 1) - 1) /** * @brief Basic data types in Parquet, determines how data is physically stored */ enum Type : int8_t { UNDEFINED_TYPE = -1, // Undefined for non-leaf nodes BOOLEAN = 0, INT32 = 1, INT64 = 2, INT96 = 3, // Deprecated FLOAT = 4, DOUBLE = 5, BYTE_ARRAY = 6, FIXED_LEN_BYTE_ARRAY = 7, }; /** * @brief High-level data types in Parquet, determines how data is logically interpreted */ enum ConvertedType { UNKNOWN = -1, // No type information present UTF8 = 0, // a BYTE_ARRAY may contain UTF8 encoded chars MAP = 1, // a map is converted as an optional field containing a repeated key/value pair MAP_KEY_VALUE = 2, // a key/value pair is converted into a group of two fields LIST = 3, // a list is converted into an optional field containing a repeated field for its values ENUM = 4, // an enum is converted into a binary field DECIMAL = 5, // A decimal value. 
10^(-scale) encoded as 2's complement big endian // (precision=number of digits, scale=location of decimal point) DATE = 6, // A Date, stored as days since Unix epoch, encoded as the INT32 physical type. TIME_MILLIS = 7, // A time. The total number of milliseconds since midnight.The value is stored // as an INT32 physical type. TIME_MICROS = 8, // A time. The total number of microseconds since midnight. The value is stored // as an INT64 physical type. TIMESTAMP_MILLIS = 9, // A date/time combination, recorded as milliseconds since the Unix epoch // using physical type of INT64. TIMESTAMP_MICROS = 10, // A date/time combination, microseconds since the Unix epoch as INT64 UINT_8 = 11, // An unsigned integer 8-bit value as INT32 UINT_16 = 12, // An unsigned integer 16-bit value as INT32 UINT_32 = 13, // An unsigned integer 32-bit value as INT32 UINT_64 = 14, // An unsigned integer 64-bit value as INT64 INT_8 = 15, // A signed integer 8-bit value as INT32 INT_16 = 16, // A signed integer 16-bit value as INT32 INT_32 = 17, // A signed integer 32-bit value as INT32 INT_64 = 18, // A signed integer 8-bit value as INT64 JSON = 19, // A JSON document embedded within a single UTF8 column. BSON = 20, // A BSON document embedded within a single BINARY column. INTERVAL = 21, // This type annotates a time interval stored as a FIXED_LEN_BYTE_ARRAY of length // 12 for 3 integers {months,days,milliseconds} NA = 25, // No Type information, For eg, all-nulls. 
}; /** * @brief Encoding types for the actual data stream */ enum class Encoding : uint8_t { PLAIN = 0, GROUP_VAR_INT = 1, // Deprecated, never used PLAIN_DICTIONARY = 2, RLE = 3, BIT_PACKED = 4, // Deprecated by parquet-format in 2013, superseded by RLE DELTA_BINARY_PACKED = 5, DELTA_LENGTH_BYTE_ARRAY = 6, DELTA_BYTE_ARRAY = 7, RLE_DICTIONARY = 8, BYTE_STREAM_SPLIT = 9, NUM_ENCODINGS = 10, }; /** * @brief Compression codec used for compressed data pages */ enum Compression { UNCOMPRESSED = 0, SNAPPY = 1, GZIP = 2, LZO = 3, BROTLI = 4, // Added in 2.3.2 LZ4 = 5, // Added in 2.3.2 ZSTD = 6, // Added in 2.3.2 }; /** * @brief Compression codec used for compressed data pages */ enum FieldRepetitionType { NO_REPETITION_TYPE = -1, REQUIRED = 0, // This field is required (can not be null) and each record has exactly 1 value. OPTIONAL = 1, // The field is optional (can be null) and each record has 0 or 1 values. REPEATED = 2, // The field is repeated and can contain 0 or more values }; /** * @brief Types of pages */ enum class PageType : uint8_t { DATA_PAGE = 0, INDEX_PAGE = 1, DICTIONARY_PAGE = 2, DATA_PAGE_V2 = 3, }; /** * @brief Enum to annotate whether lists of min/max elements inside ColumnIndex * are ordered and if so, in which direction. */ enum BoundaryOrder { UNORDERED = 0, ASCENDING = 1, DESCENDING = 2, }; /** * @brief Thrift compact protocol struct field types */ enum FieldType { ST_FLD_TRUE = 1, ST_FLD_FALSE = 2, ST_FLD_BYTE = 3, ST_FLD_I16 = 4, ST_FLD_I32 = 5, ST_FLD_I64 = 6, ST_FLD_DOUBLE = 7, ST_FLD_BINARY = 8, ST_FLD_LIST = 9, ST_FLD_SET = 10, ST_FLD_MAP = 11, ST_FLD_STRUCT = 12, }; } // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/page_data.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "page_decode.cuh" #include <io/utilities/column_buffer.hpp> #include <cudf/hashing/detail/murmurhash3_x86_32.cuh> #include <rmm/exec_policy.hpp> #include <thrust/reduce.h> namespace cudf::io::parquet::detail { namespace { constexpr int decode_block_size = 128; constexpr int rolling_buf_size = decode_block_size * 2; /** * @brief Output a string descriptor * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dstv Pointer to row output data (string descriptor or 32-bit hash) */ template <typename state_buf> inline __device__ void gpuOutputString(page_state_s* s, state_buf* sb, int src_pos, void* dstv) { auto [ptr, len] = gpuGetStringData(s, sb, src_pos); // make sure to only hash `BYTE_ARRAY` when specified with the output type size if (s->dtype_len == 4 and (s->col.data_type & 7) == BYTE_ARRAY) { // Output hash. This hash value is used if the option to convert strings to // categoricals is enabled. The seed value is chosen arbitrarily. 
uint32_t constexpr hash_seed = 33; cudf::string_view const sv{ptr, static_cast<size_type>(len)}; *static_cast<uint32_t*>(dstv) = cudf::hashing::detail::MurmurHash3_x86_32<cudf::string_view>{hash_seed}(sv); } else { // Output string descriptor auto* dst = static_cast<string_index_pair*>(dstv); dst->first = ptr; dst->second = len; } } /** * @brief Output a boolean * * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputBoolean(state_buf* sb, int src_pos, uint8_t* dst) { *dst = sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)]; } /** * @brief Store a 32-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint32_t* dst, uint8_t const* src8, uint32_t dict_pos, uint32_t dict_size) { uint32_t bytebuf; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<uint32_t const*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } *dst = bytebuf; } /** * @brief Store a 64-bit data element * * @param[out] dst ptr to output * @param[in] src8 raw input bytes * @param[in] dict_pos byte position in dictionary * @param[in] dict_size size of dictionary */ inline __device__ void gpuStoreOutput(uint2* dst, uint8_t const* src8, uint32_t dict_pos, uint32_t dict_size) { uint2 v; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos < dict_size) { v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t 
const*>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } } else { v.x = v.y = 0; } *dst = v; } /** * @brief Convert an INT96 Spark timestamp to 64-bit timestamp * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[out] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputInt96Timestamp(page_state_s* s, state_buf* sb, int src_pos, int64_t* dst) { using cuda::std::chrono::duration_cast; uint8_t const* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 >= dict_size) { *dst = 0; return; } uint3 v; int64_t nanos, days; v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); v.z = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 12); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, v.z, ofs); v.z = __funnelshift_r(v.z, next, ofs); } nanos = v.y; nanos <<= 32; nanos |= v.x; // Convert from Julian day at noon to UTC seconds days = static_cast<int32_t>(v.z); cudf::duration_D d_d{ days - 2440588}; // TBD: Should be noon instead of midnight, but this matches pyarrow *dst = [&]() { switch (s->col.ts_clock_rate) { case 1: // seconds return duration_cast<duration_s>(d_d).count() + duration_cast<duration_s>(duration_ns{nanos}).count(); case 1'000: // milliseconds return 
duration_cast<duration_ms>(d_d).count() + duration_cast<duration_ms>(duration_ns{nanos}).count(); case 1'000'000: // microseconds return duration_cast<duration_us>(d_d).count() + duration_cast<duration_us>(duration_ns{nanos}).count(); case 1'000'000'000: // nanoseconds default: return duration_cast<cudf::duration_ns>(d_d).count() + nanos; } }(); } /** * @brief Output a 64-bit timestamp * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename state_buf> inline __device__ void gpuOutputInt64Timestamp(page_state_s* s, state_buf* sb, int src_pos, int64_t* dst) { uint8_t const* src8; uint32_t dict_pos, dict_size = s->dict_size, ofs; int64_t ts; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; src8 = s->dict_base; } else { // Plain dict_pos = src_pos; src8 = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits if (dict_pos + 4 < dict_size) { uint2 v; int64_t val; int32_t ts_scale; v.x = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 0); v.y = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); if (ofs) { uint32_t next = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 8); v.x = __funnelshift_r(v.x, v.y, ofs); v.y = __funnelshift_r(v.y, next, ofs); } val = v.y; val <<= 32; val |= v.x; // Output to desired clock rate ts_scale = s->ts_scale; if (ts_scale < 0) { // round towards negative infinity int sign = (val < 0); ts = ((val + sign) / -ts_scale) + sign; } else { ts = val * ts_scale; } } else { ts = 0; } *dst = ts; } /** * @brief Output a byte array as int. 
* * @param[in] ptr Pointer to the byte array * @param[in] len Byte array length * @param[out] dst Pointer to row output data */ template <typename T> __device__ void gpuOutputByteArrayAsInt(char const* ptr, int32_t len, T* dst) { T unscaled = 0; for (auto i = 0; i < len; i++) { uint8_t v = ptr[i]; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. unscaled <<= (sizeof(T) - len) * 8; unscaled >>= (sizeof(T) - len) * 8; *dst = unscaled; } /** * @brief Output a fixed-length byte array as int. * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T, typename state_buf> __device__ void gpuOutputFixedLenByteArrayAsInt(page_state_s* s, state_buf* sb, int src_pos, T* dst) { uint32_t const dtype_len_in = s->dtype_len_in; uint8_t const* data = s->dict_base ? s->dict_base : s->data_start; uint32_t const pos = (s->dict_base ? ((s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0) : src_pos) * dtype_len_in; uint32_t const dict_size = s->dict_size; T unscaled = 0; for (unsigned int i = 0; i < dtype_len_in; i++) { uint32_t v = (pos + i < dict_size) ? data[pos + i] : 0; unscaled = (unscaled << 8) | v; } // Shift the unscaled value up and back down when it isn't all 8 bytes, // which sign extend the value for correctly representing negative numbers. 
if (dtype_len_in < sizeof(T)) { unscaled <<= (sizeof(T) - dtype_len_in) * 8; unscaled >>= (sizeof(T) - dtype_len_in) * 8; } *dst = unscaled; } /** * @brief Output a small fixed-length value * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst Pointer to row output data */ template <typename T, typename state_buf> inline __device__ void gpuOutputFast(page_state_s* s, state_buf* sb, int src_pos, T* dst) { uint8_t const* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; gpuStoreOutput(dst, dict, dict_pos, dict_size); } /** * @brief Output a N-byte value * * @param[in,out] s Page state input/output * @param[out] sb Page state buffer output * @param[in] src_pos Source position * @param[in] dst8 Pointer to row output data * @param[in] len Length of element */ template <typename state_buf> static __device__ void gpuOutputGeneric( page_state_s* s, state_buf* sb, int src_pos, uint8_t* dst8, int len) { uint8_t const* dict; uint32_t dict_pos, dict_size = s->dict_size; if (s->dict_base) { // Dictionary dict_pos = (s->dict_bits > 0) ? sb->dict_idx[rolling_index<state_buf::dict_buf_size>(src_pos)] : 0; dict = s->dict_base; } else { // Plain dict_pos = src_pos; dict = s->data_start; } dict_pos *= (uint32_t)s->dtype_len_in; if (len & 3) { // Generic slow path for (unsigned int i = 0; i < len; i++) { dst8[i] = (dict_pos + i < dict_size) ? 
dict[dict_pos + i] : 0; } } else { // Copy 4 bytes at a time uint8_t const* src8 = dict; unsigned int ofs = 3 & reinterpret_cast<size_t>(src8); src8 -= ofs; // align to 32-bit boundary ofs <<= 3; // bytes -> bits for (unsigned int i = 0; i < len; i += 4) { uint32_t bytebuf; if (dict_pos < dict_size) { bytebuf = *reinterpret_cast<uint32_t const*>(src8 + dict_pos); if (ofs) { uint32_t bytebufnext = *reinterpret_cast<uint32_t const*>(src8 + dict_pos + 4); bytebuf = __funnelshift_r(bytebuf, bytebufnext, ofs); } } else { bytebuf = 0; } dict_pos += 4; *reinterpret_cast<uint32_t*>(dst8 + i) = bytebuf; } } } /** * @brief Kernel for computing the column data stored in the pages * * This function will write the page data and the page data's validity to the * output specified in the page's column chunk. If necessary, additional * conversion will be performed to translate from the Parquet datatype to * desired output datatype (ex. 32-bit to 16-bit, string to hash). * * @param pages List of pages * @param chunks List of column chunks * @param min_row Row index to start reading at * @param num_rows Maximum number of rows to read * @param error_code Error code to set if an error is encountered */ template <int lvl_buf_size, typename level_t> __global__ void __launch_bounds__(decode_block_size) gpuDecodePageData(PageInfo* pages, device_span<ColumnChunkDesc const> chunks, size_t min_row, size_t num_rows, int32_t* error_code) { __shared__ __align__(16) page_state_s state_g; __shared__ __align__(16) page_state_buffers_s<rolling_buf_size, rolling_buf_size, rolling_buf_size> state_buffers; page_state_s* const s = &state_g; auto* const sb = &state_buffers; int page_idx = blockIdx.x; int t = threadIdx.x; int out_thread0; [[maybe_unused]] null_count_back_copier _{s, t}; if (!setupLocalPageInfo(s, &pages[page_idx], chunks, min_row, num_rows, mask_filter{decode_kernel_mask::GENERAL}, true)) { return; } bool const has_repetition = s->col.max_level[level_type::REPETITION] > 0; if 
(s->dict_base) { out_thread0 = (s->dict_bits > 0) ? 64 : 32; } else { switch (s->col.data_type & 7) { case BOOLEAN: [[fallthrough]]; case BYTE_ARRAY: [[fallthrough]]; case FIXED_LEN_BYTE_ARRAY: out_thread0 = 64; break; default: out_thread0 = 32; } } PageNestingDecodeInfo* nesting_info_base = s->nesting_info; __shared__ level_t rep[rolling_buf_size]; // circular buffer of repetition level values __shared__ level_t def[rolling_buf_size]; // circular buffer of definition level values // skipped_leaf_values will always be 0 for flat hierarchies. uint32_t skipped_leaf_values = s->page.skipped_leaf_values; while (s->error == 0 && (s->input_value_count < s->num_input_values || s->src_pos < s->nz_count)) { int target_pos; int src_pos = s->src_pos; if (t < out_thread0) { target_pos = min(src_pos + 2 * (decode_block_size - out_thread0), s->nz_count + (decode_block_size - out_thread0)); } else { target_pos = min(s->nz_count, src_pos + decode_block_size - out_thread0); if (out_thread0 > 32) { target_pos = min(target_pos, s->dict_pos); } } // TODO(ets): see if this sync can be removed __syncthreads(); if (t < 32) { // decode repetition and definition levels. // - update validity vectors // - updates offsets (for nested columns) // - produces non-NULL value indices in s->nz_idx for subsequent decoding gpuDecodeLevels<lvl_buf_size, level_t>(s, sb, target_pos, rep, def, t); } else if (t < out_thread0) { // skipped_leaf_values will always be 0 for flat hierarchies. 
uint32_t src_target_pos = target_pos + skipped_leaf_values; // WARP1: Decode dictionary indices, booleans or string positions if (s->dict_base) { src_target_pos = gpuDecodeDictionaryIndices<false>(s, sb, src_target_pos, t & 0x1f).first; } else if ((s->col.data_type & 7) == BOOLEAN) { src_target_pos = gpuDecodeRleBooleans(s, sb, src_target_pos, t & 0x1f); } else if ((s->col.data_type & 7) == BYTE_ARRAY or (s->col.data_type & 7) == FIXED_LEN_BYTE_ARRAY) { gpuInitStringDescriptors<false>(s, sb, src_target_pos, t & 0x1f); } if (t == 32) { s->dict_pos = src_target_pos; } } else { // WARP1..WARP3: Decode values int const dtype = s->col.data_type & 7; src_pos += t - out_thread0; // the position in the output column/buffer int dst_pos = sb->nz_idx[rolling_index<rolling_buf_size>(src_pos)]; // for the flat hierarchy case we will be reading from the beginning of the value stream, // regardless of the value of first_row. so adjust our destination offset accordingly. // example: // - user has passed skip_rows = 2, so our first_row to output is 2 // - the row values we get from nz_idx will be // 0, 1, 2, 3, 4 .... // - by shifting these values by first_row, the sequence becomes // -1, -2, 0, 1, 2 ... // - so we will end up ignoring the first two input rows, and input rows 2..n will // get written to the output starting at position 0. // if (!has_repetition) { dst_pos -= s->first_row; } // target_pos will always be properly bounded by num_rows, but dst_pos may be negative (values // before first_row) in the flat hierarchy case. if (src_pos < target_pos && dst_pos >= 0) { // src_pos represents the logical row position we want to read from. But in the case of // nested hierarchies, there is no 1:1 mapping of rows to values. So our true read position // has to take into account the # of values we have to skip in the page to get to the // desired logical row. For flat hierarchies, skipped_leaf_values will always be 0. 
uint32_t val_src_pos = src_pos + skipped_leaf_values; // nesting level that is storing actual leaf values int leaf_level_index = s->col.max_nesting_depth - 1; uint32_t dtype_len = s->dtype_len; void* dst = nesting_info_base[leaf_level_index].data_out + static_cast<size_t>(dst_pos) * dtype_len; if (dtype == BYTE_ARRAY) { if (s->col.converted_type == DECIMAL) { auto const [ptr, len] = gpuGetStringData(s, sb, val_src_pos); auto const decimal_precision = s->col.decimal_precision; if (decimal_precision <= MAX_DECIMAL32_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int32_t*>(dst)); } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) { gpuOutputByteArrayAsInt(ptr, len, static_cast<int64_t*>(dst)); } else { gpuOutputByteArrayAsInt(ptr, len, static_cast<__int128_t*>(dst)); } } else { gpuOutputString(s, sb, val_src_pos, dst); } } else if (dtype == BOOLEAN) { gpuOutputBoolean(sb, val_src_pos, static_cast<uint8_t*>(dst)); } else if (s->col.converted_type == DECIMAL) { switch (dtype) { case INT32: gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); break; case INT64: gpuOutputFast(s, sb, val_src_pos, static_cast<uint2*>(dst)); break; default: if (s->dtype_len_in <= sizeof(int32_t)) { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<int32_t*>(dst)); } else if (s->dtype_len_in <= sizeof(int64_t)) { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFixedLenByteArrayAsInt(s, sb, val_src_pos, static_cast<__int128_t*>(dst)); } break; } } else if (dtype == FIXED_LEN_BYTE_ARRAY) { gpuOutputString(s, sb, val_src_pos, dst); } else if (dtype == INT96) { gpuOutputInt96Timestamp(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else if (dtype_len == 8) { if (s->dtype_len_in == 4) { // Reading INT32 TIME_MILLIS into 64-bit DURATION_MILLISECONDS // TIME_MILLIS is the only duration type stored as int32: // 
https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#deprecated-time-convertedtype gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); } else if (s->ts_scale) { gpuOutputInt64Timestamp(s, sb, val_src_pos, static_cast<int64_t*>(dst)); } else { gpuOutputFast(s, sb, val_src_pos, static_cast<uint2*>(dst)); } } else if (dtype_len == 4) { gpuOutputFast(s, sb, val_src_pos, static_cast<uint32_t*>(dst)); } else { gpuOutputGeneric(s, sb, val_src_pos, static_cast<uint8_t*>(dst), dtype_len); } } if (t == out_thread0) { s->src_pos = target_pos; } } __syncthreads(); } if (t == 0 and s->error != 0) { set_error(s->error, error_code); } } struct mask_tform { __device__ uint32_t operator()(PageInfo const& p) { return static_cast<uint32_t>(p.kernel_mask); } }; } // anonymous namespace uint32_t GetAggregatedDecodeKernelMask(cudf::detail::hostdevice_vector<PageInfo>& pages, rmm::cuda_stream_view stream) { // determine which kernels to invoke auto mask_iter = thrust::make_transform_iterator(pages.d_begin(), mask_tform{}); return thrust::reduce( rmm::exec_policy(stream), mask_iter, mask_iter + pages.size(), 0U, thrust::bit_or<uint32_t>{}); } /** * @copydoc cudf::io::parquet::detail::DecodePageData */ void __host__ DecodePageData(cudf::detail::hostdevice_vector<PageInfo>& pages, cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks, size_t num_rows, size_t min_row, int level_type_size, int32_t* error_code, rmm::cuda_stream_view stream) { CUDF_EXPECTS(pages.size() > 0, "There is no page to decode"); dim3 dim_block(decode_block_size, 1); dim3 dim_grid(pages.size(), 1); // 1 threadblock per page if (level_type_size == 1) { gpuDecodePageData<rolling_buf_size, uint8_t><<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows, error_code); } else { gpuDecodePageData<rolling_buf_size, uint16_t><<<dim_grid, dim_block, 0, stream.value()>>>( pages.device_ptr(), chunks, min_row, num_rows, error_code); } } } // namespace 
cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader_impl_helpers.hpp
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "compact_protocol_reader.hpp"
#include "parquet_gpu.hpp"

#include <cudf/ast/detail/expression_transformer.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/types.hpp>

#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>

#include <list>
#include <tuple>
#include <vector>

namespace cudf::io::parquet::detail {

/**
 * @brief Identifies a single row group to be read and where its rows begin.
 */
struct row_group_info {
  size_type index;  // row group index within a file. aggregate_reader_metadata::get_row_group() is
                    // called with index and source_index
  size_t start_row;        // global starting row of this row group within the selection
  size_type source_index;  // file index.

  row_group_info() = default;

  row_group_info(size_type index, size_t start_row, size_type source_index)
    : index{index}, start_row{start_row}, source_index{source_index}
  {
  }
};

/**
 * @brief Function that translates Parquet datatype to cuDF type enum
 */
[[nodiscard]] type_id to_type_id(SchemaElement const& schema,
                                 bool strings_to_categorical,
                                 type_id timestamp_type_id);

/**
 * @brief Converts cuDF type enum to column logical type
 *
 * Decimal types carry the (negated) schema decimal scale; all other types map directly.
 */
[[nodiscard]] inline data_type to_data_type(type_id t_id, SchemaElement const& schema)
{
  return t_id == type_id::DECIMAL32 || t_id == type_id::DECIMAL64 || t_id == type_id::DECIMAL128
           ? data_type{t_id, numeric::scale_type{-schema.decimal_scale}}
           : data_type{t_id};
}

/**
 * @brief Class for parsing dataset metadata
 */
struct metadata : public FileMetaData {
  explicit metadata(datasource* source);
  void sanitize_schema();
};

/**
 * @brief Aggregates the footer metadata of all input sources and answers schema /
 * row-group / column-selection queries against the combined view.
 */
class aggregate_reader_metadata {
  std::vector<metadata> per_file_metadata;  // one parsed footer per source
  std::vector<std::unordered_map<std::string, std::string>> keyval_maps;  // per-source kv metadata
  int64_t num_rows;          // total rows across all sources
  size_type num_row_groups;  // total row groups across all sources

  /**
   * @brief Create a metadata object from each element in the source vector
   */
  static std::vector<metadata> metadatas_from_sources(
    host_span<std::unique_ptr<datasource> const> sources);

  /**
   * @brief Collect the keyvalue maps from each per-file metadata object into a vector of maps.
   */
  [[nodiscard]] std::vector<std::unordered_map<std::string, std::string>> collect_keyval_metadata()
    const;

  /**
   * @brief Sums up the number of rows of each source
   */
  [[nodiscard]] int64_t calc_num_rows() const;

  /**
   * @brief Sums up the number of row groups of each source
   */
  [[nodiscard]] size_type calc_num_row_groups() const;

 public:
  aggregate_reader_metadata(host_span<std::unique_ptr<datasource> const> sources);

  [[nodiscard]] RowGroup const& get_row_group(size_type row_group_index, size_type src_idx) const;

  [[nodiscard]] ColumnChunkMetaData const& get_column_metadata(size_type row_group_index,
                                                               size_type src_idx,
                                                               int schema_idx) const;

  [[nodiscard]] auto get_num_rows() const { return num_rows; }

  [[nodiscard]] auto get_num_row_groups() const { return num_row_groups; }

  // schema is taken from the first source; presumably all sources share a schema — the
  // constructor is expected to enforce this (TODO confirm at definition site)
  [[nodiscard]] auto const& get_schema(int schema_idx) const
  {
    return per_file_metadata[0].schema[schema_idx];
  }

  [[nodiscard]] auto const& get_key_value_metadata() const& { return keyval_maps; }
  [[nodiscard]] auto&& get_key_value_metadata() && { return std::move(keyval_maps); }

  /**
   * @brief Gets the concrete nesting depth of output cudf columns
   *
   * @param schema_index Schema index of the input column
   *
   * @return Nesting depth of the output cudf column for the given schema element
   */
  [[nodiscard]] inline int get_output_nesting_depth(int schema_index) const
  {
    auto& pfm = per_file_metadata[0];
    int depth = 0;

    // walk upwards, skipping repeated fields
    while (schema_index > 0) {
      auto const& elm = pfm.schema[schema_index];
      if (!elm.is_stub()) { depth++; }
      // schema of one-level encoding list doesn't contain nesting information, so we need to
      // manually add an extra nesting level
      if (elm.is_one_level_list(pfm.schema[elm.parent_idx])) { depth++; }
      schema_index = elm.parent_idx;
    }
    return depth;
  }

  /**
   * @brief Extracts the pandas "index_columns" section
   *
   * PANDAS adds its own metadata to the key_value section when writing out the
   * dataframe to a file to aid in exact reconstruction. The JSON-formatted
   * metadata contains the index column(s) and pandas-specific datatypes.
   *
   * @return comma-separated index column names in quotes
   */
  [[nodiscard]] std::string get_pandas_index() const;

  /**
   * @brief Extracts the column name(s) used for the row indexes in a dataframe
   *
   * @return Vector of index column name(s) parsed from the pandas metadata
   */
  [[nodiscard]] std::vector<std::string> get_pandas_index_names() const;

  /**
   * @brief Filters the row groups based on predicate filter
   *
   * @param row_group_indices Lists of row groups to read, one per source
   * @param output_dtypes List of output column datatypes
   * @param filter AST expression to filter row groups based on Column chunk statistics
   * @param stream CUDA stream used for device memory operations and kernel launches
   * @return Filtered row group indices, if any is filtered.
   */
  [[nodiscard]] std::optional<std::vector<std::vector<size_type>>> filter_row_groups(
    host_span<std::vector<size_type> const> row_group_indices,
    host_span<data_type const> output_dtypes,
    std::reference_wrapper<ast::expression const> filter,
    rmm::cuda_stream_view stream) const;

  /**
   * @brief Filters and reduces down to a selection of row groups
   *
   * The input `row_start` and `row_count` parameters will be recomputed and output as the valid
   * values based on the input row group list.
   *
   * @param row_group_indices Lists of row groups to read, one per source
   * @param row_start Starting row of the selection
   * @param row_count Total number of rows selected
   * @param output_dtypes List of output column datatypes
   * @param filter Optional AST expression to filter row groups based on Column chunk statistics
   * @param stream CUDA stream used for device memory operations and kernel launches
   * @return A tuple of corrected row_start, row_count and list of row group indexes and its
   * starting row
   */
  [[nodiscard]] std::tuple<int64_t, size_type, std::vector<row_group_info>> select_row_groups(
    host_span<std::vector<size_type> const> row_group_indices,
    int64_t row_start,
    std::optional<size_type> const& row_count,
    host_span<data_type const> output_dtypes,
    std::optional<std::reference_wrapper<ast::expression const>> filter,
    rmm::cuda_stream_view stream) const;

  /**
   * @brief Filters and reduces down to a selection of columns
   *
   * @param use_names List of paths of column names to select; `nullopt` if user did not select
   * columns to read
   * @param include_index Whether to always include the PANDAS index column(s)
   * @param strings_to_categorical Type conversion parameter
   * @param timestamp_type_id Type conversion parameter
   *
   * @return input column information, output column information, list of output column schema
   * indices
   */
  [[nodiscard]] std::tuple<std::vector<input_column_info>,
                           std::vector<cudf::io::detail::inline_column_buffer>,
                           std::vector<size_type>>
  select_columns(std::optional<std::vector<std::string>> const& use_names,
                 bool include_index,
                 bool strings_to_categorical,
                 type_id timestamp_type_id) const;
};

/**
 * @brief Converts named columns to index reference columns
 *
 */
class named_to_reference_converter : public ast::detail::expression_transformer {
 public:
  // Builds a column-name -> output-column-index map from the table metadata, then walks the
  // given expression (if any) so it can be rewritten with column_reference nodes.
  named_to_reference_converter(std::optional<std::reference_wrapper<ast::expression const>> expr,
                               table_metadata const& metadata)
    : metadata(metadata)
  {
    if (!expr.has_value()) return;
    // create map for column name.
    std::transform(
      thrust::make_zip_iterator(metadata.schema_info.cbegin(),
                                thrust::counting_iterator<size_t>(0)),
      thrust::make_zip_iterator(metadata.schema_info.cend(),
                                thrust::counting_iterator(metadata.schema_info.size())),
      std::inserter(column_name_to_index, column_name_to_index.end()),
      [](auto const& name_index) {
        return std::make_pair(thrust::get<0>(name_index).name, thrust::get<1>(name_index));
      });

    expr.value().get().accept(*this);
  }

  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::literal const& )
   */
  std::reference_wrapper<ast::expression const> visit(ast::literal const& expr) override;
  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::column_reference const& )
   */
  std::reference_wrapper<ast::expression const> visit(ast::column_reference const& expr) override;
  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::column_name_reference const& )
   */
  std::reference_wrapper<ast::expression const> visit(
    ast::column_name_reference const& expr) override;
  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::operation const& )
   */
  std::reference_wrapper<ast::expression const> visit(ast::operation const& expr) override;

  /**
   * @brief Returns the AST to apply on Column chunk statistics.
   *
   * @return AST operation expression
   */
  [[nodiscard]] std::optional<std::reference_wrapper<ast::expression const>> get_converted_expr()
    const
  {
    return _stats_expr;
  }

 private:
  std::vector<std::reference_wrapper<ast::expression const>> visit_operands(
    std::vector<std::reference_wrapper<ast::expression const>> operands);

  table_metadata const& metadata;
  std::unordered_map<std::string, size_type> column_name_to_index;
  std::optional<std::reference_wrapper<ast::expression const>> _stats_expr;
  // Using std::list or std::deque to avoid reference invalidation
  std::list<ast::column_reference> _col_ref;
  std::list<ast::operation> _operators;
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/parquet_gpu.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "parquet_gpu.hpp" #include <cudf/lists/lists_column_device_view.cuh> #include <cudf/types.hpp> #include <cuco/static_map.cuh> namespace cudf::io::parquet::detail { auto constexpr KEY_SENTINEL = size_type{-1}; auto constexpr VALUE_SENTINEL = size_type{-1}; using map_type = cuco::static_map<size_type, size_type>; /** * @brief The alias of `map_type::pair_atomic_type` class. * * Declare this struct by trivial subclassing instead of type aliasing so we can have forward * declaration of this struct somewhere else. */ struct slot_type : public map_type::pair_atomic_type {}; /** * @brief Return the byte length of parquet dtypes that are physically represented by INT32 */ inline uint32_t __device__ int32_logical_len(type_id id) { switch (id) { case cudf::type_id::INT8: [[fallthrough]]; case cudf::type_id::UINT8: return 1; case cudf::type_id::INT16: [[fallthrough]]; case cudf::type_id::UINT16: return 2; case cudf::type_id::DURATION_SECONDS: [[fallthrough]]; case cudf::type_id::DURATION_MILLISECONDS: return 8; default: return 4; } } /** * @brief Translate the row index of a parent column_device_view into the index of the first value * in the leaf child. * Only works in the context of parquet writer where struct columns are previously modified s.t. * they only have one immediate child. 
*/ inline size_type __device__ row_to_value_idx(size_type idx, parquet_column_device_view const& parquet_col) { // with a byte array, we can't go all the way down to the leaf node, but instead we want to leave // the size at the parent level because we are writing out parent row byte arrays. auto col = *parquet_col.parent_column; while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) { if (col.type().id() == type_id::STRUCT) { idx += col.offset(); col = col.child(0); } else { auto list_col = cudf::detail::lists_column_device_view(col); auto child = list_col.child(); if (parquet_col.output_as_byte_array && child.type().id() == type_id::UINT8) { break; } idx = list_col.offset_at(idx); col = child; } } return idx; } } // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/compact_protocol_writer.cpp
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compact_protocol_writer.hpp"

#include <cudf/utilities/error.hpp>

namespace cudf::io::parquet::detail {

// Each write() overload below serializes one parquet.thrift struct using the Thrift compact
// protocol. Field numbers in the calls correspond to the field ids in parquet.thrift; optional
// fields are emitted only when present. Each returns the number of bytes written.

/**
 * @brief Serializes the FileMetaData (parquet file footer) struct
 */
size_t CompactProtocolWriter::write(FileMetaData const& f)
{
  CompactProtocolFieldWriter c(*this);
  c.field_int(1, f.version);
  c.field_struct_list(2, f.schema);
  c.field_int(3, f.num_rows);
  c.field_struct_list(4, f.row_groups);
  if (not f.key_value_metadata.empty()) { c.field_struct_list(5, f.key_value_metadata); }
  if (not f.created_by.empty()) { c.field_string(6, f.created_by); }
  if (f.column_orders.has_value()) { c.field_struct_list(7, f.column_orders.value()); }
  return c.value();
}

size_t CompactProtocolWriter::write(DecimalType const& decimal)
{
  CompactProtocolFieldWriter c(*this);
  c.field_int(1, decimal.scale);
  c.field_int(2, decimal.precision);
  return c.value();
}

size_t CompactProtocolWriter::write(TimeUnit const& time_unit)
{
  CompactProtocolFieldWriter c(*this);
  // TimeUnit is a union: the active member is encoded as an empty struct at that field id
  switch (time_unit.type) {
    case TimeUnit::MILLIS:
    case TimeUnit::MICROS:
    case TimeUnit::NANOS: c.field_empty_struct(time_unit.type); break;
    default: CUDF_FAIL("Trying to write an invalid TimeUnit " + std::to_string(time_unit.type));
  }
  return c.value();
}

size_t CompactProtocolWriter::write(TimeType const& time)
{
  CompactProtocolFieldWriter c(*this);
  c.field_bool(1, time.isAdjustedToUTC);
  c.field_struct(2, time.unit);
  return c.value();
}

size_t CompactProtocolWriter::write(TimestampType const& timestamp)
{
  CompactProtocolFieldWriter c(*this);
  c.field_bool(1, timestamp.isAdjustedToUTC);
  c.field_struct(2, timestamp.unit);
  return c.value();
}

size_t CompactProtocolWriter::write(IntType const& integer)
{
  CompactProtocolFieldWriter c(*this);
  c.field_int8(1, integer.bitWidth);
  c.field_bool(2, integer.isSigned);
  return c.value();
}

size_t CompactProtocolWriter::write(LogicalType const& logical_type)
{
  CompactProtocolFieldWriter c(*this);
  // LogicalType is a union: parameterless members are empty structs, parameterized members
  // serialize their payload struct at the member's field id
  switch (logical_type.type) {
    case LogicalType::STRING:
    case LogicalType::MAP:
    case LogicalType::LIST:
    case LogicalType::ENUM:
    case LogicalType::DATE:
    case LogicalType::UNKNOWN:
    case LogicalType::JSON:
    case LogicalType::BSON: c.field_empty_struct(logical_type.type); break;
    case LogicalType::DECIMAL:
      c.field_struct(LogicalType::DECIMAL, logical_type.decimal_type.value());
      break;
    case LogicalType::TIME: c.field_struct(LogicalType::TIME, logical_type.time_type.value()); break;
    case LogicalType::TIMESTAMP:
      c.field_struct(LogicalType::TIMESTAMP, logical_type.timestamp_type.value());
      break;
    case LogicalType::INTEGER:
      c.field_struct(LogicalType::INTEGER, logical_type.int_type.value());
      break;
    default:
      CUDF_FAIL("Trying to write an invalid LogicalType " + std::to_string(logical_type.type));
  }
  return c.value();
}

size_t CompactProtocolWriter::write(SchemaElement const& s)
{
  CompactProtocolFieldWriter c(*this);
  // type is only set for leaf (primitive) elements; group nodes carry num_children instead
  if (s.type != UNDEFINED_TYPE) {
    c.field_int(1, s.type);
    if (s.type_length != 0) { c.field_int(2, s.type_length); }
  }
  if (s.repetition_type != NO_REPETITION_TYPE) { c.field_int(3, s.repetition_type); }
  c.field_string(4, s.name);

  if (s.type == UNDEFINED_TYPE) { c.field_int(5, s.num_children); }
  if (s.converted_type.has_value()) {
    c.field_int(6, s.converted_type.value());
    if (s.converted_type == DECIMAL) {
      c.field_int(7, s.decimal_scale);
      c.field_int(8, s.decimal_precision);
    }
  }
  if (s.field_id.has_value()) { c.field_int(9, s.field_id.value()); }
  if (s.logical_type.has_value()) { c.field_struct(10, s.logical_type.value()); }
  return c.value();
}

size_t CompactProtocolWriter::write(RowGroup const& r)
{
  CompactProtocolFieldWriter c(*this);
  c.field_struct_list(1, r.columns);
  c.field_int(2, r.total_byte_size);
  c.field_int(3, r.num_rows);
  return c.value();
}

size_t CompactProtocolWriter::write(KeyValue const& k)
{
  CompactProtocolFieldWriter c(*this);
  c.field_string(1, k.key);
  if (not k.value.empty()) { c.field_string(2, k.value); }
  return c.value();
}

size_t CompactProtocolWriter::write(ColumnChunk const& s)
{
  CompactProtocolFieldWriter c(*this);
  if (not s.file_path.empty()) { c.field_string(1, s.file_path); }
  c.field_int(2, s.file_offset);
  c.field_struct(3, s.meta_data);
  // offset/column index locations are only meaningful when a length was recorded
  if (s.offset_index_length != 0) {
    c.field_int(4, s.offset_index_offset);
    c.field_int(5, s.offset_index_length);
  }
  if (s.column_index_length != 0) {
    c.field_int(6, s.column_index_offset);
    c.field_int(7, s.column_index_length);
  }
  return c.value();
}

size_t CompactProtocolWriter::write(ColumnChunkMetaData const& s)
{
  CompactProtocolFieldWriter c(*this);
  c.field_int(1, s.type);
  c.field_int_list(2, s.encodings);
  c.field_string_list(3, s.path_in_schema);
  c.field_int(4, s.codec);
  c.field_int(5, s.num_values);
  c.field_int(6, s.total_uncompressed_size);
  c.field_int(7, s.total_compressed_size);
  // field 8 (optional key_value_metadata in parquet.thrift) is intentionally not written
  c.field_int(9, s.data_page_offset);
  if (s.index_page_offset != 0) { c.field_int(10, s.index_page_offset); }
  if (s.dictionary_page_offset != 0) { c.field_int(11, s.dictionary_page_offset); }
  c.field_struct(12, s.statistics);
  return c.value();
}

size_t CompactProtocolWriter::write(Statistics const& s)
{
  CompactProtocolFieldWriter c(*this);
  if (s.max.has_value()) { c.field_binary(1, s.max.value()); }
  if (s.min.has_value()) { c.field_binary(2, s.min.value()); }
  if (s.null_count.has_value()) { c.field_int(3, s.null_count.value()); }
  if (s.distinct_count.has_value()) { c.field_int(4, s.distinct_count.value()); }
  if (s.max_value.has_value()) { c.field_binary(5, s.max_value.value()); }
  if (s.min_value.has_value()) { c.field_binary(6, s.min_value.value()); }
  return c.value();
}

size_t CompactProtocolWriter::write(PageLocation const& s)
{
  CompactProtocolFieldWriter c(*this);
  c.field_int(1, s.offset);
  c.field_int(2, s.compressed_page_size);
  c.field_int(3, s.first_row_index);
  return c.value();
}

size_t CompactProtocolWriter::write(OffsetIndex const& s)
{
  CompactProtocolFieldWriter c(*this);
  c.field_struct_list(1, s.page_locations);
  return c.value();
}

size_t CompactProtocolWriter::write(ColumnOrder const& co)
{
  CompactProtocolFieldWriter c(*this);
  // ColumnOrder is a union with a single (empty-struct) member
  switch (co.type) {
    case ColumnOrder::TYPE_ORDER: c.field_empty_struct(co.type); break;
    default: CUDF_FAIL("Trying to write an invalid ColumnOrder " + std::to_string(co.type));
  }
  return c.value();
}

// --- low-level compact-protocol primitives -------------------------------------------------

void CompactProtocolFieldWriter::put_byte(uint8_t v) { writer.m_buf.push_back(v); }

void CompactProtocolFieldWriter::put_byte(uint8_t const* raw, uint32_t len)
{
  for (uint32_t i = 0; i < len; i++)
    writer.m_buf.push_back(raw[i]);
}

// Writes an unsigned varint (ULEB128); returns the number of bytes emitted.
uint32_t CompactProtocolFieldWriter::put_uint(uint64_t v)
{
  int l = 1;
  while (v > 0x7f) {
    put_byte(static_cast<uint8_t>(v | 0x80));
    v >>= 7;
    l++;
  }
  put_byte(static_cast<uint8_t>(v));
  return l;
}

// Zigzag-encodes a signed value (sign folded into the LSB), then writes it as a varint.
uint32_t CompactProtocolFieldWriter::put_int(int64_t v)
{
  int64_t s = (v < 0);
  return put_uint(((v ^ -s) << 1) + s);
}

// Compact-protocol field header: when the id delta from the previous field fits in 4 bits
// (1..15), pack it with the type nibble; otherwise write the type byte followed by the
// zigzag-varint field id.
void CompactProtocolFieldWriter::put_field_header(int f, int cur, int t)
{
  if (f > cur && f <= cur + 15)
    put_byte(((f - cur) << 4) | t);
  else {
    put_byte(t);
    put_int(f);
  }
}

// booleans are encoded entirely in the field header's type nibble (TRUE/FALSE types)
inline void CompactProtocolFieldWriter::field_bool(int field, bool b)
{
  put_field_header(field, current_field_value, b ? ST_FLD_TRUE : ST_FLD_FALSE);
  current_field_value = field;
}

inline void CompactProtocolFieldWriter::field_int8(int field, int8_t val)
{
  put_field_header(field, current_field_value, ST_FLD_BYTE);
  put_byte(val);
  current_field_value = field;
}

inline void CompactProtocolFieldWriter::field_int(int field, int32_t val)
{
  put_field_header(field, current_field_value, ST_FLD_I32);
  put_int(val);
  current_field_value = field;
}

inline void CompactProtocolFieldWriter::field_int(int field, int64_t val)
{
  put_field_header(field, current_field_value, ST_FLD_I64);
  put_int(val);
  current_field_value = field;
}

template <typename Enum>
inline void CompactProtocolFieldWriter::field_int_list(int field, std::vector<Enum> const& val)
{
  put_field_header(field, current_field_value, ST_FLD_LIST);
  // list header: size in the upper nibble when < 15, else 0xf marker followed by varint size
  put_byte((uint8_t)((std::min(val.size(), (size_t)0xfu) << 4) | ST_FLD_I32));
  if (val.size() >= 0xf) put_uint(val.size());
  for (auto& v : val) {
    put_int(static_cast<int32_t>(v));
  }
  current_field_value = field;
}

template <typename T>
inline void CompactProtocolFieldWriter::field_struct(int field, T const& val)
{
  put_field_header(field, current_field_value, ST_FLD_STRUCT);
  if constexpr (not std::is_empty_v<T>) {
    writer.write(val);  // write the struct if it's not empty
  } else {
    put_byte(0);  // otherwise, add a stop field
  }
  current_field_value = field;
}

inline void CompactProtocolFieldWriter::field_empty_struct(int field)
{
  put_field_header(field, current_field_value, ST_FLD_STRUCT);
  put_byte(0);  // add a stop field
  current_field_value = field;
}

template <typename T>
inline void CompactProtocolFieldWriter::field_struct_list(int field, std::vector<T> const& val)
{
  put_field_header(field, current_field_value, ST_FLD_LIST);
  // list header: size in the upper nibble when < 15, else 0xf marker followed by varint size
  put_byte((uint8_t)((std::min(val.size(), (size_t)0xfu) << 4) | ST_FLD_STRUCT));
  if (val.size() >= 0xf) put_uint(val.size());
  for (auto& v : val) {
    writer.write(v);
  }
  current_field_value = field;
}

// Terminates the current struct with a stop byte and returns its serialized length.
inline size_t CompactProtocolFieldWriter::value()
{
  put_byte(0);
  return writer.m_buf.size() - struct_start_pos;
}

// Writes an already-serialized struct blob verbatim, followed by a stop byte.
inline void CompactProtocolFieldWriter::field_struct_blob(int field,
                                                          std::vector<uint8_t> const& val)
{
  put_field_header(field, current_field_value, ST_FLD_STRUCT);
  put_byte(val.data(), static_cast<uint32_t>(val.size()));
  put_byte(0);
  current_field_value = field;
}

inline void CompactProtocolFieldWriter::field_binary(int field, std::vector<uint8_t> const& val)
{
  put_field_header(field, current_field_value, ST_FLD_BINARY);
  put_uint(val.size());
  put_byte(val.data(), static_cast<uint32_t>(val.size()));
  current_field_value = field;
}

inline void CompactProtocolFieldWriter::field_string(int field, std::string const& val)
{
  put_field_header(field, current_field_value, ST_FLD_BINARY);
  put_uint(val.size());
  // FIXME : replace reinterpret_cast
  put_byte(reinterpret_cast<uint8_t const*>(val.data()), static_cast<uint32_t>(val.size()));
  current_field_value = field;
}

inline void CompactProtocolFieldWriter::field_string_list(int field,
                                                          std::vector<std::string> const& val)
{
  put_field_header(field, current_field_value, ST_FLD_LIST);
  // list header: size in the upper nibble when < 15, else 0xf marker followed by varint size
  put_byte((uint8_t)((std::min(val.size(), (size_t)0xfu) << 4) | ST_FLD_BINARY));
  if (val.size() >= 0xf) put_uint(val.size());
  for (auto& v : val) {
    put_uint(v.size());
    // FIXME : replace reinterpret_cast
    put_byte(reinterpret_cast<uint8_t const*>(v.data()), static_cast<uint32_t>(v.size()));
  }
  current_field_value = field;
}

inline int CompactProtocolFieldWriter::current_field() { return current_field_value; }

inline void CompactProtocolFieldWriter::set_current_field(int const& field)
{
  current_field_value = field;
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/delta_enc.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "parquet_gpu.hpp"

#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>

#include <cub/cub.cuh>

namespace cudf::io::parquet::detail {

namespace delta {

// Writes an unsigned LEB128 varint and advances the output pointer.
inline __device__ void put_uleb128(uint8_t*& p, uleb128_t v)
{
  while (v > 0x7f) {
    *(p++) = v | 0x80;
    v >>= 7;
  }
  *(p++) = v;
}

// Zigzag-encodes a signed value (sign folded into the LSB) and writes it as a ULEB128 varint.
inline __device__ void put_zz128(uint8_t*& p, zigzag128_t v)
{
  zigzag128_t s = (v < 0);
  put_uleb128(p, (v ^ -s) * 2 + s);
}

// A block size of 128, with 4 mini-blocks of 32 values each fits nicely without consuming
// too much shared memory.
// The parquet spec requires block_size to be a multiple of 128, and values_per_mini_block
// to be a multiple of 32.
// TODO: if these are ever made configurable, be sure to fix the page size calculation in
// delta_data_len() (page_enc.cu).
constexpr int block_size            = 128;
constexpr int num_mini_blocks       = 4;
constexpr int values_per_mini_block = block_size / num_mini_blocks;
constexpr int buffer_size           = 2 * block_size;

// Extra sanity checks to enforce compliance with the parquet specification.
static_assert(block_size % 128 == 0);
static_assert(values_per_mini_block % 32 == 0);

using block_reduce = cub::BlockReduce<zigzag128_t, block_size>;
using warp_reduce  = cub::WarpReduce<uleb128_t>;
using index_scan   = cub::BlockScan<size_type, block_size>;

// Maps a logical value index into the circular _buffer of size buffer_size.
constexpr int rolling_idx(int index) { return rolling_index<buffer_size>(index); }

// Version of bit packer that can handle up to 64 bits values.
// T is the type to use for processing. if nbits <= 32 use uint32_t, otherwise unsigned long long
// (not uint64_t because of atomicOr's typing). allowing this to be selectable since there's a
// measurable impact to using the wider types.
template <typename scratch_type>
inline __device__ void bitpack_mini_block(
  uint8_t* dst, uleb128_t val, uint32_t count, uint8_t nbits, void* temp_space)
{
  using wide_type =
    std::conditional_t<std::is_same_v<scratch_type, unsigned long long>, __uint128_t, uint64_t>;
  using cudf::detail::warp_size;
  scratch_type constexpr mask = sizeof(scratch_type) * 8 - 1;
  auto constexpr div          = sizeof(scratch_type) * 8;

  auto const lane_id = threadIdx.x % warp_size;
  auto const warp_id = threadIdx.x / warp_size;

  // each warp gets its own warp_size-word region of the scratch space
  auto const scratch = reinterpret_cast<scratch_type*>(temp_space) + warp_id * warp_size;

  // zero out scratch
  scratch[lane_id] = 0;
  __syncwarp();

  // TODO: see if there is any savings using special packing for easy bitwidths (1,2,4,8,16...)
  // like what's done for the RLE encoder.
  if (nbits == div) {
    // full-width values: each lane writes its value out byte-by-byte, no packing needed
    if (lane_id < count) {
      for (int i = 0; i < sizeof(scratch_type); i++) {
        dst[lane_id * sizeof(scratch_type) + i] = val & 0xff;
        val >>= 8;
      }
    }
    return;
  }

  // NOTE(review): `<=` (rather than `<`) lets one lane past `count` participate; its bits can
  // reach the trailing partial byte — presumably benign padding, confirm against the reader.
  if (lane_id <= count) {
    // Shift symbol left by up to mask bits.
    wide_type v2 = val;
    v2 <<= (lane_id * nbits) & mask;

    // Copy N bit word into two N/2 bit words while following C++ strict aliasing rules.
    scratch_type v1[2];
    memcpy(&v1, &v2, sizeof(wide_type));

    // Atomically write result to scratch.
    if (v1[0]) { atomicOr(scratch + ((lane_id * nbits) / div), v1[0]); }
    if (v1[1]) { atomicOr(scratch + ((lane_id * nbits) / div) + 1, v1[1]); }
  }
  __syncwarp();

  // Copy scratch data to final destination.
  auto const available_bytes = util::div_rounding_up_safe(count * nbits, 8U);
  auto const scratch_bytes   = reinterpret_cast<uint8_t const*>(scratch);

  for (uint32_t i = lane_id; i < available_bytes; i += warp_size) {
    dst[i] = scratch_bytes[i];
  }
  __syncwarp();
}

}  // namespace delta

// Object used to turn a stream of integers into a DELTA_BINARY_PACKED stream. This takes as input
// 128 values with validity at a time, saving them until there are enough values for a block
// to be written.
// T is the input data type (either zigzag128_t or uleb128_t).
template <typename T>
class delta_binary_packer {
 private:
  uint8_t* _dst;                // sink to dump encoded values to
  T* _buffer;                   // buffer to store values to be encoded
  size_type _current_idx;       // index of first value in buffer
  uint32_t _num_values;         // total number of values to encode
  size_type _values_in_buffer;  // current number of values stored in _buffer
  uint8_t _mb_bits[delta::num_mini_blocks];  // bitwidth for each mini-block

  // pointers to shared scratch memory for the warp and block scans/reduces
  delta::index_scan::TempStorage* _scan_tmp;
  delta::warp_reduce::TempStorage* _warp_tmp;
  delta::block_reduce::TempStorage* _block_tmp;

  void* _bitpack_tmp;  // pointer to shared scratch memory used in bitpacking

  // Write the delta binary header. Only call from thread 0.
  inline __device__ void write_header()
  {
    delta::put_uleb128(_dst, delta::block_size);
    delta::put_uleb128(_dst, delta::num_mini_blocks);
    delta::put_uleb128(_dst, _num_values);
    delta::put_zz128(_dst, _buffer[0]);  // first value is stored directly in the header
  }

  // Write the block header. Only call from thread 0.
  inline __device__ void write_block_header(zigzag128_t block_min)
  {
    delta::put_zz128(_dst, block_min);
    // one bitwidth byte per mini-block (4 == delta::num_mini_blocks)
    memcpy(_dst, _mb_bits, 4);
    _dst += 4;
  }

  // Signed subtraction with defined wrapping behavior.
  inline __device__ zigzag128_t subtract(zigzag128_t a, zigzag128_t b)
  {
    return static_cast<zigzag128_t>(static_cast<uleb128_t>(a) - static_cast<uleb128_t>(b));
  }

 public:
  inline __device__ auto num_values() const { return _num_values; }

  // Initialize the object. Only call from thread 0.
  inline __device__ void init(uint8_t* dest, uint32_t num_values, T* buffer, void* temp_storage)
  {
    _dst              = dest;
    _num_values       = num_values;
    _buffer           = buffer;
    // the scan/reduce scratch areas alias the same temp_storage allocation; only one
    // cub primitive is in flight at a time
    _scan_tmp         = reinterpret_cast<delta::index_scan::TempStorage*>(temp_storage);
    _warp_tmp         = reinterpret_cast<delta::warp_reduce::TempStorage*>(temp_storage);
    _block_tmp        = reinterpret_cast<delta::block_reduce::TempStorage*>(temp_storage);
    _bitpack_tmp      = _buffer + delta::buffer_size;
    _current_idx      = 0;
    _values_in_buffer = 0;
  }

  // Each thread calls this to add its current value.
  inline __device__ void add_value(T value, bool is_valid)
  {
    // Figure out the correct position for the given value.
    size_type const valid = is_valid;
    size_type pos;
    size_type num_valid;
    delta::index_scan(*_scan_tmp).ExclusiveSum(valid, pos, num_valid);

    if (is_valid) { _buffer[delta::rolling_idx(pos + _current_idx + _values_in_buffer)] = value; }
    __syncthreads();

    if (threadIdx.x == 0) {
      _values_in_buffer += num_valid;
      // if first pass write header
      if (_current_idx == 0) {
        write_header();
        _current_idx = 1;
        _values_in_buffer -= 1;  // first value was consumed by the header
      }
    }
    __syncthreads();

    if (_values_in_buffer >= delta::block_size) { flush(); }
  }

  // Called by each thread to flush data to the sink.
  // NOTE(review): member updates at the end are done by a single thread, which assumes this
  // object lives in shared memory visible to the whole block — confirm at call sites.
  inline __device__ uint8_t const* flush()
  {
    using cudf::detail::warp_size;
    __shared__ zigzag128_t block_min;

    int const t       = threadIdx.x;
    int const warp_id = t / warp_size;
    int const lane_id = t % warp_size;

    if (_values_in_buffer <= 0) { return _dst; }

    // Calculate delta for this thread.
    size_type const idx = _current_idx + t;
    zigzag128_t const delta =
      idx < _num_values
        ? subtract(_buffer[delta::rolling_idx(idx)], _buffer[delta::rolling_idx(idx - 1)])
        : std::numeric_limits<zigzag128_t>::max();

    // Find min delta for the block.
    auto const min_delta = delta::block_reduce(*_block_tmp).Reduce(delta, cub::Min());

    if (t == 0) { block_min = min_delta; }
    __syncthreads();

    // Compute frame of reference for the block.
    uleb128_t const norm_delta = idx < _num_values ? subtract(delta, block_min) : 0;

    // Get max normalized delta for each warp, and use that to determine how many bits to use
    // for the bitpacking of this warp.
    zigzag128_t const warp_max =
      delta::warp_reduce(_warp_tmp[warp_id]).Reduce(norm_delta, cub::Max());
    __syncwarp();

    if (lane_id == 0) { _mb_bits[warp_id] = sizeof(zigzag128_t) * 8 - __clzll(warp_max); }
    __syncthreads();

    // write block header
    if (t == 0) { write_block_header(block_min); }
    __syncthreads();

    // Now each warp encodes its data...can calculate starting offset with _mb_bits.
    // NOTE: using a switch here rather than a loop because the compiler produces code that
    // uses fewer registers.
    int cumulative_bits = 0;
    switch (warp_id) {
      case 3: cumulative_bits += _mb_bits[2]; [[fallthrough]];
      case 2: cumulative_bits += _mb_bits[1]; [[fallthrough]];
      case 1: cumulative_bits += _mb_bits[0];
    }
    uint8_t* const mb_ptr = _dst + cumulative_bits * delta::values_per_mini_block / 8;

    // encoding happens here
    auto const warp_idx = _current_idx + warp_id * delta::values_per_mini_block;
    if (warp_idx < _num_values) {
      auto const num_enc = min(delta::values_per_mini_block, _num_values - warp_idx);
      if (_mb_bits[warp_id] > 32) {
        delta::bitpack_mini_block<unsigned long long>(
          mb_ptr, norm_delta, num_enc, _mb_bits[warp_id], _bitpack_tmp);
      } else {
        delta::bitpack_mini_block<uint32_t>(
          mb_ptr, norm_delta, num_enc, _mb_bits[warp_id], _bitpack_tmp);
      }
    }
    __syncthreads();

    // Last warp updates global delta ptr.
    if (warp_id == delta::num_mini_blocks - 1 && lane_id == 0) {
      _dst              = mb_ptr + _mb_bits[warp_id] * delta::values_per_mini_block / 8;
      _current_idx      = min(warp_idx + delta::values_per_mini_block, _num_values);
      _values_in_buffer = max(_values_in_buffer - delta::block_size, 0U);
    }
    __syncthreads();

    return _dst;
  }
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/error.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <rmm/cuda_stream_view.hpp> #include <rmm/device_scalar.hpp> #include <cstdint> #include <sstream> namespace cudf::io::parquet { /** * @brief Wrapper around a `rmm::device_scalar` for use in reporting errors that occur in * kernel calls. * * The `kernel_error` object is created with a `rmm::cuda_stream_view` which is used throughout * the object's lifetime. */ class kernel_error { private: rmm::device_scalar<int32_t> _error_code; public: /** * @brief Construct a new `kernel_error` with an initial value of 0. * * Note: the initial value is set asynchronously. * * @throws `rmm::bad_alloc` if allocating the device memory for `initial_value` fails. * @throws `rmm::cuda_error` if copying `initial_value` to device memory fails. * * @param CUDA stream to use */ kernel_error(rmm::cuda_stream_view stream) : _error_code{0, stream} {} /** * @brief Return a pointer to the device memory for the error */ [[nodiscard]] auto data() { return _error_code.data(); } /** * @brief Return the current value of the error * * This uses the stream used to create this instance. This does a synchronize on the stream * this object was instantiated with. */ [[nodiscard]] auto value() const { return _error_code.value(_error_code.stream()); } /** * @brief Return a hexadecimal string representation of the current error code * * Returned string will have "0x" prepended. 
*/ [[nodiscard]] std::string str() const { std::stringstream sstream; sstream << std::hex << value(); return "0x" + sstream.str(); } }; } // namespace cudf::io::parquet
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/rle_stream.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "parquet_gpu.hpp" #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/integer_utils.hpp> namespace cudf::io::parquet::detail { template <int num_threads> constexpr int rle_stream_required_run_buffer_size() { constexpr int num_rle_stream_decode_warps = (num_threads / cudf::detail::warp_size) - 1; return (num_rle_stream_decode_warps * 2); } /** * @brief Read a 32-bit varint integer * * @param[in,out] cur The current data position, updated after the read * @param[in] end The end data position * * @return The 32-bit value read */ inline __device__ uint32_t get_vlq32(uint8_t const*& cur, uint8_t const* end) { uint32_t v = *cur++; if (v >= 0x80 && cur < end) { v = (v & 0x7f) | ((*cur++) << 7); if (v >= (0x80 << 7) && cur < end) { v = (v & ((0x7f << 7) | 0x7f)) | ((*cur++) << 14); if (v >= (0x80 << 14) && cur < end) { v = (v & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 21); if (v >= (0x80 << 21) && cur < end) { v = (v & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | ((*cur++) << 28); } } } } return v; } // an individual batch. processed by a warp. // batches should be in shared memory. 
// A single chunk of an RLE run, decoded cooperatively by one warp.
// `level_t` is the output element type (definition/repetition level storage type).
template <typename level_t>
struct rle_batch {
  uint8_t const* run_start;  // start of the run we are part of
  int run_offset;            // value offset of this batch from the start of the run
  level_t* output;           // destination for decoded values
  int level_run;             // run header: LSB set => bit-packed literal run, clear => repeated run
  int size;                  // number of values this batch produces

  // Decode `size` values into `output`, 32 values per iteration (one per lane).
  // `end` bounds the input buffer; `level_bits` is the bit width of each level value.
  __device__ inline void decode(uint8_t const* const end, int level_bits, int lane, int warp_id)
  {
    int output_pos = 0;
    int remain     = size;

    // for bitpacked/literal runs, total size is always a multiple of 8. so we need to take care if
    // we are not starting/ending exactly on a run boundary
    uint8_t const* cur;
    if (level_run & 1) {
      // back up to the previous 8-value boundary; the extra "lead" values are decoded but
      // written at negative offsets, which the store below filters out
      int const effective_offset = cudf::util::round_down_safe(run_offset, 8);
      int const lead_values      = (run_offset - effective_offset);
      output_pos -= lead_values;
      remain += lead_values;
      cur = run_start + ((effective_offset >> 3) * level_bits);
    }

    // if this is a repeated run, compute the repeated value
    int level_val;
    if (!(level_run & 1)) {
      level_val = run_start[0];
      if (level_bits > 8) { level_val |= run_start[1] << 8; }
    }

    // process
    while (remain > 0) {
      int const batch_len = min(32, remain);

      // if this is a literal run. each thread computes its own level_val
      if (level_run & 1) {
        int const batch_len8 = (batch_len + 7) >> 3;
        if (lane < batch_len) {
          // each lane extracts its `level_bits`-wide field from the packed stream;
          // a field can straddle up to 3 bytes, hence the staged reads below
          int bitpos                = lane * level_bits;
          uint8_t const* cur_thread = cur + (bitpos >> 3);
          bitpos &= 7;
          level_val = 0;
          if (cur_thread < end) { level_val = cur_thread[0]; }
          cur_thread++;
          if (level_bits > 8 - bitpos && cur_thread < end) {
            level_val |= cur_thread[0] << 8;
            cur_thread++;
            if (level_bits > 16 - bitpos && cur_thread < end) { level_val |= cur_thread[0] << 16; }
          }
          level_val = (level_val >> bitpos) & ((1 << level_bits) - 1);
        }

        cur += batch_len8 * level_bits;
      }

      // store level_val (skip the negative-offset lead values of unaligned literal runs)
      if (lane < batch_len && (lane + output_pos) >= 0) { output[lane + output_pos] = level_val; }
      remain -= batch_len;
      output_pos += batch_len;
    }
  }
};

// a single rle run.
// may be broken up into multiple rle_batches
template <typename level_t>
struct rle_run {
  int size;  // total size of the run
  int output_pos;
  uint8_t const* start;
  int level_run;  // level_run header value
  int remaining;

  // Carve the next batch (up to max_size values) off the front of this run,
  // advancing `remaining`.
  __device__ __inline__ rle_batch<level_t> next_batch(level_t* const output, int max_size)
  {
    int const batch_len  = min(max_size, remaining);
    int const run_offset = size - remaining;
    remaining -= batch_len;
    return rle_batch<level_t>{start, run_offset, output, level_run, batch_len};
  }
};

// a stream of rle_runs
template <typename level_t, int decode_threads>
struct rle_stream {
  static constexpr int num_rle_stream_decode_threads = decode_threads;
  // the -1 here is for the look-ahead warp that fills in the list of runs to be decoded
  // in an overlapped manner. so if we had 16 total warps:
  // - warp 0 would be filling in batches of runs to be processed
  // - warps 1-15 would be decoding the previous batch of runs generated
  static constexpr int num_rle_stream_decode_warps =
    (num_rle_stream_decode_threads / cudf::detail::warp_size) - 1;

  static constexpr int run_buffer_size = rle_stream_required_run_buffer_size<decode_threads>();

  int level_bits;        // bit width of each encoded level value
  uint8_t const* start;  // start of the encoded input
  uint8_t const* cur;    // current read position in the input
  uint8_t const* end;    // end of the encoded input

  int max_output_values;  // maximum values produced per decode_next() call
  int total_values;       // total values to decode over the stream's lifetime
  int cur_values;         // values decoded so far

  level_t* output;  // destination buffer for decoded values

  rle_run<level_t>* runs;  // caller-provided circular run buffer (run_buffer_size entries)
  int run_index;           // monotonically increasing index of the next run slot
  int run_count;           // runs generated for the batch currently being filled
  int output_pos;          // absolute output position reached by generated runs
  bool spill;              // true if the last batch spilled a partial run into the next

  int next_batch_run_start;  // first run index of the batch handed to the decode warps
  int next_batch_run_count;  // number of runs in that batch

  __device__ rle_stream(rle_run<level_t>* _runs) : runs(_runs) {}

  // Reset all stream state for a new input range. Must be called before decode_next().
  __device__ void init(int _level_bits,
                       uint8_t const* _start,
                       uint8_t const* _end,
                       int _max_output_values,
                       level_t* _output,
                       int _total_values)
  {
    level_bits        = _level_bits;
    start             = _start;
    cur               = _start;
    end               = _end;
    max_output_values = _max_output_values;
    output            = _output;

    run_index            = 0;
    run_count            = 0;
    output_pos           = 0;
    spill                = false;
    next_batch_run_start = 0;
    next_batch_run_count = 0;

    total_values = _total_values;
    cur_values   = 0;
  }

  // Return the {start index, count} of the batch of runs ready for decoding.
  __device__ inline thrust::pair<int, int> get_run_batch()
  {
    return {next_batch_run_start, next_batch_run_count};
  }

  // fill in up to num_rle_stream_decode_warps runs or until we reach the max_count limit.
  // this function is the critical hotspot. please be very careful altering it.
  __device__ inline void fill_run_batch(int max_count)
  {
    // if we spilled over, we've already got a run at the beginning
    next_batch_run_start = spill ? run_index - 1 : run_index;
    spill                = false;

    // generate runs until we either run out of warps to decode them with, or
    // we cross the output limit.
    while (run_count < num_rle_stream_decode_warps && output_pos < max_count && cur < end) {
      auto& run = runs[rolling_index<run_buffer_size>(run_index)];

      // Encoding::RLE

      // bytes for the varint header
      uint8_t const* _cur = cur;
      int const level_run = get_vlq32(_cur, end);
      int run_bytes       = _cur - cur;

      // literal run
      if (level_run & 1) {
        int const run_size  = (level_run >> 1) * 8;
        run.size            = run_size;
        int const run_size8 = (run_size + 7) >> 3;
        run_bytes += run_size8 * level_bits;
      }
      // repeated value run
      else {
        run.size = (level_run >> 1);
        run_bytes++;
        // can this ever be > 16?  it effectively encodes nesting depth so that would require
        // a nesting depth > 64k.
        if (level_bits > 8) { run_bytes++; }
      }
      run.output_pos = output_pos;
      run.start      = _cur;
      run.level_run  = level_run;
      run.remaining  = run.size;
      cur += run_bytes;
      output_pos += run.size;
      run_count++;
      run_index++;
    }

    // the above loop computes a batch of runs to be processed. mark down
    // the number of runs because the code after this point resets run_count
    // for the next batch. each batch is returned via get_next_batch().
    next_batch_run_count = run_count;

    // -------------------------------------
    // prepare for the next run:

    // if we've reached the value output limit on the last run
    if (output_pos >= max_count) {
      // first, see if we've spilled over
      auto const& src       = runs[rolling_index<run_buffer_size>(run_index - 1)];
      int const spill_count = output_pos - max_count;

      // a spill has occurred in the current run. spill the extra values over into the beginning of
      // the next run.
      if (spill_count > 0) {
        auto& spill_run      = runs[rolling_index<run_buffer_size>(run_index)];
        spill_run            = src;
        spill_run.output_pos = 0;
        spill_run.remaining  = spill_count;

        run_count  = 1;
        run_index++;
        output_pos = spill_run.remaining;
        spill      = true;
      }
      // no actual spill needed. just reset the output pos
      else {
        output_pos = 0;
        run_count  = 0;
      }
    }
    // didn't cross the limit, so reset the run count
    else {
      run_count = 0;
    }
  }

  // Decode up to max_output_values values (bounded by total_values), cooperatively across
  // the whole block. `t` is the thread index within the block. Returns the number of
  // values decoded; valid for every thread.
  __device__ inline int decode_next(int t)
  {
    int const output_count = min(max_output_values, (total_values - cur_values));

    // special case. if level_bits == 0, just return all zeros. this should tremendously speed up
    // a very common case: columns with no nulls, especially if they are non-nested
    if (level_bits == 0) {
      int written = 0;
      while (written < output_count) {
        int const batch_size = min(num_rle_stream_decode_threads, output_count - written);
        if (t < batch_size) { output[written + t] = 0; }
        written += batch_size;
      }
      cur_values += output_count;
      return output_count;
    }

    // otherwise, full decode.
    int const warp_id        = t / cudf::detail::warp_size;
    int const warp_decode_id = warp_id - 1;
    int const warp_lane      = t % cudf::detail::warp_size;

    __shared__ int run_start;
    __shared__ int num_runs;
    __shared__ int values_processed;
    if (!t) {
      // carryover from the last call.
      thrust::tie(run_start, num_runs) = get_run_batch();
      values_processed                 = 0;
    }
    __syncthreads();

    do {
      // warp 0 reads ahead and generates batches of runs to be decoded by remaining warps.
      if (!warp_id) {
        // fill the next set of runs. fill_runs will generally be the bottleneck for any
        // kernel that uses an rle_stream.
        if (warp_lane == 0) { fill_run_batch(output_count); }
      }
      // remaining warps decode the runs
      else if (warp_decode_id < num_runs) {
        // each warp handles 1 run, regardless of size.
        // TODO: having each warp handle exactly 32 values would be ideal. as an example, the
        // repetition levels for one of the list benchmarks decodes in ~3ms total, while the
        // definition levels take ~11ms - the difference is entirely due to long runs in the
        // definition levels.
        auto& run  = runs[rolling_index<run_buffer_size>(run_start + warp_decode_id)];
        auto batch = run.next_batch(output + run.output_pos,
                                    min(run.remaining, (output_count - run.output_pos)));
        batch.decode(end, level_bits, warp_lane, warp_decode_id);
        // last warp updates total values processed
        if (warp_lane == 0 && warp_decode_id == num_runs - 1) {
          values_processed = run.output_pos + batch.size;
        }
      }
      __syncthreads();

      // if we haven't run out of space, retrieve the next batch. otherwise leave it for the next
      // call.
      if (!t && values_processed < output_count) {
        thrust::tie(run_start, num_runs) = get_run_batch();
      }
      __syncthreads();
    } while (num_runs > 0 && values_processed < output_count);

    cur_values += values_processed;

    // valid for every thread
    return values_processed;
  }
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader.cpp
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reader_impl.hpp"

namespace cudf::io::parquet::detail {

reader::reader() = default;

// Thin public facade: all work is delegated to the pimpl.
reader::reader(std::vector<std::unique_ptr<datasource>>&& sources,
               parquet_reader_options const& options,
               rmm::cuda_stream_view stream,
               rmm::mr::device_memory_resource* mr)
  : _impl(std::make_unique<impl>(std::move(sources), options, stream, mr))
{
}

reader::~reader() = default;

table_with_metadata reader::read(parquet_reader_options const& options)
{
  auto const skip_rows = options.get_skip_rows();
  auto const num_rows  = options.get_num_rows();

  // Custom row bounds are in effect whenever the caller caps the row count or skips rows.
  bool const has_custom_bounds = num_rows.has_value() || skip_rows != 0;

  return _impl->read(
    skip_rows, num_rows, has_custom_bounds, options.get_row_groups(), options.get_filter());
}

chunked_reader::chunked_reader(std::size_t chunk_read_limit,
                               std::size_t pass_read_limit,
                               std::vector<std::unique_ptr<datasource>>&& sources,
                               parquet_reader_options const& options,
                               rmm::cuda_stream_view stream,
                               rmm::mr::device_memory_resource* mr)
{
  // `_impl` belongs to the `reader` base, so it is assigned here rather than in the init list.
  _impl = std::make_unique<impl>(
    chunk_read_limit, pass_read_limit, std::move(sources), options, stream, mr);
}

chunked_reader::~chunked_reader() = default;

// Both chunked-read queries simply forward to the implementation object.
bool chunked_reader::has_next() const { return _impl->has_next(); }

table_with_metadata chunked_reader::read_chunk() const { return _impl->read_chunk(); }

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader_impl.hpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file reader_impl.hpp
 * @brief cuDF-IO Parquet reader class implementation header
 */

#pragma once

#include "parquet_gpu.hpp"
#include "reader_impl_chunking.hpp"
#include "reader_impl_helpers.hpp"

#include <cudf/io/datasource.hpp>
#include <cudf/io/detail/parquet.hpp>
#include <cudf/io/parquet.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>

#include <memory>
#include <optional>
#include <vector>

namespace cudf::io::parquet::detail {

/**
 * @brief Implementation for Parquet reader
 */
class reader::impl {
 public:
  /**
   * @brief Constructor from an array of dataset sources with reader options.
   *
   * By using this constructor, each call to `read()` or `read_chunk()` will perform reading the
   * entire given file.
   *
   * @param sources Dataset sources
   * @param options Settings for controlling reading behavior
   * @param stream CUDA stream used for device memory operations and kernel launches
   * @param mr Device memory resource to use for device memory allocation
   */
  explicit impl(std::vector<std::unique_ptr<datasource>>&& sources,
                parquet_reader_options const& options,
                rmm::cuda_stream_view stream,
                rmm::mr::device_memory_resource* mr);

  /**
   * @brief Read an entire set or a subset of data and returns a set of columns
   *
   * @param skip_rows Number of rows to skip from the start
   * @param num_rows Number of rows to read
   * @param uses_custom_row_bounds Whether or not num_rows and skip_rows represents user-specific
   * bounds
   * @param row_group_indices Lists of row groups to read, one per source
   * @param filter Optional AST expression to filter output rows
   *
   * @return The set of columns along with metadata
   */
  table_with_metadata read(int64_t skip_rows,
                           std::optional<size_type> const& num_rows,
                           bool uses_custom_row_bounds,
                           host_span<std::vector<size_type> const> row_group_indices,
                           std::optional<std::reference_wrapper<ast::expression const>> filter);

  /**
   * @brief Constructor from a chunk read limit and an array of dataset sources with reader options.
   *
   * By using this constructor, the reader will support iterative (chunked) reading through
   * `has_next() ` and `read_chunk()`. For example:
   * ```
   *  do {
   *    auto const chunk = reader.read_chunk();
   *    // Process chunk
   *  } while (reader.has_next());
   *
   * ```
   *
   * Reading the whole given file at once through `read()` function is still supported if
   * `chunk_read_limit == 0` (i.e., no reading limit) and `pass_read_limit == 0` (no temporary
   * memory limit) In such case, `read_chunk()` will also return rows of the entire file.
   *
   * @param chunk_read_limit Limit on total number of bytes to be returned per read,
   * or `0` if there is no limit
   * @param pass_read_limit Limit on memory usage for the purposes of decompression and processing
   * of input, or `0` if there is no limit.
   * @param sources Dataset sources
   * @param options Settings for controlling reading behavior
   * @param stream CUDA stream used for device memory operations and kernel launches
   * @param mr Device memory resource to use for device memory allocation
   */
  explicit impl(std::size_t chunk_read_limit,
                std::size_t pass_read_limit,
                std::vector<std::unique_ptr<datasource>>&& sources,
                parquet_reader_options const& options,
                rmm::cuda_stream_view stream,
                rmm::mr::device_memory_resource* mr);

  /**
   * @copydoc cudf::io::chunked_parquet_reader::has_next
   */
  bool has_next();

  /**
   * @copydoc cudf::io::chunked_parquet_reader::read_chunk
   */
  table_with_metadata read_chunk();

 private:
  /**
   * @brief Perform the necessary data preprocessing for parsing file later on.
   *
   * @param skip_rows Number of rows to skip from the start
   * @param num_rows Number of rows to read, or `std::nullopt` to read all rows
   * @param uses_custom_row_bounds Whether or not num_rows and skip_rows represents user-specific
   * bounds
   * @param row_group_indices Lists of row groups to read (one per source), or empty if read all
   * @param filter Optional AST expression to filter row groups based on column chunk statistics
   */
  void prepare_data(int64_t skip_rows,
                    std::optional<size_type> const& num_rows,
                    bool uses_custom_row_bounds,
                    host_span<std::vector<size_type> const> row_group_indices,
                    std::optional<std::reference_wrapper<ast::expression const>> filter);

  /**
   * @brief Create chunk information and start file reads
   *
   * @return pair of boolean indicating if compressed chunks were found and a vector of futures for
   * read completion
   */
  std::pair<bool, std::vector<std::future<void>>> read_and_decompress_column_chunks();

  /**
   * @brief Load and decompress the input file(s) into memory.
   */
  void load_and_decompress_data();

  /**
   * @brief Perform some preprocessing for page data and also compute the split locations
   * {skip_rows, num_rows} for chunked reading.
   *
   * There are several pieces of information we can't compute directly from row counts in
   * the parquet headers when dealing with nested schemas:
   * - The total sizes of all output columns at all nesting levels
   * - The starting output buffer offset for each page, for each nesting level
   *
   * For flat schemas, these values are computed during header decoding (see gpuDecodePageHeaders).
   *
   * @param uses_custom_row_bounds Whether or not num_rows and skip_rows represents user-specific
   * bounds
   * @param chunk_read_limit Limit on total number of bytes to be returned per read,
   * or `0` if there is no limit
   */
  void preprocess_pages(bool uses_custom_row_bounds, size_t chunk_read_limit);

  /**
   * @brief Allocate nesting information storage for all pages and set pointers to it.
   *
   * One large contiguous buffer of PageNestingInfo structs is allocated and
   * distributed among the PageInfo structs.
   *
   * Note that this gets called even in the flat schema case so that we have a
   * consistent place to store common information such as value counts, etc.
   */
  void allocate_nesting_info();

  /**
   * @brief Allocate space for use when decoding definition/repetition levels.
   *
   * One large contiguous buffer of data allocated and
   * distributed among the PageInfo structs.
   */
  void allocate_level_decode_space();

  /**
   * @brief Populate the output table metadata from the parquet file metadata.
   *
   * @param out_metadata The output table metadata to add to
   */
  void populate_metadata(table_metadata& out_metadata);

  /**
   * @brief Read a chunk of data and return an output table.
   *
   * This function is called internally and expects all preprocessing steps have already been done.
   *
   * @param uses_custom_row_bounds Whether or not num_rows and skip_rows represents user-specific
   * bounds
   * @param filter Optional AST expression to filter output rows
   * @return The output table along with columns' metadata
   */
  table_with_metadata read_chunk_internal(
    bool uses_custom_row_bounds,
    std::optional<std::reference_wrapper<ast::expression const>> filter);

  /**
   * @brief Finalize the output table by adding empty columns for the non-selected columns in
   * schema.
   *
   * @param out_metadata The output table metadata
   * @param out_columns The columns for building the output table
   * @param filter Optional AST expression to filter output rows
   * @return The output table along with columns' metadata
   */
  table_with_metadata finalize_output(
    table_metadata& out_metadata,
    std::vector<std::unique_ptr<column>>& out_columns,
    std::optional<std::reference_wrapper<ast::expression const>> filter);

  /**
   * @brief Allocate data buffers for the output columns.
   *
   * @param skip_rows Crop all rows below skip_rows
   * @param num_rows Maximum number of rows to read
   * @param uses_custom_row_bounds Whether or not num_rows and skip_rows represents user-specific
   * bounds
   */
  void allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds);

  /**
   * @brief Calculate per-page offsets for string data
   *
   * @return Vector of total string data sizes for each column
   */
  std::vector<size_t> calculate_page_string_offsets();

  /**
   * @brief Converts the page data and outputs to columns.
   *
   * @param skip_rows Minimum number of rows from start
   * @param num_rows Number of rows to output
   */
  void decode_page_data(size_t skip_rows, size_t num_rows);

  /**
   * @brief Creates file-wide parquet chunk information.
   *
   * Creates information about all chunks in the file, storing it in
   * the file-wide _file_itm_data structure.
   */
  void create_global_chunk_info();

  /**
   * @brief Computes all of the passes we will perform over the file.
   */
  void compute_input_passes();

  /**
   * @brief Close out the existing pass (if any) and prepare for the next pass.
   */
  void setup_next_pass();

  /**
   * @brief Given a set of pages that have had their sizes computed by nesting level and
   * a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing
   * a set of reads that will generate output columns of total size <= `chunk_read_limit`
   * bytes.
   */
  void compute_splits_for_pass();

 private:
  rmm::cuda_stream_view _stream;                  // stream for all device work
  rmm::mr::device_memory_resource* _mr = nullptr; // memory resource for output allocations

  std::vector<std::unique_ptr<datasource>> _sources;  // input data sources, one per file
  std::unique_ptr<aggregate_reader_metadata> _metadata;  // merged metadata across all sources

  // input columns to be processed
  std::vector<input_column_info> _input_columns;

  // Buffers for generating output columns
  std::vector<cudf::io::detail::inline_column_buffer> _output_buffers;

  // Buffers copied from `_output_buffers` after construction for reuse
  std::vector<cudf::io::detail::inline_column_buffer> _output_buffers_template;

  // _output_buffers associated schema indices
  std::vector<int> _output_column_schemas;

  // _output_buffers associated metadata
  std::unique_ptr<table_metadata> _output_metadata;

  // reader options captured at construction
  bool _strings_to_categorical = false;
  std::optional<std::vector<reader_column_schema>> _reader_column_schema;
  data_type _timestamp_type{type_id::EMPTY};

  // chunked reading happens in 2 parts:
  //
  // At the top level, the entire file is divided up into "passes" omn which we try and limit the
  // total amount of temporary memory (compressed data, decompressed data) in use
  // via _input_pass_read_limit.
  //
  // Within a pass, we produce one or more chunks of output, whose maximum total
  // byte size is controlled by _output_chunk_read_limit.

  file_intermediate_data _file_itm_data;
  bool _file_preprocessed{false};

  std::unique_ptr<pass_intermediate_data> _pass_itm_data;
  bool _pass_preprocessed{false};

  std::size_t _output_chunk_read_limit{0};  // output chunk size limit in bytes
  std::size_t _input_pass_read_limit{0};    // input pass memory usage limit in bytes

  std::size_t _current_input_pass{0};  // current input pass index
  std::size_t _chunk_count{0};         // how many output chunks we have produced
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/page_hdr.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parquet_gpu.hpp"

#include <io/utilities/block_utils.cuh>

#include <cudf/detail/utilities/cuda.cuh>

#include <thrust/tuple.h>

#include <rmm/cuda_stream_view.hpp>

namespace cudf::io::parquet::detail {

// Minimal thrift implementation for parsing page headers
// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md

// Per-chunk parse state: a bounded byte cursor plus the symbols decoded so far.
struct byte_stream_s {
  uint8_t const* cur{};   // current read position
  uint8_t const* end{};   // one past the last readable byte
  uint8_t const* base{};  // start of the compressed data
  // Parsed symbols
  PageType page_type{};
  PageInfo page{};
  ColumnChunkDesc ck{};
};

/**
 * @brief Get current byte from the byte stream
 *
 * Returns 0 (without advancing) once the end of the stream is reached.
 *
 * @param[in] bs Byte stream
 *
 * @return Current byte pointed to by the byte stream
 */
inline __device__ unsigned int getb(byte_stream_s* bs)
{
  return (bs->cur < bs->end) ? *bs->cur++ : 0;
}

/**
 * @brief Advance the stream by up to `bytecnt` bytes, clamped to the stream end.
 *
 * @param[in] bs Byte stream
 * @param[in] bytecnt Number of bytes to skip
 */
inline __device__ void skip_bytes(byte_stream_s* bs, size_t bytecnt)
{
  // static_cast (not a C-style cast): end >= cur always holds here since the cursor is
  // only ever advanced up to `end`
  bytecnt = min(bytecnt, static_cast<size_t>(bs->end - bs->cur));
  bs->cur += bytecnt;
}

/**
 * @brief Decode unsigned integer from a byte stream using VarInt encoding
 *
 * Concatenate least significant 7 bits of each byte to form a 32 bit
 * integer. Most significant bit of each byte indicates if more bytes
 * are to be used to form the number.
 *
 * @param[in] bs Byte stream
 *
 * @return Decoded 32 bit integer
 */
__device__ uint32_t get_u32(byte_stream_s* bs)
{
  uint32_t v = 0, l = 0, c;
  do {
    c = getb(bs);
    v |= (c & 0x7f) << l;
    l += 7;
  } while (c & 0x80);
  return v;
}

/**
 * @brief Decode signed integer from a byte stream using zigzag encoding
 *
 * The number n encountered in a byte stream translates to
 * -1^(n%2) * ceil(n/2), with the exception of 0 which remains the same.
 * i.e. 0, 1, 2, 3, 4, 5 etc convert to 0, -1, 1, -2, 2 respectively.
 *
 * @param[in] bs Byte stream
 *
 * @return Decoded 32 bit integer
 */
inline __device__ int32_t get_i32(byte_stream_s* bs)
{
  uint32_t u = get_u32(bs);
  // zigzag decode via named casts (was a C-style cast); the xor with -(u & 1)
  // sign-extends odd values
  return static_cast<int32_t>((u >> 1u) ^ -static_cast<int32_t>(u & 1));
}

/**
 * @brief Skip over a single thrift field (of any type) without decoding it.
 *
 * Handles nested structs and lists/sets by tracking the struct depth and a
 * repeat count. NOTE: skipping a list of lists is not handled.
 *
 * @param[in] bs Byte stream
 * @param[in] field_type Thrift compact-protocol type code of the field to skip
 */
__device__ void skip_struct_field(byte_stream_s* bs, int field_type)
{
  int struct_depth = 0;
  int rep_cnt      = 0;

  do {
    if (rep_cnt != 0) {
      rep_cnt--;
    } else if (struct_depth != 0) {
      unsigned int c;
      do {
        c = getb(bs);
        if (!c) --struct_depth;
      } while (!c && struct_depth);
      if (!struct_depth) break;
      field_type = c & 0xf;
      if (!(c & 0xf0)) get_i32(bs);  // long-form field id follows
    }
    switch (field_type) {
      case ST_FLD_TRUE:
      case ST_FLD_FALSE: break;
      case ST_FLD_I16:
      case ST_FLD_I32:
      case ST_FLD_I64: get_u32(bs); break;
      case ST_FLD_BYTE: skip_bytes(bs, 1); break;
      case ST_FLD_DOUBLE: skip_bytes(bs, 8); break;
      case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break;
      case ST_FLD_LIST:
      case ST_FLD_SET: {  // NOTE: skipping a list of lists is not handled
        auto const c = getb(bs);
        int n        = c >> 4;
        if (n == 0xf) { n = get_u32(bs); }
        field_type = c & 0xf;
        if (field_type == ST_FLD_STRUCT) {
          struct_depth += n;
        } else {
          rep_cnt = n;
        }
      } break;
      case ST_FLD_STRUCT: struct_depth++; break;
    }
  } while (rep_cnt || struct_depth);
}

/**
 * @brief Determine which decode kernel to run for the given page.
 *
 * @param page The page to decode
 * @param chunk Column chunk the page belongs to
 * @return `kernel_mask_bits` value for the given page
 */
__device__ decode_kernel_mask kernel_mask_for_page(PageInfo const& page,
                                                   ColumnChunkDesc const& chunk)
{
  // dictionary pages are not decoded by these kernels
  if (page.flags & PAGEINFO_FLAGS_DICTIONARY) { return decode_kernel_mask::NONE; }

  // delta encodings and string columns each have a dedicated kernel
  if (page.encoding == Encoding::DELTA_BINARY_PACKED) {
    return decode_kernel_mask::DELTA_BINARY;
  } else if (page.encoding == Encoding::DELTA_BYTE_ARRAY) {
    return decode_kernel_mask::DELTA_BYTE_ARRAY;
  } else if (is_string_col(chunk)) {
    return decode_kernel_mask::STRING;
  }

  // non-string, non-delta
  return decode_kernel_mask::GENERAL;
}

/**
 * @brief Functor to set value to 32 bit integer read from byte stream
 *
 * @return True if field type is not int32
 */
struct ParquetFieldInt32 {
  int field;     // thrift field id this functor responds to
  int32_t& val;  // destination for the decoded value

  __device__ ParquetFieldInt32(int f, int32_t& v) : field(f), val(v) {}

  inline __device__ bool operator()(byte_stream_s* bs, int field_type)
  {
    val = get_i32(bs);
    return (field_type != ST_FLD_I32);
  }
};

/**
 * @brief Functor to set value to enum read from byte stream
 *
 * @return True if field type is not int32
 */
template <typename Enum>
struct ParquetFieldEnum {
  int field;  // thrift field id this functor responds to
  Enum& val;  // destination for the decoded enum value

  __device__ ParquetFieldEnum(int f, Enum& v) : field(f), val(v) {}

  inline __device__ bool operator()(byte_stream_s* bs, int field_type)
  {
    val = static_cast<Enum>(get_i32(bs));
    return (field_type != ST_FLD_I32);
  }
};

/**
 * @brief Functor to run operator on byte stream
 *
 * @return True if field type is not struct type or if the calling operator
 * fails
 */
template <typename Operator>
struct ParquetFieldStruct {
  int field;    // thrift field id this functor responds to
  Operator op;  // nested parser invoked on the struct's contents

  __device__ ParquetFieldStruct(int f) : field(f) {}

  inline __device__ bool operator()(byte_stream_s* bs, int field_type)
  {
    return ((field_type != ST_FLD_STRUCT) || !op(bs));
  }
};

/**
 * @brief Functor to run an operator
 *
 * The purpose of this functor is to replace a switch case.
If the field in * the argument is equal to the field specified in any element of the tuple * of operators then it is run with the byte stream and field type arguments. * * If the field does not match any of the functors then skip_struct_field is * called over the byte stream. * * @return Return value of the selected operator or false if no operator * matched the field value */ template <int index> struct FunctionSwitchImpl { template <typename... Operator> static inline __device__ bool run(byte_stream_s* bs, int field_type, int const& field, thrust::tuple<Operator...>& ops) { if (field == thrust::get<index>(ops).field) { return thrust::get<index>(ops)(bs, field_type); } else { return FunctionSwitchImpl<index - 1>::run(bs, field_type, field, ops); } } }; template <> struct FunctionSwitchImpl<0> { template <typename... Operator> static inline __device__ bool run(byte_stream_s* bs, int field_type, int const& field, thrust::tuple<Operator...>& ops) { if (field == thrust::get<0>(ops).field) { return thrust::get<0>(ops)(bs, field_type); } else { skip_struct_field(bs, field_type); return false; } } }; /** * @brief Function to parse page header based on the tuple of functors provided * * Bytes are read from the byte stream and the field delta and field type are * matched up against user supplied reading functors. If they match then the * corresponding values are written to references pointed to by the functors. * * @return Returns false if an unexpected field is encountered while reading * byte stream. Otherwise true is returned. */ template <typename... Operator> inline __device__ bool parse_header(thrust::tuple<Operator...>& op, byte_stream_s* bs) { constexpr int index = thrust::tuple_size<thrust::tuple<Operator...>>::value - 1; int field = 0; while (true) { auto const current_byte = getb(bs); if (!current_byte) break; int const field_delta = current_byte >> 4; int const field_type = current_byte & 0xf; field = field_delta ? 
field + field_delta : get_i32(bs);
    // Dispatch this field to the matching handler in `op`; a true return means an
    // unknown/terminal field was encountered and header parsing must stop.
    bool exit_function = FunctionSwitchImpl<index>::run(bs, field_type, field, op);
    if (exit_function) { return false; }
  }
  return true;
}

// Functor parsing a Thrift DataPageHeader (v1): value count plus the data,
// definition-level and repetition-level encodings (field ids 1-4).
struct gpuParseDataPageHeader {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
                                 ParquetFieldEnum<Encoding>(2, bs->page.encoding),
                                 ParquetFieldEnum<Encoding>(3, bs->page.definition_level_encoding),
                                 ParquetFieldEnum<Encoding>(4, bs->page.repetition_level_encoding));
    return parse_header(op, bs);
  }
};

// Functor parsing a Thrift DictionaryPageHeader: value count and encoding only.
struct gpuParseDictionaryPageHeader {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
                                 ParquetFieldEnum<Encoding>(2, bs->page.encoding));
    return parse_header(op, bs);
  }
};

// Functor parsing a Thrift DataPageHeaderV2: adds null/row counts and the byte
// sizes of the (always-uncompressed) definition/repetition level streams.
struct gpuParseDataPageHeaderV2 {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
                                 ParquetFieldInt32(2, bs->page.num_nulls),
                                 ParquetFieldInt32(3, bs->page.num_rows),
                                 ParquetFieldEnum<Encoding>(4, bs->page.encoding),
                                 ParquetFieldInt32(5, bs->page.lvl_bytes[level_type::DEFINITION]),
                                 ParquetFieldInt32(6, bs->page.lvl_bytes[level_type::REPETITION]));
    return parse_header(op, bs);
  }
};

// Functor parsing the outer Thrift PageHeader: page type, compressed/uncompressed
// sizes, and exactly one of the nested type-specific headers (field ids 5/7/8).
struct gpuParsePageHeader {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldEnum<PageType>(1, bs->page_type),
                                 ParquetFieldInt32(2, bs->page.uncompressed_page_size),
                                 ParquetFieldInt32(3, bs->page.compressed_page_size),
                                 ParquetFieldStruct<gpuParseDataPageHeader>(5),
                                 ParquetFieldStruct<gpuParseDictionaryPageHeader>(7),
                                 ParquetFieldStruct<gpuParseDataPageHeaderV2>(8));
    return parse_header(op, bs);
  }
};

/**
 * @brief Kernel for outputting page headers from the specified column chunks
 *
 * One warp walks one column chunk; lane 0 does the sequential Thrift parsing
 * and the results are broadcast to the warp via shuffle.
 *
 * @param[in] chunks List of column chunks
 * @param[in] num_chunks Number of column chunks
 * @param[out] error_code Accumulated decode_error bits, set on failure
 */
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
  gpuDecodePageHeaders(ColumnChunkDesc* chunks, int32_t num_chunks, int32_t* error_code)
{
  using cudf::detail::warp_size;
  gpuParsePageHeader parse_page_header;
  // One shared byte_stream_s per warp (4 warps of a 128-thread block).
  __shared__ byte_stream_s bs_g[4];

  int32_t error[4]   = {0};
  auto const lane_id = threadIdx.x % warp_size;
  auto const warp_id = threadIdx.x / warp_size;
  auto const chunk   = (blockIdx.x * 4) + warp_id;
  auto const bs      = &bs_g[warp_id];

  if (chunk < num_chunks and lane_id == 0) { bs->ck = chunks[chunk]; }
  if (lane_id == 0) { error[warp_id] = 0; }
  __syncthreads();

  if (chunk < num_chunks) {
    size_t num_values, values_found;
    uint32_t data_page_count       = 0;
    uint32_t dictionary_page_count = 0;
    int32_t max_num_pages;
    int32_t num_dict_pages = bs->ck.num_dict_pages;
    PageInfo* page_info;

    if (lane_id == 0) {
      // Initialize the stream cursor and per-page defaults.
      bs->base = bs->cur      = bs->ck.compressed_data;
      bs->end                 = bs->base + bs->ck.compressed_size;
      bs->page.chunk_idx      = chunk;
      bs->page.src_col_schema = bs->ck.src_col_schema;
      // this computation is only valid for flat schemas. for nested schemas,
      // they will be recomputed in the preprocess step by examining repetition and
      // definition levels
      bs->page.chunk_row           = 0;
      bs->page.num_rows            = 0;
      bs->page.skipped_values      = -1;
      bs->page.skipped_leaf_values = 0;
      bs->page.str_bytes           = 0;
      bs->page.temp_string_size    = 0;
      bs->page.temp_string_buf     = nullptr;
      bs->page.kernel_mask         = decode_kernel_mask::NONE;
    }
    num_values     = bs->ck.num_values;
    page_info      = bs->ck.page_info;
    num_dict_pages = bs->ck.num_dict_pages;
    // max_num_pages == 0 means "count only"; no PageInfo output is written.
    max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
    values_found  = 0;
    __syncwarp();
    while (values_found < num_values && bs->cur < bs->end) {
      int index_out = -1;

      if (lane_id == 0) {
        // this computation is only valid for flat schemas. for nested schemas,
        // they will be recomputed in the preprocess step by examining repetition and
        // definition levels
        bs->page.chunk_row += bs->page.num_rows;
        bs->page.num_rows = 0;
        bs->page.flags    = 0;
        // zero out V2 info
        bs->page.num_nulls                         = 0;
        bs->page.lvl_bytes[level_type::DEFINITION] = 0;
        bs->page.lvl_bytes[level_type::REPETITION] = 0;
        if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) {
          if (not is_supported_encoding(bs->page.encoding)) {
            error[warp_id] |= static_cast<int32_t>(decode_error::UNSUPPORTED_ENCODING);
          }
          // Data pages are indexed after the dictionary pages in page_info.
          switch (bs->page_type) {
            case PageType::DATA_PAGE:
              index_out = num_dict_pages + data_page_count;
              data_page_count++;
              // this computation is only valid for flat schemas. for nested schemas,
              // they will be recomputed in the preprocess step by examining repetition and
              // definition levels
              bs->page.num_rows = bs->page.num_input_values;
              values_found += bs->page.num_input_values;
              break;
            case PageType::DATA_PAGE_V2:
              index_out = num_dict_pages + data_page_count;
              data_page_count++;
              bs->page.flags |= PAGEINFO_FLAGS_V2;
              values_found += bs->page.num_input_values;
              // V2 only uses RLE, so it was removed from the header
              bs->page.definition_level_encoding = Encoding::RLE;
              bs->page.repetition_level_encoding = Encoding::RLE;
              break;
            case PageType::DICTIONARY_PAGE:
              index_out = dictionary_page_count;
              dictionary_page_count++;
              bs->page.flags |= PAGEINFO_FLAGS_DICTIONARY;
              break;
            default: index_out = -1; break;
          }
          bs->page.page_data = const_cast<uint8_t*>(bs->cur);
          bs->cur += bs->page.compressed_page_size;
          if (bs->cur > bs->end) {
            error[warp_id] |= static_cast<int32_t>(decode_error::DATA_STREAM_OVERRUN);
          }
          bs->page.kernel_mask = kernel_mask_for_page(bs->page, bs->ck);
        } else {
          // Parse failure (or negative page size): abandon the rest of the stream.
          bs->cur = bs->end;
        }
      }
      // Broadcast lane 0's result to the whole warp before the next iteration.
      index_out = shuffle(index_out);
      if (index_out >= 0 && index_out < max_num_pages && lane_id == 0) {
        page_info[index_out] = bs->page;
      }
      num_values = shuffle(num_values);
      __syncwarp();
    }
    if (lane_id == 0) {
      chunks[chunk].num_data_pages = data_page_count;
      chunks[chunk].num_dict_pages = dictionary_page_count;
      if (error[warp_id] != 0) { set_error(error[warp_id], error_code); }
    }
  }
}

/**
 * @brief Kernel for building dictionary index for the specified column chunks
 *
 * This function builds an index to point to each dictionary entry
 * (string format is 4-byte little-endian string length followed by character
 * data). The index is a 32-bit integer which contains the offset of each string
 * relative to the beginning of the dictionary page data.
 *
 * @param[in] chunks List of column chunks
 * @param[in] num_chunks Number of column chunks
 */
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
  gpuBuildStringDictionaryIndex(ColumnChunkDesc* chunks, int32_t num_chunks)
{
  __shared__ ColumnChunkDesc chunk_g[4];

  int lane_id               = threadIdx.x % 32;
  int chunk                 = (blockIdx.x * 4) + (threadIdx.x / 32);
  ColumnChunkDesc* const ck = &chunk_g[threadIdx.x / 32];
  if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk];
  __syncthreads();

  if (chunk >= num_chunks) { return; }
  // NOTE: only lane 0 of each warp does the (sequential) index construction.
  if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) {
    // Data type to describe a string
    string_index_pair* dict_index = ck->str_dict_index;
    uint8_t const* dict           = ck->page_info[0].page_data;
    int dict_size                 = ck->page_info[0].uncompressed_page_size;
    int num_entries               = ck->page_info[0].num_input_values;
    int pos = 0, cur = 0;
    for (int i = 0; i < num_entries; i++) {
      int len = 0;
      if (cur + 4 <= dict_size) {
        // 4-byte little-endian length prefix.
        len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
        if (len >= 0 && cur + 4 + len <= dict_size) {
          pos = cur;
          cur = cur + 4 + len;
        } else {
          // Out-of-bounds entry: stop advancing; remaining entries repeat `pos`.
          cur = dict_size;
        }
      }
      // TODO: Could store 8 entries in shared mem, then do a single warp-wide store
      dict_index[i].first  = reinterpret_cast<char const*>(dict + pos + 4);
      dict_index[i].second = len;
    }
  }
}

// Host-side launcher for gpuDecodePageHeaders.
void __host__ DecodePageHeaders(ColumnChunkDesc* chunks,
                                int32_t num_chunks,
                                int32_t* error_code,
                                rmm::cuda_stream_view stream)
{
  dim3 dim_block(128, 1);
  dim3 dim_grid((num_chunks + 3) >> 2, 1);  // 1 chunk per warp, 4 warps per block
  gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks, error_code);
}

// Host-side launcher for gpuBuildStringDictionaryIndex.
void __host__ BuildStringDictionaryIndex(ColumnChunkDesc* chunks,
                                         int32_t num_chunks,
                                         rmm::cuda_stream_view stream)
{
  dim3 dim_block(128, 1);
  dim3 dim_grid((num_chunks + 3) >> 2, 1);  // 1 chunk per warp, 4 warps per block
  gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks);
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader_impl_helpers.cpp
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reader_impl_helpers.hpp"

#include <io/utilities/row_selection.hpp>

#include <numeric>
#include <regex>

namespace cudf::io::parquet::detail {

namespace {

// Maps a Parquet LogicalType annotation onto the legacy ConvertedType enum
// (UNKNOWN when there is no equivalent).
ConvertedType logical_type_to_converted_type(thrust::optional<LogicalType> const& logical)
{
  if (not logical.has_value()) { return UNKNOWN; }
  switch (logical->type) {
    case LogicalType::STRING: return UTF8;
    case LogicalType::MAP: return MAP;
    case LogicalType::LIST: return LIST;
    case LogicalType::ENUM: return ENUM;
    case LogicalType::DECIMAL: return DECIMAL;  // TODO use decimal scale/precision
    case LogicalType::DATE: return DATE;
    case LogicalType::TIME:
      if (logical->is_time_millis()) {
        return TIME_MILLIS;
      } else if (logical->is_time_micros()) {
        return TIME_MICROS;
      }
      break;
    case LogicalType::TIMESTAMP:
      if (logical->is_timestamp_millis()) {
        return TIMESTAMP_MILLIS;
      } else if (logical->is_timestamp_micros()) {
        return TIMESTAMP_MICROS;
      }
      break;
    case LogicalType::INTEGER:
      switch (logical->bit_width()) {
        case 8: return logical->is_signed() ? INT_8 : UINT_8;
        case 16: return logical->is_signed() ? INT_16 : UINT_16;
        case 32: return logical->is_signed() ? INT_32 : UINT_32;
        case 64: return logical->is_signed() ? INT_64 : UINT_64;
        default: break;
      }
      // NOTE(review): there is no `break` here, so an INTEGER with an unhandled
      // bit width falls through into the UNKNOWN case and returns NA — confirm
      // this fall-through is intentional (consider [[fallthrough]] or break).
    case LogicalType::UNKNOWN: return NA;
    case LogicalType::JSON: return JSON;
    case LogicalType::BSON: return BSON;
    default: break;
  }
  return UNKNOWN;
}

}  // namespace

/**
 * @brief Function that translates Parquet datatype to cuDF type enum
 */
type_id to_type_id(SchemaElement const& schema,
                   bool strings_to_categorical,
                   type_id timestamp_type_id)
{
  auto const physical       = schema.type;
  auto const logical_type   = schema.logical_type;
  auto converted_type       = schema.converted_type;
  int32_t decimal_precision = schema.decimal_precision;

  // FIXME(ets): this should just use logical type to deduce the type_id. then fall back to
  // converted_type if logical_type isn't set

  // Logical type used for actual data interpretation; the legacy converted type
  // is superseded by 'logical' type whenever available.
  auto const inferred_converted_type = logical_type_to_converted_type(logical_type);
  if (inferred_converted_type != UNKNOWN) { converted_type = inferred_converted_type; }
  if (inferred_converted_type == DECIMAL) { decimal_precision = schema.logical_type->precision(); }

  switch (converted_type.value_or(UNKNOWN)) {
    case UINT_8: return type_id::UINT8;
    case INT_8: return type_id::INT8;
    case UINT_16: return type_id::UINT16;
    case INT_16: return type_id::INT16;
    case UINT_32: return type_id::UINT32;
    case UINT_64: return type_id::UINT64;
    case DATE: return type_id::TIMESTAMP_DAYS;
    case TIME_MILLIS: return type_id::DURATION_MILLISECONDS;
    case TIME_MICROS: return type_id::DURATION_MICROSECONDS;
    case TIMESTAMP_MILLIS:
      // User-requested timestamp type (if any) overrides the file's resolution.
      return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
                                                   : type_id::TIMESTAMP_MILLISECONDS;
    case TIMESTAMP_MICROS:
      return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
                                                   : type_id::TIMESTAMP_MICROSECONDS;
    case DECIMAL:
      // Choose the smallest cuDF decimal type the physical representation fits in.
      if (physical == INT32) { return type_id::DECIMAL32; }
      if (physical == INT64) { return type_id::DECIMAL64; }
      if (physical == FIXED_LEN_BYTE_ARRAY) {
        if (schema.type_length <= static_cast<int32_t>(sizeof(int32_t))) {
          return type_id::DECIMAL32;
        }
        if (schema.type_length <= static_cast<int32_t>(sizeof(int64_t))) {
          return type_id::DECIMAL64;
        }
        if (schema.type_length <= static_cast<int32_t>(sizeof(__int128_t))) {
          return type_id::DECIMAL128;
        }
      }
      if (physical == BYTE_ARRAY) {
        CUDF_EXPECTS(decimal_precision <= MAX_DECIMAL128_PRECISION, "Invalid decimal precision");
        if (decimal_precision <= MAX_DECIMAL32_PRECISION) {
          return type_id::DECIMAL32;
        } else if (decimal_precision <= MAX_DECIMAL64_PRECISION) {
          return type_id::DECIMAL64;
        } else {
          return type_id::DECIMAL128;
        }
      }
      CUDF_FAIL("Invalid representation of decimal type");
      break;

    // maps are just List<Struct<>>.
    case MAP:
    case LIST: return type_id::LIST;
    case NA: return type_id::STRING;
    // return type_id::EMPTY; //TODO(kn): enable after Null/Empty column support
    default: break;
  }

  // Nanosecond time/timestamp have no ConvertedType equivalent, so they are
  // recognized here directly from the logical type.
  if (inferred_converted_type == UNKNOWN and physical == INT64 and logical_type.has_value()) {
    if (logical_type->is_timestamp_nanos()) {
      return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
                                                   : type_id::TIMESTAMP_NANOSECONDS;
    } else if (logical_type->is_time_nanos()) {
      return type_id::DURATION_NANOSECONDS;
    }
  }

  // is it simply a struct?
  if (schema.is_struct()) { return type_id::STRUCT; }

  // Physical storage type supported by Parquet; controls the on-disk storage
  // format in combination with the encoding type.
  switch (physical) {
    case BOOLEAN: return type_id::BOOL8;
    case INT32: return type_id::INT32;
    case INT64: return type_id::INT64;
    case FLOAT: return type_id::FLOAT32;
    case DOUBLE: return type_id::FLOAT64;
    case BYTE_ARRAY:
    case FIXED_LEN_BYTE_ARRAY:
      // Can be mapped to INT32 (32-bit hash) or STRING
      return strings_to_categorical ? type_id::INT32 : type_id::STRING;
    case INT96:
      return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
                                                   : type_id::TIMESTAMP_NANOSECONDS;
    default: break;
  }

  return type_id::EMPTY;
}

void metadata::sanitize_schema()
{
  // Parquet isn't very strict about incoming metadata. Lots of things can and should be inferred.
  // There are also a lot of rules that simply aren't followed and are expected to be worked around.
  // This step sanitizes the metadata to something that isn't ambiguous.
  //
  // Take, for example, the following schema:
  //
  //  required group field_id=-1 user {
  //    required int32 field_id=-1 id;
  //    optional group field_id=-1 phoneNumbers {
  //      repeated group field_id=-1 phone {
  //        required int64 field_id=-1 number;
  //        optional binary field_id=-1 kind (String);
  //      }
  //    }
  //  }
  //
  // This real-world example has no annotations telling us what is a list or a struct. On the
  // surface this looks like a column of id's and a column of list<struct<int64, string>>, but this
  // actually should be interpreted as a struct<list<struct<int64, string>>>. The phoneNumbers field
  // has to be a struct because it is a group with no repeated tag and we have no annotation. The
  // repeated group is actually BOTH a struct due to the multiple children and a list due to
  // repeated.
  //
  // This code attempts to make this less messy for the code that follows.

  std::function<void(size_t)> process = [&](size_t schema_idx) -> void {
    // NOTE(review): schema_idx is unsigned, so this check is always false —
    // presumably meant to guard a signed "no parent/child" sentinel; confirm.
    if (schema_idx < 0) { return; }
    auto& schema_elem = schema[schema_idx];
    if (schema_idx != 0 && schema_elem.type == UNDEFINED_TYPE) {
      auto const parent_type = schema[schema_elem.parent_idx].converted_type;
      if (schema_elem.repetition_type == REPEATED && schema_elem.num_children > 1 &&
          parent_type != LIST && parent_type != MAP) {
        // This is a list of structs, so we need to mark this as a list, but also
        // add a struct child and move this element's children to the struct
        schema_elem.converted_type  = LIST;
        schema_elem.repetition_type = OPTIONAL;
        auto const struct_node_idx  = static_cast<size_type>(schema.size());

        SchemaElement struct_elem;
        struct_elem.name            = "struct_node";
        struct_elem.repetition_type = REQUIRED;
        struct_elem.num_children    = schema_elem.num_children;
        struct_elem.type            = UNDEFINED_TYPE;
        struct_elem.converted_type  = UNKNOWN;

        // swap children
        struct_elem.children_idx = std::move(schema_elem.children_idx);
        schema_elem.children_idx = {struct_node_idx};
        schema_elem.num_children = 1;

        struct_elem.max_definition_level = schema_elem.max_definition_level;
        struct_elem.max_repetition_level = schema_elem.max_repetition_level;
        schema_elem.max_definition_level--;
        schema_elem.max_repetition_level = schema[schema_elem.parent_idx].max_repetition_level;

        // change parent index on new node and on children
        struct_elem.parent_idx = schema_idx;
        for (auto& child_idx : struct_elem.children_idx) {
          schema[child_idx].parent_idx = struct_node_idx;
        }
        // add our struct
        // NOTE(review): push_back may reallocate `schema`, invalidating the
        // `schema_elem` reference; it is not used after this point, but the
        // recursion below re-reads through it — verify lifetime assumptions.
        schema.push_back(struct_elem);
      }
    }

    // Recurse into (possibly rewritten) children.
    for (auto& child_idx : schema_elem.children_idx) {
      process(child_idx);
    }
  };

  process(0);
}

// Reads and validates the Parquet header/footer magic, then parses the Thrift
// footer metadata and sanitizes the resulting schema.
metadata::metadata(datasource* source)
{
  constexpr auto header_len = sizeof(file_header_s);
  constexpr auto ender_len  = sizeof(file_ender_s);

  auto const len = source->size();
  // NOTE(review): the header/footer reads happen before the `len` sanity check
  // below — for a source smaller than header+footer the second read's offset
  // underflows; confirm datasource::host_read tolerates this.
  auto const header_buffer = source->host_read(0, header_len);
  auto const header        = reinterpret_cast<file_header_s const*>(header_buffer->data());
  auto const ender_buffer  = source->host_read(len - ender_len, ender_len);
  auto const ender         = reinterpret_cast<file_ender_s const*>(ender_buffer->data());
  CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
  CUDF_EXPECTS(header->magic == parquet_magic && ender->magic == parquet_magic,
               "Corrupted header or footer");
  CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len),
               "Incorrect footer length");

  // Parse the Thrift-encoded footer into this metadata object.
  auto const buffer = source->host_read(len - ender->footer_len - ender_len, ender->footer_len);
  CompactProtocolReader cp(buffer->data(), ender->footer_len);
  CUDF_EXPECTS(cp.read(this), "Cannot parse metadata");
  CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema");
  sanitize_schema();
}

// Parses the footer metadata of every input source.
std::vector<metadata> aggregate_reader_metadata::metadatas_from_sources(
  host_span<std::unique_ptr<datasource> const> sources)
{
  std::vector<metadata> metadatas;
  std::transform(
    sources.begin(), sources.end(), std::back_inserter(metadatas), [](auto const& source) {
      return metadata(source.get());
    });
  return metadatas;
}

// Collects each file's key/value metadata into one map per source.
std::vector<std::unordered_map<std::string, std::string>>
aggregate_reader_metadata::collect_keyval_metadata() const
{
  std::vector<std::unordered_map<std::string, std::string>> kv_maps;
  std::transform(per_file_metadata.cbegin(),
                 per_file_metadata.cend(),
                 std::back_inserter(kv_maps),
                 [](auto const& pfm) {
                   std::unordered_map<std::string, std::string> kv_map;
                   std::transform(pfm.key_value_metadata.cbegin(),
                                  pfm.key_value_metadata.cend(),
                                  std::inserter(kv_map, kv_map.end()),
                                  [](auto const& kv) {
                                    return std::pair{kv.key, kv.value};
                                  });
                   return kv_map;
                 });
  return kv_maps;
}

// Total row count across all sources; cross-checks each file's header count
// against the sum of its row-group counts.
int64_t aggregate_reader_metadata::calc_num_rows() const
{
  return std::accumulate(
    per_file_metadata.cbegin(), per_file_metadata.cend(), 0l, [](auto& sum, auto& pfm) {
      auto const rowgroup_rows = std::accumulate(
        pfm.row_groups.cbegin(), pfm.row_groups.cend(), 0l, [](auto& rg_sum, auto& rg) {
          return rg_sum + rg.num_rows;
        });
      CUDF_EXPECTS(pfm.num_rows == 0 || pfm.num_rows == rowgroup_rows,
                   "Header and row groups disagree about number of rows in file!");
      // A zero header count with non-empty row groups is trusted from the groups.
      return sum + (pfm.num_rows == 0 && rowgroup_rows > 0 ? rowgroup_rows : pfm.num_rows);
    });
}

// Total row-group count across all sources.
// NOTE(review): accumulates size_t counts into a size_type (int) — could
// overflow for a pathologically large number of row groups; confirm acceptable.
size_type aggregate_reader_metadata::calc_num_row_groups() const
{
  return std::accumulate(
    per_file_metadata.cbegin(), per_file_metadata.cend(), 0, [](auto& sum, auto& pfm) {
      return sum + pfm.row_groups.size();
    });
}

aggregate_reader_metadata::aggregate_reader_metadata(
  host_span<std::unique_ptr<datasource> const> sources)
  : per_file_metadata(metadatas_from_sources(sources)),
    keyval_maps(collect_keyval_metadata()),
    num_rows(calc_num_rows()),
    num_row_groups(calc_num_row_groups())
{
  if (per_file_metadata.size() > 0) {
    auto const& first_meta = per_file_metadata.front();
    auto const num_cols =
      first_meta.row_groups.size() > 0 ? first_meta.row_groups.front().columns.size() : 0;
    auto const& schema = first_meta.schema;

    // Verify that the input files have matching numbers of columns and schema.
    for (auto const& pfm : per_file_metadata) {
      if (pfm.row_groups.size() > 0) {
        CUDF_EXPECTS(num_cols == pfm.row_groups.front().columns.size(),
                     "All sources must have the same number of columns");
      }
      CUDF_EXPECTS(schema == pfm.schema, "All sources must have the same schema");
    }
  }
}

// Returns the requested row group from the given source (source index is
// bounds-checked; the row-group index is not).
RowGroup const& aggregate_reader_metadata::get_row_group(size_type row_group_index,
                                                         size_type src_idx) const
{
  CUDF_EXPECTS(src_idx >= 0 && src_idx < static_cast<size_type>(per_file_metadata.size()),
               "invalid source index");
  return per_file_metadata[src_idx].row_groups[row_group_index];
}

// Finds the column chunk metadata matching a schema index within a row group.
ColumnChunkMetaData const& aggregate_reader_metadata::get_column_metadata(size_type row_group_index,
                                                                          size_type src_idx,
                                                                          int schema_idx) const
{
  auto col =
    std::find_if(per_file_metadata[src_idx].row_groups[row_group_index].columns.begin(),
                 per_file_metadata[src_idx].row_groups[row_group_index].columns.end(),
                 [schema_idx](ColumnChunk const& col) { return col.schema_idx == schema_idx; });
  CUDF_EXPECTS(col != std::end(per_file_metadata[src_idx].row_groups[row_group_index].columns),
               "Found no metadata for schema index");
  return col->meta_data;
}

// Extracts the raw `"index_columns"` list from the pandas metadata JSON, if present.
std::string aggregate_reader_metadata::get_pandas_index() const
{
  // Assumes that all input files have the same metadata
  // TODO: verify this assumption
  auto it = keyval_maps[0].find("pandas");
  if (it != keyval_maps[0].end()) {
    // Captures a list of quoted strings found inside square brackets after `"index_columns":`
    // Inside quotes supports newlines, brackets, escaped quotes, etc.
    // One-liner regex:
    // "index_columns"\s*:\s*\[\s*((?:"(?:|(?:.*?(?![^\\]")).?)[^\\]?",?\s*)*)\]
    // Documented below.
    std::regex index_columns_expr{
      R"("index_columns"\s*:\s*\[\s*)"  // match preamble, opening square bracket, whitespace
      R"(()"                            // Open first capturing group
      R"((?:")"                         // Open non-capturing group match opening quote
      R"((?:|(?:.*?(?![^\\]")).?))"     // match empty string or anything between quotes
      R"([^\\]?")"                      // Match closing non-escaped quote
      R"(,?\s*)"                        // Match optional comma and whitespace
      R"()*)"                           // Close non-capturing group and repeat 0 or more times
      R"())"                            // Close first capturing group
      R"(\])"                           // Match closing square brackets
    };
    std::smatch sm;
    if (std::regex_search(it->second, sm, index_columns_expr)) { return sm[1].str(); }
  }
  return "";
}

// Splits the result of get_pandas_index() into de-duplicated, unescaped names.
std::vector<std::string> aggregate_reader_metadata::get_pandas_index_names() const
{
  std::vector<std::string> names;
  auto str = get_pandas_index();
  if (str.length() != 0) {
    std::regex index_name_expr{R"(\"((?:\\.|[^\"])*)\")"};
    std::smatch sm;
    while (std::regex_search(str, sm, index_name_expr)) {
      if (sm.size() == 2) {  // 2 = whole match, first item
        if (std::find(names.begin(), names.end(), sm[1].str()) == names.end()) {
          // Unescape \" sequences within the captured name.
          std::regex esc_quote{R"(\\")"};
          names.emplace_back(std::regex_replace(sm[1].str(), esc_quote, R"(")"));
        }
      }
      str = sm.suffix();
    }
  }
  return names;
}

// Resolves the effective (skip_rows, num_rows, row-group list) for a read,
// applying the optional predicate-pushdown filter first.
std::tuple<int64_t, size_type, std::vector<row_group_info>>
aggregate_reader_metadata::select_row_groups(
  host_span<std::vector<size_type> const> row_group_indices,
  int64_t skip_rows_opt,
  std::optional<size_type> const& num_rows_opt,
  host_span<data_type const> output_dtypes,
  std::optional<std::reference_wrapper<ast::expression const>> filter,
  rmm::cuda_stream_view stream) const
{
  std::optional<std::vector<std::vector<size_type>>> filtered_row_group_indices;
  if (filter.has_value()) {
    filtered_row_group_indices =
      filter_row_groups(row_group_indices, output_dtypes, filter.value(), stream);
    if (filtered_row_group_indices.has_value()) {
      row_group_indices =
        host_span<std::vector<size_type> const>(filtered_row_group_indices.value());
    }
  }
  std::vector<row_group_info> selection;
  // skip_rows/num_rows only apply when the caller did not pick explicit groups.
  auto [rows_to_skip, rows_to_read] = [&]() {
    if (not row_group_indices.empty()) { return std::pair<int64_t, size_type>{}; }
    auto const from_opts = cudf::io::detail::skip_rows_num_rows_from_options(
      skip_rows_opt, num_rows_opt, get_num_rows());
    return std::pair{static_cast<int64_t>(from_opts.first), from_opts.second};
  }();

  if (!row_group_indices.empty()) {
    CUDF_EXPECTS(row_group_indices.size() == per_file_metadata.size(),
                 "Must specify row groups for each source");
    for (size_t src_idx = 0; src_idx < row_group_indices.size(); ++src_idx) {
      for (auto const& rowgroup_idx : row_group_indices[src_idx]) {
        CUDF_EXPECTS(
          rowgroup_idx >= 0 &&
            rowgroup_idx < static_cast<size_type>(per_file_metadata[src_idx].row_groups.size()),
          "Invalid rowgroup index");
        selection.emplace_back(rowgroup_idx, rows_to_read, src_idx);
        // rows_to_read doubles as the running start-row for explicit selections.
        rows_to_read += get_row_group(rowgroup_idx, src_idx).num_rows;
      }
    }
  } else {
    size_type count = 0;
    for (size_t src_idx = 0; src_idx < per_file_metadata.size(); ++src_idx) {
      for (size_t rg_idx = 0; rg_idx < per_file_metadata[src_idx].row_groups.size(); ++rg_idx) {
        auto const chunk_start_row = count;
        count += get_row_group(rg_idx, src_idx).num_rows;
        // Keep any group overlapping the requested window (or an empty leading group).
        if (count > rows_to_skip || count == 0) {
          selection.emplace_back(rg_idx, chunk_start_row, src_idx);
        }
        if (count >= rows_to_skip + rows_to_read) { break; }
      }
    }
  }

  return {rows_to_skip, rows_to_read, std::move(selection)};
}

// Builds the input-column / output-column-buffer descriptions for the selected
// column paths (or all columns when `use_names` is empty).
std::tuple<std::vector<input_column_info>,
           std::vector<cudf::io::detail::inline_column_buffer>,
           std::vector<size_type>>
aggregate_reader_metadata::select_columns(std::optional<std::vector<std::string>> const& use_names,
                                          bool include_index,
                                          bool strings_to_categorical,
                                          type_id timestamp_type_id) const
{
  // Returns the schema index of `name` among `schema_elem`'s children, or -1.
  auto find_schema_child = [&](SchemaElement const& schema_elem, std::string const& name) {
    auto const& col_schema_idx =
      std::find_if(schema_elem.children_idx.cbegin(),
                   schema_elem.children_idx.cend(),
                   [&](size_t col_schema_idx) { return get_schema(col_schema_idx).name == name; });

    return (col_schema_idx != schema_elem.children_idx.end())
             ? static_cast<size_type>(*col_schema_idx)
             : -1;
  };

  std::vector<cudf::io::detail::inline_column_buffer> output_columns;
  std::vector<input_column_info> input_columns;
  std::vector<int> nesting;

  // Return true if column path is valid. e.g. if the path is {"struct1", "child1"}, then it is
  // valid if "struct1.child1" exists in this file's schema. If "struct1" exists but "child1" is
  // not a child of "struct1" then the function will return false for "struct1"
  std::function<bool(
    column_name_info const*, int, std::vector<cudf::io::detail::inline_column_buffer>&, bool)>
    build_column = [&](column_name_info const* col_name_info,
                       int schema_idx,
                       std::vector<cudf::io::detail::inline_column_buffer>& out_col_array,
                       bool has_list_parent) {
      if (schema_idx < 0) { return false; }
      auto const& schema_elem = get_schema(schema_idx);

      // if schema_elem is a stub then it does not exist in the column_name_info and column_buffer
      // hierarchy. So continue on
      if (schema_elem.is_stub()) {
        // is this legit?
        CUDF_EXPECTS(schema_elem.num_children == 1, "Unexpected number of children for stub");
        auto child_col_name_info = (col_name_info) ? &col_name_info->children[0] : nullptr;
        return build_column(
          child_col_name_info, schema_elem.children_idx[0], out_col_array, has_list_parent);
      }

      auto const one_level_list = schema_elem.is_one_level_list(get_schema(schema_elem.parent_idx));

      // if we're at the root, this is a new output column
      auto const col_type = one_level_list
                              ? type_id::LIST
                              : to_type_id(schema_elem, strings_to_categorical, timestamp_type_id);
      auto const dtype = to_data_type(col_type, schema_elem);

      cudf::io::detail::inline_column_buffer output_col(dtype,
                                                        schema_elem.repetition_type == OPTIONAL);
      if (has_list_parent) { output_col.user_data |= PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT; }
      // store the index of this element if inserted in out_col_array
      nesting.push_back(static_cast<int>(out_col_array.size()));
      output_col.name = schema_elem.name;

      // build each child
      bool path_is_valid = false;
      if (col_name_info == nullptr or col_name_info->children.empty()) {
        // add all children of schema_elem.
        // At this point, we can no longer pass a col_name_info to build_column
        for (int idx = 0; idx < schema_elem.num_children; idx++) {
          path_is_valid |= build_column(nullptr,
                                        schema_elem.children_idx[idx],
                                        output_col.children,
                                        has_list_parent || col_type == type_id::LIST);
        }
      } else {
        for (size_t idx = 0; idx < col_name_info->children.size(); idx++) {
          path_is_valid |=
            build_column(&col_name_info->children[idx],
                         find_schema_child(schema_elem, col_name_info->children[idx].name),
                         output_col.children,
                         has_list_parent || col_type == type_id::LIST);
        }
      }

      // if I have no children, we're at a leaf and I'm an input column (that is, one with actual
      // data stored) so add me to the list.
      if (schema_elem.num_children == 0) {
        input_column_info& input_col = input_columns.emplace_back(
          input_column_info{schema_idx, schema_elem.name, schema_elem.max_repetition_level > 0});

        // set up child output column for one-level encoding list
        if (one_level_list) {
          // determine the element data type
          auto const element_type =
            to_type_id(schema_elem, strings_to_categorical, timestamp_type_id);
          auto const element_dtype = to_data_type(element_type, schema_elem);

          cudf::io::detail::inline_column_buffer element_col(
            element_dtype, schema_elem.repetition_type == OPTIONAL);
          if (has_list_parent || col_type == type_id::LIST) {
            element_col.user_data |= PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT;
          }
          // store the index of this element
          nesting.push_back(static_cast<int>(output_col.children.size()));

          // TODO: not sure if we should assign a name or leave it blank
          element_col.name = "element";

          output_col.children.push_back(std::move(element_col));
        }

        std::copy(nesting.cbegin(), nesting.cend(), std::back_inserter(input_col.nesting));

        // pop off the extra nesting element.
        if (one_level_list) { nesting.pop_back(); }

        path_is_valid = true;  // If we're able to reach leaf then path is valid
      }

      if (path_is_valid) { out_col_array.push_back(std::move(output_col)); }

      nesting.pop_back();
      return path_is_valid;
    };

  std::vector<int> output_column_schemas;

  //
  // there is not necessarily a 1:1 mapping between input columns and output columns.
  // For example, parquet does not explicitly store a ColumnChunkDesc for struct columns.
  // The "structiness" is simply implied by the schema. For example, this schema:
  //  required group field_id=1 name {
  //    required binary field_id=2 firstname (String);
  //    required binary field_id=3 middlename (String);
  //    required binary field_id=4 lastname (String);
  // }
  // will only contain 3 internal columns of data (firstname, middlename, lastname). But of
  // course "name" is ultimately the struct column we want to return.
  //
  // "firstname", "middlename" and "lastname" represent the input columns in the file that we
  // process to produce the final cudf "name" column.
  //
  // A user can ask for a single field out of the struct e.g. firstname.
  // In this case they'll pass a fully qualified name to the schema element like
  // ["name", "firstname"]
  //
  auto const& root = get_schema(0);
  if (not use_names.has_value()) {
    for (auto const& schema_idx : root.children_idx) {
      build_column(nullptr, schema_idx, output_columns, false);
      output_column_schemas.push_back(schema_idx);
    }
  } else {
    struct path_info {
      std::string full_path;
      int schema_idx;
    };

    // Convert schema into a vector of every possible path
    std::vector<path_info> all_paths;
    std::function<void(std::string, int)> add_path = [&](std::string path_till_now,
                                                         int schema_idx) {
      auto const& schema_elem = get_schema(schema_idx);
      std::string curr_path   = path_till_now + schema_elem.name;
      all_paths.push_back({curr_path, schema_idx});
      for (auto const& child_idx : schema_elem.children_idx) {
        add_path(curr_path + ".", child_idx);
      }
    };
    for (auto const& child_idx : get_schema(0).children_idx) {
      add_path("", child_idx);
    }

    // Find which of the selected paths are valid and get their schema index
    std::vector<path_info> valid_selected_paths;
    for (auto const& selected_path : *use_names) {
      auto found_path =
        std::find_if(all_paths.begin(), all_paths.end(), [&](path_info& valid_path) {
          return valid_path.full_path == selected_path;
        });
      if (found_path != all_paths.end()) {
        valid_selected_paths.push_back({selected_path, found_path->schema_idx});
      }
    }

    // Now construct paths as vector of strings for further consumption
    std::vector<std::vector<std::string>> use_names3;
    std::transform(valid_selected_paths.cbegin(),
                   valid_selected_paths.cend(),
                   std::back_inserter(use_names3),
                   [&](path_info const& valid_path) {
                     auto schema_idx = valid_path.schema_idx;
                     std::vector<std::string> result_path;
                     // Walk parent links up to (but not including) the root.
                     do {
                       SchemaElement const& elem = get_schema(schema_idx);
                       result_path.push_back(elem.name);
                       schema_idx = elem.parent_idx;
                     } while (schema_idx > 0);
                     return std::vector<std::string>(result_path.rbegin(), result_path.rend());
                   });

    std::vector<column_name_info> selected_columns;
    if (include_index) {
      std::vector<std::string> index_names = get_pandas_index_names();
      std::transform(index_names.cbegin(),
                     index_names.cend(),
                     std::back_inserter(selected_columns),
                     [](std::string const& name) { return column_name_info(name); });
    }

    // Merge the vector use_names into a set of hierarchical column_name_info objects
    /* This is because if we have columns like this:
     *     col1
     *      / \
     *    s3   f4
     *   / \
     * f5   f6
     *
     * there may be common paths in use_names like:
     * {"col1", "s3", "f5"}, {"col1", "f4"}
     * which means we want the output to contain
     *     col1
     *      / \
     *    s3   f4
     *   /
     * f5
     *
     * rather than
     *  col1   col1
     *   |      |
     *   s3     f4
     *   |
     *   f5
     */
    for (auto const& path : use_names3) {
      auto array_to_find_in = &selected_columns;
      for (size_t depth = 0; depth < path.size(); ++depth) {
        // Check if the path exists in our selected_columns and if not, add it.
        auto const& name_to_find = path[depth];
        auto found_col           = std::find_if(
          array_to_find_in->begin(),
          array_to_find_in->end(),
          [&name_to_find](column_name_info const& col) { return col.name == name_to_find; });
        if (found_col == array_to_find_in->end()) {
          auto& col        = array_to_find_in->emplace_back(name_to_find);
          array_to_find_in = &col.children;
        } else {
          // Path exists. go down further.
          array_to_find_in = &found_col->children;
        }
      }
    }
    for (auto& col : selected_columns) {
      auto const& top_level_col_schema_idx = find_schema_child(root, col.name);
      bool valid_column = build_column(&col, top_level_col_schema_idx, output_columns, false);
      if (valid_column) output_column_schemas.push_back(top_level_col_schema_idx);
    }
  }

  return std::make_tuple(
    std::move(input_columns), std::move(output_columns), std::move(output_column_schemas));
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/predicate_pushdown.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reader_impl_helpers.hpp"

#include <cudf/ast/detail/expression_transformer.hpp>
#include <cudf/ast/detail/operators.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

#include <algorithm>
#include <list>
#include <numeric>
#include <optional>

namespace cudf::io::parquet::detail {

namespace {
/**
 * @brief Converts statistics in column chunks to 2 device columns - min, max values.
 *
 * Functor invoked via cudf::type_dispatcher: for a single output column it walks
 * every selected row group across all sources, decodes the chunk-level min/max
 * statistics on the host, and materializes them as a pair of device columns
 * (one row per selected row group). Row groups with missing statistics become
 * null rows.
 */
struct stats_caster {
  size_type total_row_groups;                               // total selected row groups (rows in output)
  std::vector<metadata> const& per_file_metadata;           // parsed footer per input source
  host_span<std::vector<size_type> const> row_group_indices;  // selected row groups per source

  // Casts a decoded statistics value to the output type T, handling timestamps
  // (wrap rep in duration) and strings (placeholder empty view; real string data
  // is built separately in make_strings_children).
  template <typename ToType, typename FromType>
  static ToType targetType(FromType const value)
  {
    if constexpr (cudf::is_timestamp<ToType>()) {
      return static_cast<ToType>(
        typename ToType::duration{static_cast<typename ToType::rep>(value)});
    } else if constexpr (std::is_same_v<ToType, string_view>) {
      return ToType{nullptr, 0};
    } else {
      return static_cast<ToType>(value);
    }
  }

  // uses storage type as T
  // Dictionary/nested types carry no usable chunk statistics.
  template <typename T, CUDF_ENABLE_IF(cudf::is_dictionary<T>() or cudf::is_nested<T>())>
  static T convert(uint8_t const* stats_val, size_t stats_size, Type const type)
  {
    CUDF_FAIL("unsupported type for stats casting");
  }

  template <typename T, CUDF_ENABLE_IF(cudf::is_boolean<T>())>
  static T convert(uint8_t const* stats_val, size_t stats_size, Type const type)
  {
    CUDF_EXPECTS(type == BOOLEAN, "Invalid type and stats combination");
    return targetType<T>(*reinterpret_cast<bool const*>(stats_val));
  }

  // integral but not boolean, and fixed_point, and chrono.
  template <typename T,
            CUDF_ENABLE_IF((cudf::is_integral<T>() and !cudf::is_boolean<T>()) or
                           cudf::is_fixed_point<T>() or cudf::is_chrono<T>())>
  static T convert(uint8_t const* stats_val, size_t stats_size, Type const type)
  {
    switch (type) {
      case INT32: return targetType<T>(*reinterpret_cast<int32_t const*>(stats_val));
      case INT64: return targetType<T>(*reinterpret_cast<int64_t const*>(stats_val));
      case INT96:  // Deprecated in parquet specification
        // NOTE(review): reassembles the 12-byte INT96 (8B low + 4B high) into a
        // 128-bit value — assumes little-endian stats encoding; confirm against
        // the INT96 layout used by the writer.
        return targetType<T>(static_cast<__int128_t>(reinterpret_cast<int64_t const*>(stats_val)[0])
                               << 32 |
                             reinterpret_cast<int32_t const*>(stats_val)[2]);
      case BYTE_ARRAY: [[fallthrough]];
      case FIXED_LEN_BYTE_ARRAY:
        if (stats_size == sizeof(T)) {
          // if type size == length of stats_val. then typecast and return.
          if constexpr (cudf::is_chrono<T>()) {
            return targetType<T>(*reinterpret_cast<typename T::rep const*>(stats_val));
          } else {
            return targetType<T>(*reinterpret_cast<T const*>(stats_val));
          }
        }
        // unsupported type
      default: CUDF_FAIL("Invalid type and stats combination");
    }
  }

  template <typename T, CUDF_ENABLE_IF(cudf::is_floating_point<T>())>
  static T convert(uint8_t const* stats_val, size_t stats_size, Type const type)
  {
    switch (type) {
      case FLOAT: return targetType<T>(*reinterpret_cast<float const*>(stats_val));
      case DOUBLE: return targetType<T>(*reinterpret_cast<double const*>(stats_val));
      default: CUDF_FAIL("Invalid type and stats combination");
    }
  }

  template <typename T, CUDF_ENABLE_IF(std::is_same_v<T, string_view>)>
  static T convert(uint8_t const* stats_val, size_t stats_size, Type const type)
  {
    switch (type) {
      case BYTE_ARRAY: [[fallthrough]];
      case FIXED_LEN_BYTE_ARRAY:
        // Non-owning view into the statistics buffer held by per_file_metadata.
        return string_view(reinterpret_cast<char const*>(stats_val), stats_size);
      default: CUDF_FAIL("Invalid type and stats combination");
    }
  }

  // Creates device columns from column statistics (min, max)
  //
  // @param col_idx Index of the column chunk within each row group
  // @param dtype   Output cudf data type of the statistics columns
  // @param stream  CUDA stream for device allocations/copies
  // @param mr      Device memory resource for the returned columns
  // @return pair of {min column, max column}, one row per selected row group
  template <typename T>
  std::pair<std::unique_ptr<column>, std::unique_ptr<column>> operator()(
    size_t col_idx,
    cudf::data_type dtype,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr) const
  {
    // List, Struct, Dictionary types are not supported
    if constexpr (cudf::is_compound<T>() && !std::is_same_v<T, string_view>) {
      CUDF_FAIL("Compound types do not have statistics");
    } else {
      // Local struct to hold host columns
      struct host_column {
        // using thrust::host_vector because std::vector<bool> uses bitmap instead of byte per bool.
        thrust::host_vector<T> val;
        std::vector<bitmask_type> null_mask;  // initialized all-valid; bits cleared for missing stats
        cudf::size_type null_count = 0;
        host_column(size_type total_row_groups)
          : val(total_row_groups),
            null_mask(cudf::util::div_rounding_up_safe<size_type>(
                        cudf::bitmask_allocation_size_bytes(total_row_groups),
                        sizeof(bitmask_type)),
                      ~bitmask_type{0})
        {
        }
        // Decodes one row group's statistic into row `index`; marks the row null
        // (val[index] stays default-constructed) when the statistic is absent.
        void set_index(size_type index,
                       thrust::optional<std::vector<uint8_t>> const& binary_value,
                       Type const type)
        {
          if (binary_value.has_value()) {
            val[index] = convert<T>(binary_value.value().data(), binary_value.value().size(), type);
          }
          if (not binary_value.has_value()) {
            clear_bit_unsafe(null_mask.data(), index);
            null_count++;
          }
        }
        // Builds chars/offsets device buffers for a strings column from host
        // string_views (empty views contribute zero-length entries).
        static auto make_strings_children(host_span<string_view> host_strings,
                                          rmm::cuda_stream_view stream,
                                          rmm::mr::device_memory_resource* mr)
        {
          std::vector<char> chars{};
          std::vector<cudf::size_type> offsets(1, 0);
          for (auto const& str : host_strings) {
            auto tmp =
              str.empty() ? std::string_view{} : std::string_view(str.data(), str.size_bytes());
            chars.insert(chars.end(), std::cbegin(tmp), std::cend(tmp));
            offsets.push_back(offsets.back() + tmp.length());
          }
          auto d_chars   = cudf::detail::make_device_uvector_async(chars, stream, mr);
          auto d_offsets = cudf::detail::make_device_uvector_sync(offsets, stream, mr);
          return std::tuple{std::move(d_chars), std::move(d_offsets)};
        }
        // Transfers the host column (values + null mask) to a device column.
        auto to_device(cudf::data_type dtype,
                       rmm::cuda_stream_view stream,
                       rmm::mr::device_memory_resource* mr)
        {
          if constexpr (std::is_same_v<T, string_view>) {
            auto [d_chars, d_offsets] = make_strings_children(val, stream, mr);
            return cudf::make_strings_column(
              val.size(),
              std::move(d_offsets),
              std::move(d_chars),
              rmm::device_buffer{
                null_mask.data(), cudf::bitmask_allocation_size_bytes(val.size()), stream, mr},
              null_count);
          }
          return std::make_unique<column>(
            dtype,
            val.size(),
            cudf::detail::make_device_uvector_async(val, stream, mr).release(),
            rmm::device_buffer{
              null_mask.data(), cudf::bitmask_allocation_size_bytes(val.size()), stream, mr},
            null_count);
        }
      };  // local struct host_column
      host_column min(total_row_groups);
      host_column max(total_row_groups);

      // Flattened row index across (source, row group) pairs.
      size_type stats_idx = 0;
      for (size_t src_idx = 0; src_idx < row_group_indices.size(); ++src_idx) {
        for (auto const rg_idx : row_group_indices[src_idx]) {
          auto const& row_group = per_file_metadata[src_idx].row_groups[rg_idx];
          auto const& colchunk  = row_group.columns[col_idx];
          // To support deprecated min, max fields.
          auto const& min_value = colchunk.meta_data.statistics.min_value.has_value()
                                    ? colchunk.meta_data.statistics.min_value
                                    : colchunk.meta_data.statistics.min;
          auto const& max_value = colchunk.meta_data.statistics.max_value.has_value()
                                    ? colchunk.meta_data.statistics.max_value
                                    : colchunk.meta_data.statistics.max;
          // translate binary data to Type then to <T>
          min.set_index(stats_idx, min_value, colchunk.meta_data.type);
          max.set_index(stats_idx, max_value, colchunk.meta_data.type);
          stats_idx++;
        }
      };
      return {min.to_device(dtype, stream, mr), max.to_device(dtype, stream, mr)};
    }
  }
};

/**
 * @brief Converts AST expression to StatsAST for comparing with column statistics
 * This is used in row group filtering based on predicate.
 * statistics min value of a column is referenced by column_index*2
 * statistics max value of a column is referenced by column_index*2+1
 *
 * Rewritten nodes are owned by the std::list members (_col_ref, _operators) so that
 * references held by parent nodes remain stable as new nodes are appended.
 */
class stats_expression_converter : public ast::detail::expression_transformer {
 public:
  stats_expression_converter(ast::expression const& expr, size_type const& num_columns)
    : _num_columns{num_columns}
  {
    expr.accept(*this);
  }

  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::literal const& )
   */
  std::reference_wrapper<ast::expression const> visit(ast::literal const& expr) override
  {
    _stats_expr = std::reference_wrapper<ast::expression const>(expr);
    return expr;
  }

  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::column_reference const& )
   */
  std::reference_wrapper<ast::expression const> visit(ast::column_reference const& expr) override
  {
    CUDF_EXPECTS(expr.get_table_source() == ast::table_reference::LEFT,
                 "Statistics AST supports only left table");
    CUDF_EXPECTS(expr.get_column_index() < _num_columns,
                 "Column index cannot be more than number of columns in the table");
    _stats_expr = std::reference_wrapper<ast::expression const>(expr);
    return expr;
  }

  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::column_name_reference const& )
   */
  std::reference_wrapper<ast::expression const> visit(
    ast::column_name_reference const& expr) override
  {
    CUDF_FAIL("Column name reference is not supported in statistics AST");
  }

  /**
   * @copydoc ast::detail::expression_transformer::visit(ast::operation const& )
   */
  std::reference_wrapper<ast::expression const> visit(ast::operation const& expr) override
  {
    using cudf::ast::ast_operator;
    auto const operands = expr.get_operands();
    auto const op       = expr.get_operator();

    if (auto* v = dynamic_cast<ast::column_reference const*>(&operands[0].get())) {
      // First operand should be column reference, second should be literal.
      CUDF_EXPECTS(cudf::ast::detail::ast_operator_arity(op) == 2,
                   "Only binary operations are supported on column reference");
      CUDF_EXPECTS(dynamic_cast<ast::literal const*>(&operands[1].get()) != nullptr,
                   "Second operand of binary operation with column reference must be a literal");
      v->accept(*this);
      auto const col_index = v->get_column_index();
      switch (op) {
        /* transform to stats conditions. op(col, literal)
           col1 == val --> vmin <= val && vmax >= val
           col1 != val --> !(vmin == val && vmax == val)
           col1 >  val --> vmax > val
           col1 <  val --> vmin < val
           col1 >= val --> vmax >= val
           col1 <= val --> vmin <= val
        */
        case ast_operator::EQUAL: {
          auto const& vmin = _col_ref.emplace_back(col_index * 2);
          auto const& vmax = _col_ref.emplace_back(col_index * 2 + 1);
          auto const& op1 =
            _operators.emplace_back(ast_operator::LESS_EQUAL, vmin, operands[1].get());
          auto const& op2 =
            _operators.emplace_back(ast_operator::GREATER_EQUAL, vmax, operands[1].get());
          _operators.emplace_back(ast::ast_operator::LOGICAL_AND, op1, op2);
          break;
        }
        case ast_operator::NOT_EQUAL: {
          auto const& vmin = _col_ref.emplace_back(col_index * 2);
          auto const& vmax = _col_ref.emplace_back(col_index * 2 + 1);
          auto const& op1  = _operators.emplace_back(ast_operator::NOT_EQUAL, vmin, vmax);
          auto const& op2 =
            _operators.emplace_back(ast_operator::NOT_EQUAL, vmax, operands[1].get());
          _operators.emplace_back(ast_operator::LOGICAL_OR, op1, op2);
          break;
        }
        case ast_operator::LESS: [[fallthrough]];
        case ast_operator::LESS_EQUAL: {
          auto const& vmin = _col_ref.emplace_back(col_index * 2);
          _operators.emplace_back(op, vmin, operands[1].get());
          break;
        }
        case ast_operator::GREATER: [[fallthrough]];
        case ast_operator::GREATER_EQUAL: {
          auto const& vmax = _col_ref.emplace_back(col_index * 2 + 1);
          _operators.emplace_back(op, vmax, operands[1].get());
          break;
        }
        default: CUDF_FAIL("Unsupported operation in Statistics AST");
      };
    } else {
      // Non-leaf operation (e.g. logical AND/OR): recurse into operands first.
      auto new_operands = visit_operands(operands);
      if (cudf::ast::detail::ast_operator_arity(op) == 2) {
        _operators.emplace_back(op, new_operands.front(), new_operands.back());
      } else if (cudf::ast::detail::ast_operator_arity(op) == 1) {
        _operators.emplace_back(op, new_operands.front());
      }
    }
    _stats_expr = std::reference_wrapper<ast::expression const>(_operators.back());
    return std::reference_wrapper<ast::expression const>(_operators.back());
  }

  /**
   * @brief Returns the AST to apply on Column chunk statistics.
   *
   * @return AST operation expression
   */
  [[nodiscard]] std::reference_wrapper<ast::expression const> get_stats_expr() const
  {
    return _stats_expr.value().get();
  }

 private:
  // Visits each operand in order, collecting the transformed references.
  std::vector<std::reference_wrapper<ast::expression const>> visit_operands(
    std::vector<std::reference_wrapper<ast::expression const>> operands)
  {
    std::vector<std::reference_wrapper<ast::expression const>> transformed_operands;
    for (auto const& operand : operands) {
      auto const new_operand = operand.get().accept(*this);
      transformed_operands.push_back(new_operand);
    }
    return transformed_operands;
  }
  std::optional<std::reference_wrapper<ast::expression const>> _stats_expr;  // root of rewritten AST
  size_type _num_columns;
  std::list<ast::column_reference> _col_ref;  // owns rewritten column refs (stable addresses)
  std::list<ast::operation> _operators;       // owns rewritten operations (stable addresses)
};
}  // namespace

// Filters row groups by evaluating `filter` against per-row-group min/max statistics.
// Returns std::nullopt when filtering cannot prune anything (all row groups required
// or all statistics are null); otherwise the surviving row group indices per source.
std::optional<std::vector<std::vector<size_type>>> aggregate_reader_metadata::filter_row_groups(
  host_span<std::vector<size_type> const> row_group_indices,
  host_span<data_type const> output_dtypes,
  std::reference_wrapper<ast::expression const> filter,
  rmm::cuda_stream_view stream) const
{
  auto mr = rmm::mr::get_current_device_resource();
  // Create row group indices.
  std::vector<std::vector<size_type>> filtered_row_group_indices;
  std::vector<std::vector<size_type>> all_row_group_indices;
  host_span<std::vector<size_type> const> input_row_group_indices;
  if (row_group_indices.empty()) {
    // No explicit selection: consider every row group of every source.
    std::transform(per_file_metadata.cbegin(),
                   per_file_metadata.cend(),
                   std::back_inserter(all_row_group_indices),
                   [](auto const& file_meta) {
                     std::vector<size_type> rg_idx(file_meta.row_groups.size());
                     std::iota(rg_idx.begin(), rg_idx.end(), 0);
                     return rg_idx;
                   });
    input_row_group_indices = host_span<std::vector<size_type> const>(all_row_group_indices);
  } else {
    input_row_group_indices = row_group_indices;
  }
  auto const total_row_groups = std::accumulate(input_row_group_indices.begin(),
                                                input_row_group_indices.end(),
                                                0,
                                                [](size_type sum, auto const& per_file_row_groups) {
                                                  return sum + per_file_row_groups.size();
                                                });

  // Converts Column chunk statistics to a table
  // where min(col[i]) = columns[i*2], max(col[i])=columns[i*2+1]
  // For each column, it contains #sources * #column_chunks_per_src rows.
  std::vector<std::unique_ptr<column>> columns;
  stats_caster stats_col{total_row_groups, per_file_metadata, input_row_group_indices};
  for (size_t col_idx = 0; col_idx < output_dtypes.size(); col_idx++) {
    auto const& dtype = output_dtypes[col_idx];
    // Only comparable types except fixed point are supported.
    if (cudf::is_compound(dtype) && dtype.id() != cudf::type_id::STRING) {
      // placeholder only for unsupported types.
      columns.push_back(cudf::make_numeric_column(
        data_type{cudf::type_id::BOOL8}, total_row_groups, rmm::device_buffer{}, 0, stream, mr));
      columns.push_back(cudf::make_numeric_column(
        data_type{cudf::type_id::BOOL8}, total_row_groups, rmm::device_buffer{}, 0, stream, mr));
      continue;
    }
    auto [min_col, max_col] =
      cudf::type_dispatcher<dispatch_storage_type>(dtype, stats_col, col_idx, dtype, stream, mr);
    columns.push_back(std::move(min_col));
    columns.push_back(std::move(max_col));
  }
  auto stats_table = cudf::table(std::move(columns));

  // Converts AST to StatsAST with reference to min, max columns in above `stats_table`.
  stats_expression_converter stats_expr{filter, static_cast<size_type>(output_dtypes.size())};
  auto stats_ast     = stats_expr.get_stats_expr();
  auto predicate_col = cudf::detail::compute_column(stats_table, stats_ast.get(), stream, mr);
  auto predicate     = predicate_col->view();
  CUDF_EXPECTS(predicate.type().id() == cudf::type_id::BOOL8,
               "Filter expression must return a boolean column");

  // Copy the predicate's null mask to the host; default all-valid when non-nullable.
  auto num_bitmasks = num_bitmask_words(predicate.size());
  std::vector<bitmask_type> host_bitmask(num_bitmasks, ~bitmask_type{0});
  if (predicate.nullable()) {
    // Async copy on `stream`; the make_std_vector_sync call below synchronizes the
    // same stream before host_bitmask is read, so no explicit sync is needed here.
    CUDF_CUDA_TRY(cudaMemcpyAsync(host_bitmask.data(),
                                  predicate.null_mask(),
                                  num_bitmasks * sizeof(bitmask_type),
                                  cudaMemcpyDefault,
                                  stream.value()));
  }

  auto validity_it = cudf::detail::make_counting_transform_iterator(
    0, [bitmask = host_bitmask.data()](auto bit_index) { return bit_is_set(bitmask, bit_index); });

  auto is_row_group_required = cudf::detail::make_std_vector_sync(
    device_span<uint8_t const>(predicate.data<uint8_t>(), predicate.size()), stream);

  // Return only filtered row groups based on predicate
  // if all are required or all are nulls, return.
  if (std::all_of(is_row_group_required.cbegin(),
                  is_row_group_required.cend(),
                  [](auto i) { return bool(i); }) or
      predicate.null_count() == predicate.size()) {
    return std::nullopt;
  }
  // Keep a row group when the predicate is null (statistics inconclusive) or true.
  size_type is_required_idx = 0;
  for (size_t src_idx = 0; src_idx < input_row_group_indices.size(); ++src_idx) {
    std::vector<size_type> filtered_row_groups;
    for (auto const rg_idx : input_row_group_indices[src_idx]) {
      if ((!validity_it[is_required_idx]) || is_row_group_required[is_required_idx]) {
        filtered_row_groups.push_back(rg_idx);
      }
      ++is_required_idx;
    }
    filtered_row_group_indices.push_back(std::move(filtered_row_groups));
  }
  return {std::move(filtered_row_group_indices)};
}

// convert column named expression to column index reference expression
std::reference_wrapper<ast::expression const> named_to_reference_converter::visit(
  ast::literal const& expr)
{
  _stats_expr = std::reference_wrapper<ast::expression const>(expr);
  return expr;
}

std::reference_wrapper<ast::expression const> named_to_reference_converter::visit(
  ast::column_reference const& expr)
{
  _stats_expr = std::reference_wrapper<ast::expression const>(expr);
  return expr;
}

std::reference_wrapper<ast::expression const> named_to_reference_converter::visit(
  ast::column_name_reference const& expr)
{
  // check if column name is in metadata
  auto col_index_it = column_name_to_index.find(expr.get_column_name());
  if (col_index_it == column_name_to_index.end()) {
    CUDF_FAIL("Column name not found in metadata");
  }
  auto col_index = col_index_it->second;
  // Owned by _col_ref (std::list) so the returned reference stays valid.
  _col_ref.emplace_back(col_index);
  _stats_expr = std::reference_wrapper<ast::expression const>(_col_ref.back());
  return std::reference_wrapper<ast::expression const>(_col_ref.back());
}

std::reference_wrapper<ast::expression const> named_to_reference_converter::visit(
  ast::operation const& expr)
{
  auto const operands = expr.get_operands();
  auto op             = expr.get_operator();
  auto new_operands   = visit_operands(operands);
  if (cudf::ast::detail::ast_operator_arity(op) == 2) {
    _operators.emplace_back(op, new_operands.front(), new_operands.back());
  } else if (cudf::ast::detail::ast_operator_arity(op) == 1) {
    _operators.emplace_back(op, new_operands.front());
  }
  _stats_expr = std::reference_wrapper<ast::expression const>(_operators.back());
  return std::reference_wrapper<ast::expression const>(_operators.back());
}

// Visits each operand in order, collecting the transformed references.
std::vector<std::reference_wrapper<ast::expression const>>
named_to_reference_converter::visit_operands(
  std::vector<std::reference_wrapper<ast::expression const>> operands)
{
  std::vector<std::reference_wrapper<ast::expression const>> transformed_operands;
  for (auto const& operand : operands) {
    auto const new_operand = operand.get().accept(*this);
    transformed_operands.push_back(new_operand);
  }
  return transformed_operands;
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/compact_protocol_writer.hpp
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "parquet.hpp"
#include "parquet_common.hpp"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

namespace cudf::io::parquet::detail {

/**
 * @brief Class for writing Parquet's Thrift Compact Protocol encoded metadata
 *
 * This class takes in the Parquet structs and outputs a Thrift-encoded binary blob
 *
 * (Note: the previous @brief said "parsing", which describes the reader; this
 * class only serializes the structs into `m_buf`.)
 */
class CompactProtocolWriter {
 public:
  // Appends encoded bytes to `*output`; the caller retains ownership of the vector.
  CompactProtocolWriter(std::vector<uint8_t>* output) : m_buf(*output) {}

  // Each overload serializes one Parquet metadata struct and returns the
  // total number of bytes appended to the output buffer.
  size_t write(FileMetaData const&);
  size_t write(DecimalType const&);
  size_t write(TimeUnit const&);
  size_t write(TimeType const&);
  size_t write(TimestampType const&);
  size_t write(IntType const&);
  size_t write(LogicalType const&);
  size_t write(SchemaElement const&);
  size_t write(RowGroup const&);
  size_t write(KeyValue const&);
  size_t write(ColumnChunk const&);
  size_t write(ColumnChunkMetaData const&);
  size_t write(Statistics const&);
  size_t write(PageLocation const&);
  size_t write(OffsetIndex const&);
  size_t write(ColumnOrder const&);

 protected:
  std::vector<uint8_t>& m_buf;  // output byte buffer (owned by caller)
  friend class CompactProtocolFieldWriter;
};

/**
 * @brief Helper that emits individual Thrift Compact Protocol fields
 * (headers, varints, strings, nested structs) into the owning writer's buffer.
 */
class CompactProtocolFieldWriter {
  CompactProtocolWriter& writer;
  size_t struct_start_pos;  // buffer offset where this struct's encoding began
  int current_field_value;  // last field id written, for Thrift's delta field-id encoding

 public:
  CompactProtocolFieldWriter(CompactProtocolWriter& caller)
    : writer(caller), struct_start_pos(writer.m_buf.size()), current_field_value(0)
  {
  }

  // Raw byte emission.
  void put_byte(uint8_t v);
  void put_byte(uint8_t const* raw, uint32_t len);

  // Varint encoders; return the number of bytes written.
  uint32_t put_uint(uint64_t v);
  uint32_t put_int(int64_t v);

  // Writes a compact-protocol field header (delta-encoded field id + type).
  void put_field_header(int f, int cur, int t);

  // Typed field writers; `field` is the Thrift field id.
  inline void field_bool(int field, bool b);
  inline void field_int8(int field, int8_t val);
  inline void field_int(int field, int32_t val);
  inline void field_int(int field, int64_t val);
  template <typename Enum>
  inline void field_int_list(int field, std::vector<Enum> const& val);
  template <typename T>
  inline void field_struct(int field, T const& val);
  inline void field_empty_struct(int field);
  template <typename T>
  inline void field_struct_list(int field, std::vector<T> const& val);

  // Terminates the struct and returns the number of bytes it occupies.
  inline size_t value();

  inline void field_struct_blob(int field, std::vector<uint8_t> const& val);
  inline void field_binary(int field, std::vector<uint8_t> const& val);
  inline void field_string(int field, std::string const& val);
  inline void field_string_list(int field, std::vector<std::string> const& val);

  // Accessors for the delta field-id state.
  inline int current_field();
  inline void set_current_field(int const& field);
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader_impl_chunking.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "reader_impl_helpers.hpp"

#include <cudf/types.hpp>

namespace cudf::io::parquet::detail {

/**
 * @brief Struct to store file-level data that remains constant for
 * all passes/chunks in the file.
 */
struct file_intermediate_data {
  // all row groups to read
  std::vector<row_group_info> row_groups{};

  // all chunks from the selected row groups. We may end up reading these chunks progressively
  // instead of all at once
  std::vector<ColumnChunkDesc> chunks{};

  // an array of offsets into _file_itm_data::global_chunks. Each pair of offsets represents
  // the start/end of the chunks to be loaded for a given pass.
  std::vector<std::size_t> input_pass_row_group_offsets{};
  // row counts per input-pass
  std::vector<std::size_t> input_pass_row_count{};

  // skip_rows/num_rows values for the entire file. these need to be adjusted per-pass because we
  // may not be visiting every row group that contains these bounds
  size_t global_skip_rows;
  size_t global_num_rows;
};

/**
 * @brief Struct to identify the range for each chunk of rows during a chunked reading pass.
 */
struct chunk_read_info {
  size_t skip_rows;  // first row of this output chunk
  size_t num_rows;   // number of rows in this output chunk
};

/**
 * @brief Struct to store pass-level data that remains constant for a single pass.
 */
struct pass_intermediate_data {
  // compressed page data as read from the source, one buffer per column chunk
  std::vector<std::unique_ptr<datasource::buffer>> raw_page_data;
  // decompressed page data for this pass
  rmm::device_buffer decomp_page_data;

  // rowgroup, chunk and page information for the current pass.
  std::vector<row_group_info> row_groups{};
  cudf::detail::hostdevice_vector<ColumnChunkDesc> chunks{};
  cudf::detail::hostdevice_vector<PageInfo> pages_info{};
  cudf::detail::hostdevice_vector<PageNestingInfo> page_nesting_info{};
  cudf::detail::hostdevice_vector<PageNestingDecodeInfo> page_nesting_decode_info{};

  // per-page sort keys/indices used to process pages in column order
  rmm::device_uvector<int32_t> page_keys{0, rmm::cuda_stream_default};
  rmm::device_uvector<int32_t> page_index{0, rmm::cuda_stream_default};
  // dictionary string lookup table per column chunk
  rmm::device_uvector<string_index_pair> str_dict_index{0, rmm::cuda_stream_default};

  // row ranges for each output chunk produced by this pass
  std::vector<chunk_read_info> output_chunk_read_info;
  std::size_t current_output_chunk{0};

  // scratch space for definition/repetition level decoding
  rmm::device_buffer level_decode_data{};
  // byte width used for level data (depends on max level value)
  int level_type_size{0};

  // skip_rows and num_rows values for this particular pass. these may be adjusted values from the
  // global values stored in file_intermediate_data.
  size_t skip_rows;
  size_t num_rows;
};

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/page_string_utils.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cudf/strings/detail/gather.cuh>

namespace cudf::io::parquet::detail {

// stole this from cudf/strings/detail/gather.cuh. modified to run on a single string on one warp.
// copies from src to dst in 16B chunks per thread.
//
// @param dst     destination buffer (byte-aligned; interior copied with aligned uint4 stores)
// @param src     source buffer
// @param len     number of bytes to copy
// @param lane_id calling thread's lane within the warp; all lanes of the warp must call
inline __device__ void wideStrcpy(uint8_t* dst, uint8_t const* src, size_t len, uint32_t lane_id)
{
  using cudf::detail::warp_size;
  using cudf::strings::detail::load_uint4;

  constexpr size_t out_datatype_size = sizeof(uint4);  // 16B aligned stores
  constexpr size_t in_datatype_size  = sizeof(uint);   // 4B guard for load_uint4 reads

  auto const alignment_offset = reinterpret_cast<std::uintptr_t>(dst) % out_datatype_size;
  uint4* out_chars_aligned    = reinterpret_cast<uint4*>(dst - alignment_offset);
  auto const in_start         = src;

  // Both `out_start_aligned` and `out_end_aligned` are indices into `dst`.
  // `out_start_aligned` is the first 16B aligned memory location after `dst + 4`.
  // `out_end_aligned` is the last 16B aligned memory location before `len - 4`. Characters
  // between `[out_start_aligned, out_end_aligned)` will be copied using uint4.
  // `dst + 4` and `len - 4` are used instead of `dst` and `len` to avoid
  // `load_uint4` reading beyond string boundaries.
  // use signed int since out_end_aligned can be negative.
  int64_t const out_start_aligned =
    (in_datatype_size + alignment_offset + out_datatype_size - 1) / out_datatype_size *
      out_datatype_size -
    alignment_offset;
  int64_t const out_end_aligned =
    (len - in_datatype_size + alignment_offset) / out_datatype_size * out_datatype_size -
    alignment_offset;

  // Bulk interior copy: each lane stores one 16B chunk per iteration.
  for (int64_t ichar = out_start_aligned + lane_id * out_datatype_size; ichar < out_end_aligned;
       ichar += warp_size * out_datatype_size) {
    *(out_chars_aligned + (ichar + alignment_offset) / out_datatype_size) =
      load_uint4((const char*)in_start + ichar);
  }

  // Tail logic: copy characters of the current string outside
  // `[out_start_aligned, out_end_aligned)`.
  if (out_end_aligned <= out_start_aligned) {
    // In this case, `[out_start_aligned, out_end_aligned)` is an empty set, and we copy the
    // entire string.
    for (int64_t ichar = lane_id; ichar < len; ichar += warp_size) {
      dst[ichar] = in_start[ichar];
    }
  } else {
    // Copy characters in range `[0, out_start_aligned)`.
    if (lane_id < out_start_aligned) { dst[lane_id] = in_start[lane_id]; }
    // Copy characters in range `[out_end_aligned, len)`.
    int64_t ichar = out_end_aligned + lane_id;
    if (ichar < len) { dst[ichar] = in_start[ichar]; }
  }
}

/**
 * @brief char-parallel string copy.
 *
 * Dispatches to wideStrcpy for long strings (> 64 bytes) where the aligned 16B
 * path pays off; short strings use a simple byte-per-lane loop. All lanes of a
 * warp must call this with the same arguments except lane_id.
 */
inline __device__ void ll_strcpy(uint8_t* dst, uint8_t const* src, size_t len, uint32_t lane_id)
{
  using cudf::detail::warp_size;
  if (len > 64) {
    wideStrcpy(dst, src, len, lane_id);
  } else {
    for (int i = lane_id; i < len; i += warp_size) {
      dst[i] = src[i];
    }
  }
}

/**
 * @brief Perform exclusive scan on an array of any length using a single block of threads.
 *
 * Processes the array in tiles of `block_size` elements; each tile's aggregate is
 * carried into the next tile via `initial_value`. Must be called by all threads of
 * one block (uses cub::BlockScan with shared temp storage).
 *
 * @tparam block_size      number of threads in the calling block
 * @param arr              in/out array scanned in place
 * @param length           number of elements in `arr`
 * @param initial_value    value added to every prefix (seed of the scan)
 */
template <int block_size>
__device__ void block_excl_sum(size_type* arr, size_type length, size_type initial_value)
{
  using block_scan = cub::BlockScan<size_type, block_size>;
  __shared__ typename block_scan::TempStorage scan_storage;
  int const t = threadIdx.x;

  // do a series of block sums, storing results in arr as we go
  for (int pos = 0; pos < length; pos += block_size) {
    int const tidx = pos + t;
    size_type tval = tidx < length ? arr[tidx] : 0;
    size_type block_sum;
    block_scan(scan_storage).ExclusiveScan(tval, tval, initial_value, cub::Sum(), block_sum);
    if (tidx < length) { arr[tidx] = tval; }
    // carry this tile's aggregate into the next tile's seed
    initial_value += block_sum;
  }
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/compact_protocol_reader.cpp
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compact_protocol_reader.hpp"

#include <algorithm>
#include <cstddef>
#include <functional>
#include <tuple>

namespace cudf::io::parquet::detail {

/**
 * @brief Base class for parquet field functors.
 *
 * Holds the field value used by all of the specialized functors.
 *
 * Convention used throughout this file: a field functor's `operator()` returns
 * `true` on a parse error (e.g. wire type mismatch) and `false` on success.
 */
class parquet_field {
 private:
  int _field_val;

 protected:
  parquet_field(int f) : _field_val(f) {}

 public:
  virtual ~parquet_field() = default;
  int field() const { return _field_val; }
};

/**
 * @brief Abstract base class for list functors.
 */
template <typename T>
class parquet_field_list : public parquet_field {
 private:
  using read_func_type = std::function<bool(uint32_t, CompactProtocolReader*)>;
  FieldType _expected_type;
  // Per-element reader supplied by the derived class via bind_read_func().
  read_func_type _read_value;

 protected:
  std::vector<T>& val;
  void bind_read_func(read_func_type fn) { _read_value = fn; }

  parquet_field_list(int f, std::vector<T>& v, FieldType t)
    : parquet_field(f), _expected_type(t), val(v)
  {
  }

 public:
  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    if (field_type != ST_FLD_LIST) { return true; }
    // List header encodes the element type and count.
    auto const [t, n] = cpr->get_listh();
    if (t != _expected_type) { return true; }
    val.resize(n);
    for (uint32_t i = 0; i < n; i++) {
      if (_read_value(i, cpr)) { return true; }
    }
    return false;
  }
};

/**
 * @brief Functor to set value to bool read from CompactProtocolReader
 *
 * bool doesn't actually encode a value, we just use the field type to indicate true/false
 *
 * @return True if field type is not bool
 */
class parquet_field_bool : public parquet_field {
  bool& val;

 public:
  parquet_field_bool(int f, bool& v) : parquet_field(f), val(v) {}

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    if (field_type != ST_FLD_TRUE && field_type != ST_FLD_FALSE) { return true; }
    val = field_type == ST_FLD_TRUE;
    return false;
  }
};

/**
 * @brief Functor to read a vector of booleans from CompactProtocolReader
 *
 * @return True if field types mismatch or if the process of reading a
 * bool fails
 */
struct parquet_field_bool_list : public parquet_field_list<bool> {
  parquet_field_bool_list(int f, std::vector<bool>& v) : parquet_field_list(f, v, ST_FLD_TRUE)
  {
    auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) {
      // Inside a list, each bool element is a full byte (unlike struct fields
      // where the field-type nibble itself carries the value).
      auto const current_byte = cpr->getb();
      if (current_byte != ST_FLD_TRUE && current_byte != ST_FLD_FALSE) { return true; }
      this->val[i] = current_byte == ST_FLD_TRUE;
      return false;
    };
    bind_read_func(read_value);
  }
};

/**
 * @brief Base type for a functor that reads an integer from CompactProtocolReader
 *
 * Assuming signed ints since the parquet spec does not use unsigned ints anywhere.
 *
 * @return True if there is a type mismatch
 */
template <typename T, int EXPECTED_TYPE>
class parquet_field_int : public parquet_field {
  static constexpr bool is_byte = std::is_same_v<T, int8_t>;

  T& val;

 public:
  parquet_field_int(int f, T& v) : parquet_field(f), val(v) {}

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    if constexpr (is_byte) {
      val = cpr->getb();
    } else {
      // Thrift compact protocol zig-zag varint decoding for i16/i32/i64.
      val = cpr->get_zigzag<T>();
    }
    return (field_type != EXPECTED_TYPE);
  }
};

using parquet_field_int8  = parquet_field_int<int8_t, ST_FLD_BYTE>;
using parquet_field_int32 = parquet_field_int<int32_t, ST_FLD_I32>;
using parquet_field_int64 = parquet_field_int<int64_t, ST_FLD_I64>;

/**
 * @brief Functor to read a vector of integers from CompactProtocolReader
 *
 * @return True if field types mismatch or if the process of reading an
 * integer fails
 */
template <typename T, FieldType EXPECTED_TYPE>
struct parquet_field_int_list : public parquet_field_list<T> {
  parquet_field_int_list(int f, std::vector<T>& v) : parquet_field_list<T>(f, v, EXPECTED_TYPE)
  {
    auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) {
      this->val[i] = cpr->get_zigzag<T>();
      return false;
    };
    this->bind_read_func(read_value);
  }
};

using parquet_field_int64_list = parquet_field_int_list<int64_t, ST_FLD_I64>;

/**
 * @brief Functor to read a string from CompactProtocolReader
 *
 * @return True if field type mismatches or if size of string exceeds bounds
 * of the CompactProtocolReader
 */
class parquet_field_string : public parquet_field {
  std::string& val;

 public:
  parquet_field_string(int f, std::string& v) : parquet_field(f), val(v) {}

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    if (field_type != ST_FLD_BINARY) { return true; }
    auto const n = cpr->get_u32();
    // NOTE(review): strict '<' here rejects a string that exactly fills the
    // remaining buffer, while parquet_field_binary below accepts '<=' — looks
    // inconsistent; confirm against the spec before changing.
    if (n < static_cast<size_t>(cpr->m_end - cpr->m_cur)) {
      val.assign(reinterpret_cast<char const*>(cpr->m_cur), n);
      cpr->m_cur += n;
      return false;
    } else {
      return true;
    }
  }
};

/**
 * @brief Functor to read a vector of strings from CompactProtocolReader
 *
 * @return True if field types mismatch or if the process of reading a
 * string fails
 */
struct parquet_field_string_list : public parquet_field_list<std::string> {
  parquet_field_string_list(int f, std::vector<std::string>& v)
    : parquet_field_list(f, v, ST_FLD_BINARY)
  {
    auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) {
      auto const l = cpr->get_u32();
      if (l < static_cast<size_t>(cpr->m_end - cpr->m_cur)) {
        this->val[i].assign(reinterpret_cast<char const*>(cpr->m_cur), l);
        cpr->m_cur += l;
      } else {
        return true;
      }
      return false;
    };
    bind_read_func(read_value);
  }
};

/**
 * @brief Functor to set value to enum read from CompactProtocolReader
 *
 * @return True if field type is not int32
 */
template <typename Enum>
class parquet_field_enum : public parquet_field {
  Enum& val;

 public:
  parquet_field_enum(int f, Enum& v) : parquet_field(f), val(v) {}
  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    val = static_cast<Enum>(cpr->get_i32());
    return (field_type != ST_FLD_I32);
  }
};

/**
 * @brief Functor to read a vector of enums from CompactProtocolReader
 *
 * @return True if field types mismatch or if the process of reading an
 * enum fails
 */
template <typename Enum>
struct parquet_field_enum_list : public parquet_field_list<Enum> {
  parquet_field_enum_list(int f, std::vector<Enum>& v)
    : parquet_field_list<Enum>(f, v, ST_FLD_I32)
  {
    auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) {
      this->val[i] = static_cast<Enum>(cpr->get_i32());
      return false;
    };
    this->bind_read_func(read_value);
  }
};

/**
 * @brief Functor to read a structure from CompactProtocolReader
 *
 * @return True if field types mismatch or if the process of reading a
 * struct fails
 */
template <typename T>
class parquet_field_struct : public parquet_field {
  T& val;

 public:
  parquet_field_struct(int f, T& v) : parquet_field(f), val(v) {}

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    // Delegates to the matching CompactProtocolReader::read(T*) overload.
    return (field_type != ST_FLD_STRUCT || !(cpr->read(&val)));
  }
};

/**
 * @brief Functor to read optional structures in unions
 *
 * @return True if field types mismatch
 */
template <typename E, typename T>
class parquet_field_union_struct : public parquet_field {
  E& enum_val;
  thrust::optional<T>& val;  // union structs are always wrapped in std::optional

 public:
  parquet_field_union_struct(int f, E& ev, thrust::optional<T>& v)
    : parquet_field(f), enum_val(ev), val(v)
  {
  }

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    T v;
    bool const res = parquet_field_struct<T>(field(), v).operator()(cpr, field_type);
    if (!res) {
      val = v;
      // The thrift field id doubles as the union discriminant.
      enum_val = static_cast<E>(field());
    }
    return res;
  }
};

/**
 * @brief Functor to read empty structures in unions
 *
 * Added to avoid having to define read() functions for empty structs contained in unions.
 *
 * @return True if field types mismatch
 */
template <typename E>
class parquet_field_union_enumerator : public parquet_field {
  E& val;

 public:
  parquet_field_union_enumerator(int f, E& v) : parquet_field(f), val(v) {}

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    if (field_type != ST_FLD_STRUCT) { return true; }
    cpr->skip_struct_field(field_type);
    val = static_cast<E>(field());
    return false;
  }
};

/**
 * @brief Functor to read a vector of structures from CompactProtocolReader
 *
 * @return True if field types mismatch or if the process of reading a
 * struct fails
 */
template <typename T>
struct parquet_field_struct_list : public parquet_field_list<T> {
  parquet_field_struct_list(int f, std::vector<T>& v) : parquet_field_list<T>(f, v, ST_FLD_STRUCT)
  {
    auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) {
      if (not cpr->read(&this->val[i])) { return true; }
      return false;
    };
    this->bind_read_func(read_value);
  }
};

/**
 * @brief Functor to read a binary from CompactProtocolReader
 *
 * @return True if field type mismatches or if size of binary exceeds bounds
 * of the CompactProtocolReader
 */
class parquet_field_binary : public parquet_field {
  std::vector<uint8_t>& val;

 public:
  parquet_field_binary(int f, std::vector<uint8_t>& v) : parquet_field(f), val(v) {}

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    if (field_type != ST_FLD_BINARY) { return true; }
    auto const n = cpr->get_u32();
    if (n <= static_cast<size_t>(cpr->m_end - cpr->m_cur)) {
      val.resize(n);
      val.assign(cpr->m_cur, cpr->m_cur + n);
      cpr->m_cur += n;
      return false;
    } else {
      return true;
    }
  }
};

/**
 * @brief Functor to read a vector of binaries from CompactProtocolReader
 *
 * @return True if field types mismatch or if the process of reading a
 * binary fails
 */
struct parquet_field_binary_list : public parquet_field_list<std::vector<uint8_t>> {
  parquet_field_binary_list(int f, std::vector<std::vector<uint8_t>>& v)
    : parquet_field_list(f, v, ST_FLD_BINARY)
  {
    auto const read_value = [this](uint32_t i, CompactProtocolReader* cpr) {
      auto const l = cpr->get_u32();
      if (l <= static_cast<size_t>(cpr->m_end - cpr->m_cur)) {
        val[i].resize(l);
        val[i].assign(cpr->m_cur, cpr->m_cur + l);
        cpr->m_cur += l;
      } else {
        return true;
      }
      return false;
    };
    bind_read_func(read_value);
  }
};

/**
 * @brief Functor to read a struct from CompactProtocolReader
 *
 * Captures the raw (still thrift-encoded) bytes of the struct rather than
 * decoding it.
 *
 * @return True if field type mismatches
 */
class parquet_field_struct_blob : public parquet_field {
  std::vector<uint8_t>& val;

 public:
  parquet_field_struct_blob(int f, std::vector<uint8_t>& v) : parquet_field(f), val(v) {}
  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    if (field_type != ST_FLD_STRUCT) { return true; }
    uint8_t const* const start = cpr->m_cur;
    cpr->skip_struct_field(field_type);
    if (cpr->m_cur > start) { val.assign(start, cpr->m_cur - 1); }
    return false;
  }
};

/**
 * @brief functor to wrap functors for optional fields
 */
template <typename T, typename FieldFunctor>
class parquet_field_optional : public parquet_field {
  thrust::optional<T>& val;

 public:
  parquet_field_optional(int f, thrust::optional<T>& v) : parquet_field(f), val(v) {}

  inline bool operator()(CompactProtocolReader* cpr, int field_type)
  {
    T v;
    bool const res = FieldFunctor(field(), v).operator()(cpr, field_type);
    // Only assign the optional when the wrapped functor succeeded.
    if (!res) { val = v; }
    return res;
  }
};

/**
 * @brief Skips the number of bytes according to the specified struct type
 *
 * @param[in] t Struct type enumeration
 * @param[in] depth Level of struct nesting
 *
 * @return True if the struct type is recognized, false otherwise
 */
bool CompactProtocolReader::skip_struct_field(int t, int depth)
{
  switch (t) {
    case ST_FLD_TRUE:
    case ST_FLD_FALSE: break;
    case ST_FLD_I16:
    case ST_FLD_I32:
    case ST_FLD_I64: get_u64(); break;
    case ST_FLD_BYTE: skip_bytes(1); break;
    case ST_FLD_DOUBLE: skip_bytes(8); break;
    case ST_FLD_BINARY: skip_bytes(get_u32()); break;
    case ST_FLD_LIST: [[fallthrough]];
    case ST_FLD_SET: {
      auto const [t, n] = get_listh();
      // Cap recursion depth to guard against malformed/hostile input.
      if (depth > 10) { return false; }
      for (uint32_t i = 0; i < n; i++) {
        skip_struct_field(t, depth + 1);
      }
    } break;
    case ST_FLD_STRUCT:
      for (;;) {
        int const c = getb();
        t           = c & 0xf;
        if (c == 0) { break; }               // end of struct
        if ((c & 0xf0) == 0) { get_i16(); }  // field id is not a delta
        if (depth > 10) { return false; }
        skip_struct_field(t, depth + 1);
      }
      break;
    default:
      // printf("unsupported skip for type %d\n", t);
      break;
  }
  return true;
}

// Compile-time linear dispatch over the tuple of field functors: try the
// functor at `index`; on field-id mismatch recurse to `index - 1`.
template <int index>
struct FunctionSwitchImpl {
  template <typename... Operator>
  static inline bool run(CompactProtocolReader* cpr,
                         int field_type,
                         int const& field,
                         std::tuple<Operator...>& ops)
  {
    if (field == std::get<index>(ops).field()) {
      return std::get<index>(ops)(cpr, field_type);
    } else {
      return FunctionSwitchImpl<index - 1>::run(cpr, field_type, field, ops);
    }
  }
};

// Recursion base case: if no functor matched, the field is unknown to this
// reader — skip it on the wire and keep going.
template <>
struct FunctionSwitchImpl<0> {
  template <typename... Operator>
  static inline bool run(CompactProtocolReader* cpr,
                         int field_type,
                         int const& field,
                         std::tuple<Operator...>& ops)
  {
    if (field == std::get<0>(ops).field()) {
      return std::get<0>(ops)(cpr, field_type);
    } else {
      cpr->skip_struct_field(field_type);
      return false;
    }
  }
};

// Drives the thrift compact-protocol struct decode loop: reads field headers
// (delta-encoded ids) until the end-of-struct marker, dispatching each field
// to the matching functor in `op`. Returns false as soon as a functor fails.
template <typename... Operator>
inline bool function_builder(CompactProtocolReader* cpr, std::tuple<Operator...>& op)
{
  constexpr int index = std::tuple_size<std::tuple<Operator...>>::value - 1;
  int field           = 0;
  while (true) {
    int const current_byte = cpr->getb();
    if (!current_byte) { break; }
    int const field_delta = current_byte >> 4;
    int const field_type  = current_byte & 0xf;
    // Zero delta means the field id is encoded explicitly as a zig-zag i16.
    field = field_delta ? field + field_delta : cpr->get_i16();
    bool const exit_function = FunctionSwitchImpl<index>::run(cpr, field_type, field, op);
    if (exit_function) { return false; }
  }
  return true;
}

// The read() overloads below decode one thrift struct each; the integer
// passed to every functor is the field id from the parquet.thrift definition.

bool CompactProtocolReader::read(FileMetaData* f)
{
  using optional_list_column_order =
    parquet_field_optional<std::vector<ColumnOrder>, parquet_field_struct_list<ColumnOrder>>;
  auto op = std::make_tuple(parquet_field_int32(1, f->version),
                            parquet_field_struct_list(2, f->schema),
                            parquet_field_int64(3, f->num_rows),
                            parquet_field_struct_list(4, f->row_groups),
                            parquet_field_struct_list(5, f->key_value_metadata),
                            parquet_field_string(6, f->created_by),
                            optional_list_column_order(7, f->column_orders));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(SchemaElement* s)
{
  using optional_converted_type =
    parquet_field_optional<ConvertedType, parquet_field_enum<ConvertedType>>;
  using optional_logical_type =
    parquet_field_optional<LogicalType, parquet_field_struct<LogicalType>>;
  auto op = std::make_tuple(parquet_field_enum<Type>(1, s->type),
                            parquet_field_int32(2, s->type_length),
                            parquet_field_enum<FieldRepetitionType>(3, s->repetition_type),
                            parquet_field_string(4, s->name),
                            parquet_field_int32(5, s->num_children),
                            optional_converted_type(6, s->converted_type),
                            parquet_field_int32(7, s->decimal_scale),
                            parquet_field_int32(8, s->decimal_precision),
                            parquet_field_optional<int32_t, parquet_field_int32>(9, s->field_id),
                            optional_logical_type(10, s->logical_type));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(LogicalType* l)
{
  // LogicalType is a thrift union; most members are empty structs represented
  // by the field id alone (field 9 is reserved in parquet.thrift).
  auto op = std::make_tuple(
    parquet_field_union_enumerator(1, l->type),
    parquet_field_union_enumerator(2, l->type),
    parquet_field_union_enumerator(3, l->type),
    parquet_field_union_enumerator(4, l->type),
    parquet_field_union_struct<LogicalType::Type, DecimalType>(5, l->type, l->decimal_type),
    parquet_field_union_enumerator(6, l->type),
    parquet_field_union_struct<LogicalType::Type, TimeType>(7, l->type, l->time_type),
    parquet_field_union_struct<LogicalType::Type, TimestampType>(8, l->type, l->timestamp_type),
    parquet_field_union_struct<LogicalType::Type, IntType>(10, l->type, l->int_type),
    parquet_field_union_enumerator(11, l->type),
    parquet_field_union_enumerator(12, l->type),
    parquet_field_union_enumerator(13, l->type));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(DecimalType* d)
{
  auto op = std::make_tuple(parquet_field_int32(1, d->scale), parquet_field_int32(2, d->precision));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(TimeType* t)
{
  auto op =
    std::make_tuple(parquet_field_bool(1, t->isAdjustedToUTC), parquet_field_struct(2, t->unit));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(TimestampType* t)
{
  auto op =
    std::make_tuple(parquet_field_bool(1, t->isAdjustedToUTC), parquet_field_struct(2, t->unit));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(TimeUnit* u)
{
  auto op = std::make_tuple(parquet_field_union_enumerator(1, u->type),
                            parquet_field_union_enumerator(2, u->type),
                            parquet_field_union_enumerator(3, u->type));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(IntType* i)
{
  auto op = std::make_tuple(parquet_field_int8(1, i->bitWidth), parquet_field_bool(2, i->isSigned));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(RowGroup* r)
{
  auto op = std::make_tuple(parquet_field_struct_list(1, r->columns),
                            parquet_field_int64(2, r->total_byte_size),
                            parquet_field_int64(3, r->num_rows));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(ColumnChunk* c)
{
  auto op = std::make_tuple(parquet_field_string(1, c->file_path),
                            parquet_field_int64(2, c->file_offset),
                            parquet_field_struct(3, c->meta_data),
                            parquet_field_int64(4, c->offset_index_offset),
                            parquet_field_int32(5, c->offset_index_length),
                            parquet_field_int64(6, c->column_index_offset),
                            parquet_field_int32(7, c->column_index_length));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(ColumnChunkMetaData* c)
{
  auto op = std::make_tuple(parquet_field_enum<Type>(1, c->type),
                            parquet_field_enum_list(2, c->encodings),
                            parquet_field_string_list(3, c->path_in_schema),
                            parquet_field_enum<Compression>(4, c->codec),
                            parquet_field_int64(5, c->num_values),
                            parquet_field_int64(6, c->total_uncompressed_size),
                            parquet_field_int64(7, c->total_compressed_size),
                            parquet_field_int64(9, c->data_page_offset),
                            parquet_field_int64(10, c->index_page_offset),
                            parquet_field_int64(11, c->dictionary_page_offset),
                            parquet_field_struct(12, c->statistics));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(PageHeader* p)
{
  auto op = std::make_tuple(parquet_field_enum<PageType>(1, p->type),
                            parquet_field_int32(2, p->uncompressed_page_size),
                            parquet_field_int32(3, p->compressed_page_size),
                            parquet_field_struct(5, p->data_page_header),
                            parquet_field_struct(7, p->dictionary_page_header),
                            parquet_field_struct(8, p->data_page_header_v2));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(DataPageHeader* d)
{
  auto op = std::make_tuple(parquet_field_int32(1, d->num_values),
                            parquet_field_enum<Encoding>(2, d->encoding),
                            parquet_field_enum<Encoding>(3, d->definition_level_encoding),
                            parquet_field_enum<Encoding>(4, d->repetition_level_encoding));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(DictionaryPageHeader* d)
{
  auto op = std::make_tuple(parquet_field_int32(1, d->num_values),
                            parquet_field_enum<Encoding>(2, d->encoding));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(DataPageHeaderV2* d)
{
  auto op = std::make_tuple(parquet_field_int32(1, d->num_values),
                            parquet_field_int32(2, d->num_nulls),
                            parquet_field_int32(3, d->num_rows),
                            parquet_field_enum<Encoding>(4, d->encoding),
                            parquet_field_int32(5, d->definition_levels_byte_length),
                            parquet_field_int32(6, d->repetition_levels_byte_length),
                            parquet_field_bool(7, d->is_compressed));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(KeyValue* k)
{
  auto op = std::make_tuple(parquet_field_string(1, k->key), parquet_field_string(2, k->value));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(PageLocation* p)
{
  auto op = std::make_tuple(parquet_field_int64(1, p->offset),
                            parquet_field_int32(2, p->compressed_page_size),
                            parquet_field_int64(3, p->first_row_index));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(OffsetIndex* o)
{
  auto op = std::make_tuple(parquet_field_struct_list(1, o->page_locations));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(ColumnIndex* c)
{
  auto op = std::make_tuple(parquet_field_bool_list(1, c->null_pages),
                            parquet_field_binary_list(2, c->min_values),
                            parquet_field_binary_list(3, c->max_values),
                            parquet_field_enum<BoundaryOrder>(4, c->boundary_order),
                            parquet_field_int64_list(5, c->null_counts));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(Statistics* s)
{
  using optional_binary = parquet_field_optional<std::vector<uint8_t>, parquet_field_binary>;
  using optional_int64  = parquet_field_optional<int64_t, parquet_field_int64>;
  auto op = std::make_tuple(optional_binary(1, s->max),
                            optional_binary(2, s->min),
                            optional_int64(3, s->null_count),
                            optional_int64(4, s->distinct_count),
                            optional_binary(5, s->max_value),
                            optional_binary(6, s->min_value));
  return function_builder(this, op);
}

bool CompactProtocolReader::read(ColumnOrder* c)
{
  auto op = std::make_tuple(parquet_field_union_enumerator<ColumnOrder::Type>(1, c->type));
  return function_builder(this, op);
}

/**
 * @brief Constructs the schema from the file-level metadata
 *
 * @param[in] md File metadata that was previously parsed
 *
 * @return True if schema constructed completely, false otherwise
 */
bool CompactProtocolReader::InitSchema(FileMetaData* md)
{
  if (static_cast<std::size_t>(WalkSchema(md)) != md->schema.size()) { return false; }

  /* Inside FileMetaData, there is a std::vector of RowGroups and each RowGroup contains a
   * a std::vector of ColumnChunks. Each ColumnChunk has a member ColumnMetaData, which contains
   * a std::vector of std::strings representing paths. The purpose of the code below is to set the
   * schema_idx of each column of each row to its corresponding row_group. This is effectively
   * mapping the columns to the schema.
   */
  for (auto& row_group : md->row_groups) {
    int current_schema_index = 0;
    for (auto& column : row_group.columns) {
      int parent = 0;  // root of schema
      for (auto const& path : column.meta_data.path_in_schema) {
        auto const it = [&] {
          // find_if starting at (current_schema_index + 1) and then wrapping
          auto const schema = [&](auto const& e) {
            return e.parent_idx == parent && e.name == path;
          };
          auto const mid = md->schema.cbegin() + current_schema_index + 1;
          auto const it  = std::find_if(mid, md->schema.cend(), schema);
          if (it != md->schema.cend()) { return it; }
          return std::find_if(md->schema.cbegin(), mid, schema);
        }();
        if (it == md->schema.cend()) { return false; }
        current_schema_index = std::distance(md->schema.cbegin(), it);
        column.schema_idx    = current_schema_index;
        parent               = current_schema_index;
      }
    }
  }
  return true;
}

/**
 * @brief Populates each node in the schema tree
 *
 * Recursive depth-first walk over the flattened schema vector, filling in the
 * derived per-node fields (max def/rep levels, parent and children indices).
 *
 * @param[out] md File metadata
 * @param[in] idx Current node index
 * @param[in] parent_idx Parent node index
 * @param[in] max_def_level Max definition level
 * @param[in] max_rep_level Max repetition level
 *
 * @return The node index that was populated
 */
int CompactProtocolReader::WalkSchema(
  FileMetaData* md, int idx, int parent_idx, int max_def_level, int max_rep_level)
{
  if (idx >= 0 && (size_t)idx < md->schema.size()) {
    SchemaElement* e = &md->schema[idx];
    if (e->repetition_type == OPTIONAL) {
      ++max_def_level;
    } else if (e->repetition_type == REPEATED) {
      ++max_def_level;
      ++max_rep_level;
    }
    e->max_definition_level = max_def_level;
    e->max_repetition_level = max_rep_level;
    e->parent_idx           = parent_idx;

    parent_idx = idx;
    ++idx;
    if (e->num_children > 0) {
      for (int i = 0; i < e->num_children; i++) {
        e->children_idx.push_back(idx);
        int const idx_old = idx;
        idx               = WalkSchema(md, idx, parent_idx, max_def_level, max_rep_level);
        if (idx <= idx_old) { break; }  // Error
      }
    }
    return idx;
  } else {
    // Error
    return -1;
  }
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/chunk_dict.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parquet_gpu.cuh"

#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/experimental/row_operators.cuh>

#include <rmm/exec_policy.hpp>

#include <cuda/atomic>

namespace cudf::io::parquet::detail {

namespace {
constexpr int DEFAULT_BLOCK_SIZE = 256;
}

/**
 * @brief Initializes each chunk's hash-map slots to the empty sentinel pair.
 *
 * One thread block per chunk; threads stride over the slot array.
 */
template <int block_size>
__global__ void __launch_bounds__(block_size)
  initialize_chunk_hash_maps_kernel(device_span<EncColumnChunk> chunks)
{
  auto const chunk = chunks[blockIdx.x];
  auto const t     = threadIdx.x;
  // fut: Now that per-chunk dict is same size as ck.num_values, try to not use one block per chunk
  for (thread_index_type i = 0; i < chunk.dict_map_size; i += block_size) {
    if (t + i < chunk.dict_map_size) {
      // Placement-new so the slots start life as atomics holding the sentinels.
      new (&chunk.dict_map_slots[t + i].first) map_type::atomic_key_type{KEY_SENTINEL};
      new (&chunk.dict_map_slots[t + i].second) map_type::atomic_mapped_type{VALUE_SENTINEL};
    }
  }
}

// Equality for map keys, which are row indices into `col`.
template <typename T>
struct equality_functor {
  column_device_view const& col;
  __device__ bool operator()(size_type lhs_idx, size_type rhs_idx)
  {
    // We don't call this for nulls so this is fine
    auto const equal = cudf::experimental::row::equality::nan_equal_physical_equality_comparator{};
    return equal(col.element<T>(lhs_idx), col.element<T>(rhs_idx));
  }
};

// Hash for map keys: hashes the column element at the given row index.
template <typename T>
struct hash_functor {
  column_device_view const& col;
  __device__ auto operator()(size_type idx) const
  {
    return cudf::hashing::detail::MurmurHash3_x86_32<T>{}(col.element<T>(idx));
  }
};

// Type-dispatched insert of (row index -> row index) into the chunk map.
struct map_insert_fn {
  map_type::device_mutable_view& map;

  template <typename T>
  __device__ bool operator()(column_device_view const& col, size_type i)
  {
    if constexpr (column_device_view::has_element_accessor<T>()) {
      auto hash_fn     = hash_functor<T>{col};
      auto equality_fn = equality_functor<T>{col};
      return map.insert(std::pair(i, i), hash_fn, equality_fn);
    } else {
      CUDF_UNREACHABLE("Unsupported type to insert in map");
    }
  }
};

// Type-dispatched lookup of a row's slot in the chunk map.
struct map_find_fn {
  map_type::device_view& map;

  template <typename T>
  __device__ map_type::device_view::iterator operator()(column_device_view const& col, size_type i)
  {
    if constexpr (column_device_view::has_element_accessor<T>()) {
      auto hash_fn     = hash_functor<T>{col};
      auto equality_fn = equality_functor<T>{col};
      return map.find(i, hash_fn, equality_fn);
    } else {
      CUDF_UNREACHABLE("Unsupported type to find in map");
    }
  }
};

/**
 * @brief Inserts each fragment's leaf values into its chunk's hash map and
 * accumulates the unique-entry count and total unique data size per chunk.
 *
 * Grid: (fragments, columns); one block per fragment.
 */
template <int block_size>
__global__ void __launch_bounds__(block_size)
  populate_chunk_hash_maps_kernel(cudf::detail::device_2dspan<PageFragment const> frags)
{
  auto col_idx = blockIdx.y;
  auto block_x = blockIdx.x;
  auto t       = threadIdx.x;
  auto frag    = frags[col_idx][block_x];
  auto chunk   = frag.chunk;
  auto col     = chunk->col_desc;

  if (not chunk->use_dictionary) { return; }

  using block_reduce = cub::BlockReduce<size_type, block_size>;
  __shared__ typename block_reduce::TempStorage reduce_storage;

  size_type start_row = frag.start_row;
  size_type end_row   = frag.start_row + frag.num_rows;

  // Find the bounds of values in leaf column to be inserted into the map for current chunk
  size_type const s_start_value_idx = row_to_value_idx(start_row, *col);
  size_type const end_value_idx     = row_to_value_idx(end_row, *col);

  column_device_view const& data_col = *col->leaf_column;

  // Make a view of the hash map
  auto hash_map_mutable = map_type::device_mutable_view(chunk->dict_map_slots,
                                                       chunk->dict_map_size,
                                                       cuco::empty_key{KEY_SENTINEL},
                                                       cuco::empty_value{VALUE_SENTINEL});

  __shared__ size_type total_num_dict_entries;
  thread_index_type val_idx = s_start_value_idx + t;
  // Loop condition keeps ALL threads iterating the same number of times so the
  // collective block_reduce calls below stay in lockstep; out-of-range threads
  // simply contribute zeros.
  while (val_idx - block_size < end_value_idx) {
    auto const is_valid =
      val_idx < end_value_idx and val_idx < data_col.size() and data_col.is_valid(val_idx);

    // insert element at val_idx to hash map and count successful insertions
    size_type is_unique      = 0;
    size_type uniq_elem_size = 0;
    if (is_valid) {
      is_unique      = type_dispatcher(data_col.type(), map_insert_fn{hash_map_mutable}, data_col, val_idx);
      uniq_elem_size = [&]() -> size_type {
        if (not is_unique) { return 0; }
        switch (col->physical_type) {
          case Type::INT32: return 4;
          case Type::INT64: return 8;
          case Type::INT96: return 12;
          case Type::FLOAT: return 4;
          case Type::DOUBLE: return 8;
          case Type::BYTE_ARRAY: {
            auto const col_type = data_col.type().id();
            if (col_type == type_id::STRING) {
              // Strings are stored as 4 byte length + string bytes
              return 4 + data_col.element<string_view>(val_idx).size_bytes();
            } else if (col_type == type_id::LIST) {
              // Binary is stored as 4 byte length + bytes
              return 4 + get_element<statistics::byte_array_view>(data_col, val_idx).size_bytes();
            }
            CUDF_UNREACHABLE(
              "Byte array only supports string and list<byte> column types for dictionary "
              "encoding!");
          }
          case Type::FIXED_LEN_BYTE_ARRAY:
            if (data_col.type().id() == type_id::DECIMAL128) { return sizeof(__int128_t); }
            CUDF_UNREACHABLE(
              "Fixed length byte array only supports decimal 128 column types for dictionary "
              "encoding!");
          default: CUDF_UNREACHABLE("Unsupported type for dictionary encoding");
        }
      }();
    }

    auto num_unique = block_reduce(reduce_storage).Sum(is_unique);
    // Sync before reusing reduce_storage for the second reduction.
    __syncthreads();
    auto uniq_data_size = block_reduce(reduce_storage).Sum(uniq_elem_size);
    if (t == 0) {
      // atomicAdd returns the OLD value; add num_unique to get the new total.
      total_num_dict_entries = atomicAdd(&chunk->num_dict_entries, num_unique);
      total_num_dict_entries += num_unique;
      atomicAdd(&chunk->uniq_data_size, uniq_data_size);
    }
    __syncthreads();

    // Check if the num unique values in chunk has already exceeded max dict size and early exit if
    (total_num_dict_entries > MAX_DICT_SIZE) { return; }

    val_idx += block_size;
  }  // while
}

/**
 * @brief Compacts each chunk's populated hash map into `dict_data` and rewrites
 * each occupied slot's value to its dense dictionary index.
 *
 * One block per chunk; slot->dict-index order is allocation order, not sorted.
 */
template <int block_size>
__global__ void __launch_bounds__(block_size)
  collect_map_entries_kernel(device_span<EncColumnChunk> chunks)
{
  auto& chunk = chunks[blockIdx.x];
  if (not chunk.use_dictionary) { return; }

  auto t   = threadIdx.x;
  auto map = map_type::device_view(chunk.dict_map_slots,
                                   chunk.dict_map_size,
                                   cuco::empty_key{KEY_SENTINEL},
                                   cuco::empty_value{VALUE_SENTINEL});

  // Block-scope atomic counter hands out dense dictionary indices.
  __shared__ cuda::atomic<size_type, cuda::thread_scope_block> counter;
  using cuda::std::memory_order_relaxed;
  if (t == 0) { new (&counter) cuda::atomic<size_type, cuda::thread_scope_block>{0}; }
  __syncthreads();
  for (size_type i = 0; i < chunk.dict_map_size; i += block_size) {
    if (t + i < chunk.dict_map_size) {
      auto* slot = reinterpret_cast<map_type::value_type*>(map.begin_slot() + t + i);
      auto key   = slot->first;
      if (key != KEY_SENTINEL) {
        auto loc = counter.fetch_add(1, memory_order_relaxed);
        cudf_assert(loc < MAX_DICT_SIZE && "Number of filled slots exceeds max dict size");
        chunk.dict_data[loc] = key;
        // If sorting dict page ever becomes a hard requirement, enable the following statement and
        // add a dict sorting step before storing into the slot's second field.
        // chunk.dict_data_idx[loc] = t + i;
        slot->second = loc;
      }
    }
  }
}

/**
 * @brief Looks up every valid leaf value in the chunk map and writes its dense
 * dictionary index into `chunk->dict_index`.
 *
 * Grid: (fragments, columns); one block per fragment.
 */
template <int block_size>
__global__ void __launch_bounds__(block_size)
  get_dictionary_indices_kernel(cudf::detail::device_2dspan<PageFragment const> frags)
{
  auto col_idx = blockIdx.y;
  auto block_x = blockIdx.x;
  auto t       = threadIdx.x;
  auto frag    = frags[col_idx][block_x];
  auto chunk   = frag.chunk;
  auto col     = chunk->col_desc;

  if (not chunk->use_dictionary) { return; }

  size_type start_row = frag.start_row;
  size_type end_row   = frag.start_row + frag.num_rows;

  // Find the bounds of values in leaf column to be searched in the map for current chunk
  auto const s_start_value_idx  = row_to_value_idx(start_row, *col);
  auto const s_ck_start_val_idx = row_to_value_idx(chunk->start_row, *col);
  auto const end_value_idx      = row_to_value_idx(end_row, *col);

  column_device_view const& data_col = *col->leaf_column;

  auto map = map_type::device_view(chunk->dict_map_slots,
                                   chunk->dict_map_size,
                                   cuco::empty_key{KEY_SENTINEL},
                                   cuco::empty_value{VALUE_SENTINEL});

  thread_index_type val_idx = s_start_value_idx + t;
  while (val_idx < end_value_idx) {
    if (data_col.is_valid(val_idx)) {
      auto found_slot = type_dispatcher(data_col.type(), map_find_fn{map}, data_col, val_idx);
      cudf_assert(found_slot != map.end() &&
                  "Unable to find value in map in dictionary index construction");
      if (found_slot != map.end()) {
        // No need for atomic as this is not going to be modified by any other thread
        auto* val_ptr = reinterpret_cast<map_type::mapped_type*>(&found_slot->second);
        chunk->dict_index[val_idx - s_ck_start_val_idx] = *val_ptr;
      }
    }
    val_idx += block_size;
  }
}

// Host launcher: one block (of 1024 threads) per chunk.
void initialize_chunk_hash_maps(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
  constexpr int block_size = 1024;
  initialize_chunk_hash_maps_kernel<block_size>
    <<<chunks.size(), block_size, 0, stream.value()>>>(chunks);
}

// Host launcher: 2D grid of (fragments, columns).
void populate_chunk_hash_maps(cudf::detail::device_2dspan<PageFragment const> frags,
                              rmm::cuda_stream_view stream)
{
  dim3 const dim_grid(frags.size().second, frags.size().first);
  populate_chunk_hash_maps_kernel<DEFAULT_BLOCK_SIZE>
    <<<dim_grid, DEFAULT_BLOCK_SIZE, 0, stream.value()>>>(frags);
}

// Host launcher: one block per chunk.
void collect_map_entries(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
  constexpr int block_size = 1024;
  collect_map_entries_kernel<block_size><<<chunks.size(), block_size, 0, stream.value()>>>(chunks);
}

// Host launcher: 2D grid of (fragments, columns).
void get_dictionary_indices(cudf::detail::device_2dspan<PageFragment const> frags,
                            rmm::cuda_stream_view stream)
{
  dim3 const dim_grid(frags.size().second, frags.size().first);
  get_dictionary_indices_kernel<DEFAULT_BLOCK_SIZE>
    <<<dim_grid, DEFAULT_BLOCK_SIZE, 0, stream.value()>>>(frags);
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/parquet.hpp
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "parquet_common.hpp" #include <cudf/types.hpp> #include <thrust/optional.h> #include <cstdint> #include <optional> #include <string> #include <vector> namespace cudf::io::parquet::detail { constexpr uint32_t parquet_magic = (('P' << 0) | ('A' << 8) | ('R' << 16) | ('1' << 24)); /** * @brief Struct that describes the Parquet file data header */ struct file_header_s { uint32_t magic; }; /** * @brief Struct that describes the Parquet file data postscript */ struct file_ender_s { uint32_t footer_len; uint32_t magic; }; // thrift inspired code simplified. 
// The structs below mirror the Thrift definitions in the Apache Parquet format
// specification (parquet.thrift); member names and field numbering comments
// follow the spec so the (de)serializer can map fields by id.

/// Decimal logical type annotation: scale/precision pair.
struct DecimalType {
  int32_t scale     = 0;
  int32_t precision = 0;
};

/// Time resolution for TIME/TIMESTAMP logical types.
struct TimeUnit {
  enum Type { UNDEFINED, MILLIS, MICROS, NANOS };
  Type type;
};

struct TimeType {
  // Default to true because the timestamps are implicitly in UTC
  // Writer option overrides this default
  bool isAdjustedToUTC = true;
  TimeUnit unit        = {TimeUnit::MILLIS};
};

struct TimestampType {
  // Default to true because the timestamps are implicitly in UTC
  // Writer option overrides this default
  bool isAdjustedToUTC = true;
  TimeUnit unit        = {TimeUnit::MILLIS};
};

/// Integer logical type annotation: bit width and signedness.
struct IntType {
  int8_t bitWidth = 0;
  bool isSigned   = false;
};

/**
 * @brief Logical type annotation for a schema element (replaces ConvertedType).
 *
 * Only the thrust::optional member matching `type` is expected to be engaged;
 * the convenience accessors below dereference it without checking.
 */
struct LogicalType {
  enum Type {
    UNDEFINED,
    STRING,
    MAP,
    LIST,
    ENUM,
    DECIMAL,
    DATE,
    TIME,
    TIMESTAMP,
    // 9 is reserved
    INTEGER = 10,
    UNKNOWN,
    JSON,
    BSON
  };
  Type type;
  thrust::optional<DecimalType> decimal_type;
  thrust::optional<TimeType> time_type;
  thrust::optional<TimestampType> timestamp_type;
  thrust::optional<IntType> int_type;

  LogicalType(Type tp = UNDEFINED) : type(tp) {}
  LogicalType(DecimalType&& dt) : type(DECIMAL), decimal_type(dt) {}
  LogicalType(TimeType&& tt) : type(TIME), time_type(tt) {}
  LogicalType(TimestampType&& tst) : type(TIMESTAMP), timestamp_type(tst) {}
  LogicalType(IntType&& it) : type(INTEGER), int_type(it) {}

  constexpr bool is_time_millis() const
  {
    return type == TIME and time_type->unit.type == TimeUnit::MILLIS;
  }

  constexpr bool is_time_micros() const
  {
    return type == TIME and time_type->unit.type == TimeUnit::MICROS;
  }

  constexpr bool is_time_nanos() const
  {
    return type == TIME and time_type->unit.type == TimeUnit::NANOS;
  }

  constexpr bool is_timestamp_millis() const
  {
    return type == TIMESTAMP and timestamp_type->unit.type == TimeUnit::MILLIS;
  }

  constexpr bool is_timestamp_micros() const
  {
    return type == TIMESTAMP and timestamp_type->unit.type == TimeUnit::MICROS;
  }

  constexpr bool is_timestamp_nanos() const
  {
    return type == TIMESTAMP and timestamp_type->unit.type == TimeUnit::NANOS;
  }

  // Accessors below return a sentinel (-1) when this LogicalType is not of the
  // queried kind.
  constexpr int8_t bit_width() const { return type == INTEGER ? int_type->bitWidth : -1; }

  constexpr bool is_signed() const { return type == INTEGER and int_type->isSigned; }

  constexpr int32_t scale() const { return type == DECIMAL ? decimal_type->scale : -1; }

  constexpr int32_t precision() const { return type == DECIMAL ? decimal_type->precision : -1; }
};

/**
 * Union to specify the order used for the min_value and max_value fields for a column.
 */
struct ColumnOrder {
  enum Type { UNDEFINED, TYPE_ORDER };
  Type type;
};

/**
 * @brief Struct for describing an element/field in the Parquet format schema
 *
 * Parquet is a strongly-typed format so the file layout can be interpreted as
 * as a schema tree.
 */
struct SchemaElement {
  // 1: parquet physical type for output
  Type type = UNDEFINED_TYPE;
  // 2: byte length of FIXED_LENGTH_BYTE_ARRAY elements, or maximum bit length for other types
  int32_t type_length = 0;
  // 3: repetition of the field
  FieldRepetitionType repetition_type = REQUIRED;
  // 4: name of the field
  std::string name = "";
  // 5: nested fields
  int32_t num_children = 0;
  // 6: DEPRECATED: record the original type before conversion to parquet type
  thrust::optional<ConvertedType> converted_type;
  // 7: DEPRECATED: record the scale for DECIMAL converted type
  int32_t decimal_scale = 0;
  // 8: DEPRECATED: record the precision for DECIMAL converted type
  int32_t decimal_precision = 0;
  // 9: save field_id from original schema
  thrust::optional<int32_t> field_id;
  // 10: replaces converted type
  thrust::optional<LogicalType> logical_type;

  // extra cudf specific fields
  bool output_as_byte_array = false;

  // The following fields are filled in later during schema initialization
  int max_definition_level = 0;
  int max_repetition_level = 0;
  size_type parent_idx     = 0;
  std::vector<size_type> children_idx;

  // NOTE: compares only the fields read from the file, not the derived ones above
  bool operator==(SchemaElement const& other) const
  {
    return type == other.type && converted_type == other.converted_type &&
           type_length == other.type_length && repetition_type == other.repetition_type &&
           name == other.name && num_children == other.num_children &&
           decimal_scale == other.decimal_scale && decimal_precision == other.decimal_precision &&
           field_id == other.field_id;
  }

  // the parquet format is a little squishy when it comes to interpreting
  // repeated fields. sometimes repeated fields act as "stubs" in the schema
  // that don't represent a true nesting level.
  //
  // this is the case with plain lists:
  //
  // optional group my_list (LIST) {
  //   repeated group element {        <-- not part of the output hierarchy
  //     required binary str (UTF8);
  //   };
  // }
  //
  // However, for backwards compatibility reasons, there are a few special cases, namely
  // List<Struct<>> (which also corresponds to how the map type is specified), where
  // this does not hold true
  //
  // optional group my_list (LIST) {
  //   repeated group element {        <-- part of the hierarchy because it represents a struct
  //     required binary str (UTF8);
  //     required int32 num;
  //  };
  // }
  [[nodiscard]] bool is_stub() const { return repetition_type == REPEATED && num_children == 1; }

  // https://github.com/apache/parquet-cpp/blob/642da05/src/parquet/schema.h#L49-L50
  // One-level LIST encoding: Only allows required lists with required cells:
  //   repeated value_type name
  [[nodiscard]] bool is_one_level_list(SchemaElement const& parent) const
  {
    return repetition_type == REPEATED and num_children == 0 and not parent.is_list();
  }

  // returns true if the element is a list
  [[nodiscard]] bool is_list() const { return converted_type == LIST; }

  // in parquet terms, a group is a level of nesting in the schema. a group
  // can be a struct or a list
  [[nodiscard]] bool is_struct() const
  {
    return type == UNDEFINED_TYPE &&
           // this assumption might be a little weak.
           ((repetition_type != REPEATED) || (repetition_type == REPEATED && num_children > 1));
  }
};

/**
 * @brief Thrift-derived struct describing column chunk statistics
 */
struct Statistics {
  // deprecated max value in signed comparison order
  thrust::optional<std::vector<uint8_t>> max;
  // deprecated min value in signed comparison order
  thrust::optional<std::vector<uint8_t>> min;
  // count of null values in the column
  thrust::optional<int64_t> null_count;
  // count of distinct values occurring
  thrust::optional<int64_t> distinct_count;
  // max value for column determined by ColumnOrder
  thrust::optional<std::vector<uint8_t>> max_value;
  // min value for column determined by ColumnOrder
  thrust::optional<std::vector<uint8_t>> min_value;
};

/**
 * @brief Thrift-derived struct describing a column chunk
 */
struct ColumnChunkMetaData {
  Type type = BOOLEAN;
  std::vector<Encoding> encodings;
  std::vector<std::string> path_in_schema;
  Compression codec  = UNCOMPRESSED;
  int64_t num_values = 0;
  int64_t total_uncompressed_size =
    0;  // total byte size of all uncompressed pages in this column chunk (including the headers)
  int64_t total_compressed_size =
    0;  // total byte size of all compressed pages in this column chunk (including the headers)
  int64_t data_page_offset  = 0;  // Byte offset from beginning of file to first data page
  int64_t index_page_offset = 0;  // Byte offset from beginning of file to root index page
  int64_t dictionary_page_offset =
    0;  // Byte offset from the beginning of file to first (only) dictionary page
  Statistics statistics;  // Encoded chunk-level statistics
};

/**
 * @brief Thrift-derived struct describing a chunk of data for a particular
 * column
 *
 * Each column chunk lives in a particular row group and are guaranteed to be
 * contiguous in the file. Any missing or corrupted chunks can be skipped during
 * reading.
 */
struct ColumnChunk {
  std::string file_path = "";
  int64_t file_offset   = 0;
  ColumnChunkMetaData meta_data;
  int64_t offset_index_offset = 0;  // File offset of ColumnChunk's OffsetIndex
  int32_t offset_index_length = 0;  // Size of ColumnChunk's OffsetIndex, in bytes
  int64_t column_index_offset = 0;  // File offset of ColumnChunk's ColumnIndex
  int32_t column_index_length = 0;  // Size of ColumnChunk's ColumnIndex, in bytes

  // Following fields are derived from other fields
  int schema_idx = -1;  // Index in flattened schema (derived from path_in_schema)
};

/**
 * @brief Thrift-derived struct describing a group of row data
 *
 * There may be one or more row groups within a dataset, with each row group
 * consisting of a column chunk for each column.
 */
struct RowGroup {
  int64_t total_byte_size = 0;
  std::vector<ColumnChunk> columns;
  int64_t num_rows = 0;
};

/**
 * @brief Thrift-derived struct describing a key-value pair, for user metadata
 */
struct KeyValue {
  std::string key;
  std::string value;
};

/**
 * @brief Thrift-derived struct describing file-level metadata
 *
 * The additional information stored in the key_value_metadata can be used
 * during reading to reconstruct the output data to the exact original dataset
 * prior to conversion to Parquet.
 */
struct FileMetaData {
  int32_t version = 0;
  std::vector<SchemaElement> schema;
  int64_t num_rows = 0;
  std::vector<RowGroup> row_groups;
  std::vector<KeyValue> key_value_metadata;
  std::string created_by = "";
  thrust::optional<std::vector<ColumnOrder>> column_orders;
};

/**
 * @brief Thrift-derived struct describing the header for a data page
 */
struct DataPageHeader {
  int32_t num_values = 0;  // Number of values, including NULLs, in this data page.
  Encoding encoding  = Encoding::PLAIN;  // Encoding used for this data page
  Encoding definition_level_encoding =
    Encoding::PLAIN;  // Encoding used for definition levels
  Encoding repetition_level_encoding =
    Encoding::PLAIN;  // Encoding used for repetition levels
};

/**
 * @brief Thrift-derived struct describing the header for a V2 data page
 */
struct DataPageHeaderV2 {
  int32_t num_values = 0;  // Number of values, including NULLs, in this data page.
  int32_t num_nulls  = 0;  // Number of NULL values, in this data page.
  int32_t num_rows   = 0;  // Number of rows in this data page. which means
                           // pages change on record boundaries (r = 0)
  Encoding encoding = Encoding::PLAIN;     // Encoding used for this data page
  int32_t definition_levels_byte_length = 0;  // length of the definition levels
  int32_t repetition_levels_byte_length = 0;  // length of the repetition levels
  bool is_compressed = true;                  // whether the values are compressed.
};

/**
 * @brief Thrift-derived struct describing the header for a dictionary page
 */
struct DictionaryPageHeader {
  int32_t num_values = 0;                // Number of values in the dictionary
  Encoding encoding  = Encoding::PLAIN;  // Encoding using this dictionary page
};

/**
 * @brief Thrift-derived struct describing the page header
 *
 * Column data are divided into individual chunks, which are subdivided into
 * pages. Each page has an associated header, describing the page type. There
 * can be multiple page types interleaved in a column chunk, and each page is
 * individually compressed and encoded. Any missing or corrupted pages can be
 * skipped during reading.
 */
struct PageHeader {
  PageType type =
    PageType::DATA_PAGE;  // the type of the page: indicates which of the *_header fields is set
  int32_t uncompressed_page_size = 0;  // Uncompressed page size in bytes (not including the header)
  int32_t compressed_page_size   = 0;  // Compressed page size in bytes (not including the header)
  DataPageHeader data_page_header;
  DictionaryPageHeader dictionary_page_header;
  DataPageHeaderV2 data_page_header_v2;
};

/**
 * @brief Thrift-derived struct describing page location information stored
 * in the offsets index.
 */
struct PageLocation {
  int64_t offset;                // Offset of the page in the file
  int32_t compressed_page_size;  // Compressed page size in bytes plus the heeader length
  int64_t first_row_index;  // Index within the column chunk of the first row of the page. reset to
                            // 0 at the beginning of each column chunk
};

/**
 * @brief Thrift-derived struct describing the offset index.
 */
struct OffsetIndex {
  std::vector<PageLocation> page_locations;
};

/**
 * @brief Thrift-derived struct describing the column index.
 */
struct ColumnIndex {
  std::vector<bool> null_pages;  // Boolean used to determine if a page contains only null values
  std::vector<std::vector<uint8_t>> min_values;  // lower bound for values in each page
  std::vector<std::vector<uint8_t>> max_values;  // upper bound for values in each page
  BoundaryOrder boundary_order =
    BoundaryOrder::UNORDERED;        // Indicates if min and max values are ordered
  std::vector<int64_t> null_counts;  // Optional count of null values per page
};

// bit space we are reserving in column_buffer::user_data
constexpr uint32_t PARQUET_COLUMN_BUFFER_SCHEMA_MASK          = (0xff'ffffu);
constexpr uint32_t PARQUET_COLUMN_BUFFER_FLAG_LIST_TERMINATED = (1 << 24);
// if this column has a list parent anywhere above it in the hierarchy
constexpr uint32_t PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT = (1 << 25);

/**
 * @brief Count the number of leading zeros in an unsigned integer
 */
static inline int CountLeadingZeros32(uint32_t value)
{
#if defined(__clang__) || defined(__GNUC__)
  // __builtin_clz(0) is undefined behavior, so handle zero explicitly
  if (value == 0) return 32;
  return static_cast<int>(__builtin_clz(value));
#else
  // Portable fallback: find the position of the highest set bit
  int bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 32 - bitpos;
#endif
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/page_enc.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "delta_enc.cuh" #include "parquet_gpu.cuh" #include <io/utilities/block_utils.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/assert.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/stream_pool.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <cub/cub.cuh> #include <cuda/std/chrono> #include <thrust/binary_search.h> #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/merge.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/tuple.h> #include <bitset> namespace cudf::io::parquet::detail { namespace { using ::cudf::detail::device_2dspan; constexpr int encode_block_size = 128; constexpr int rle_buffer_size = 2 * encode_block_size; constexpr int num_encode_warps = encode_block_size / cudf::detail::warp_size; constexpr int rolling_idx(int pos) { return rolling_index<rle_buffer_size>(pos); } // do not truncate statistics constexpr int32_t NO_TRUNC_STATS = 0; // minimum scratch space required for encoding statistics constexpr size_t MIN_STATS_SCRATCH_SIZE = sizeof(__int128_t); // mask to determine lane id constexpr uint32_t WARP_MASK = 
cudf::detail::warp_size - 1; // currently 64k - 1 constexpr uint32_t MAX_GRID_Y_SIZE = (1 << 16) - 1; // space needed for RLE length field constexpr int RLE_LENGTH_FIELD_LEN = 4; struct frag_init_state_s { parquet_column_device_view col; PageFragment frag; }; template <int rle_buf_size> struct page_enc_state_s { uint8_t* cur; //!< current output ptr uint8_t* rle_out; //!< current RLE write ptr uint8_t* rle_len_pos; //!< position to write RLE length (for V2 boolean data) uint32_t rle_run; //!< current RLE run uint32_t run_val; //!< current RLE run value uint32_t rle_pos; //!< RLE encoder positions uint32_t rle_numvals; //!< RLE input value count uint32_t rle_lit_count; uint32_t rle_rpt_count; uint32_t page_start_val; uint32_t chunk_start_val; uint32_t rpt_map[num_encode_warps]; EncPage page; EncColumnChunk ck; parquet_column_device_view col; uint32_t vals[rle_buf_size]; }; using rle_page_enc_state_s = page_enc_state_s<rle_buffer_size>; /** * @brief Returns the size of the type in the Parquet file. */ constexpr uint32_t physical_type_len(Type physical_type, type_id id) { if (physical_type == FIXED_LEN_BYTE_ARRAY and id == type_id::DECIMAL128) { return sizeof(__int128_t); } switch (physical_type) { case INT96: return 12u; case INT64: case DOUBLE: return sizeof(int64_t); case BOOLEAN: return 1u; default: return sizeof(int32_t); } } constexpr uint32_t max_RLE_page_size(uint8_t value_bit_width, uint32_t num_values) { if (value_bit_width == 0) return 0; // Run length = 4, max(rle/bitpack header) = 5. bitpacking worst case is one byte every 8 values // (because bitpacked runs are a multiple of 8). Don't need to round up the last term since that // overhead is accounted for in the '5'. // TODO: this formula does not take into account the data for RLE runs. The worst realistic case // is repeated runs of 8 bitpacked, 2 RLE values. 
In this case, the formula would be // 0.8 * (num_values * bw / 8 + num_values / 8) + 0.2 * (num_values / 2 * (1 + (bw+7)/8)) // for bw < 8 the above value will be larger than below, but in testing it seems like for low // bitwidths it's hard to get the pathological 8:2 split. // If the encoder starts printing the data corruption warning, then this will need to be // revisited. return 4 + 5 + util::div_rounding_up_unsafe(num_values * value_bit_width, 8) + (num_values / 8); } // subtract b from a, but return 0 if this would underflow constexpr size_t underflow_safe_subtract(size_t a, size_t b) { if (b > a) { return 0; } return a - b; } void __device__ init_frag_state(frag_init_state_s* const s, uint32_t fragment_size, int part_end_row) { // frag.num_rows = fragment_size except for the last fragment in partition which can be // smaller. num_rows is fixed but fragment size could be larger if the data is strings or // nested. s->frag.num_rows = min(fragment_size, part_end_row - s->frag.start_row); s->frag.num_dict_vals = 0; s->frag.fragment_data_size = 0; s->frag.dict_data_size = 0; s->frag.start_value_idx = row_to_value_idx(s->frag.start_row, s->col); auto const end_value_idx = row_to_value_idx(s->frag.start_row + s->frag.num_rows, s->col); s->frag.num_leaf_values = end_value_idx - s->frag.start_value_idx; if (s->col.level_offsets != nullptr) { // For nested schemas, the number of values in a fragment is not directly related to the // number of encoded data elements or the number of rows. It is simply the number of // repetition/definition values which together encode validity and nesting information. 
auto const first_level_val_idx = s->col.level_offsets[s->frag.start_row]; auto const last_level_val_idx = s->col.level_offsets[s->frag.start_row + s->frag.num_rows]; s->frag.num_values = last_level_val_idx - first_level_val_idx; } else { s->frag.num_values = s->frag.num_rows; } } template <int block_size> void __device__ calculate_frag_size(frag_init_state_s* const s, int t) { using block_reduce = cub::BlockReduce<uint32_t, block_size>; __shared__ typename block_reduce::TempStorage reduce_storage; auto const physical_type = s->col.physical_type; auto const leaf_type = s->col.leaf_column->type().id(); auto const dtype_len = physical_type_len(physical_type, leaf_type); auto const nvals = s->frag.num_leaf_values; auto const start_value_idx = s->frag.start_value_idx; for (uint32_t i = 0; i < nvals; i += block_size) { auto const val_idx = start_value_idx + i + t; auto const is_valid = i + t < nvals && val_idx < s->col.leaf_column->size() && s->col.leaf_column->is_valid(val_idx); uint32_t len; if (is_valid) { len = dtype_len; if (physical_type == BYTE_ARRAY) { switch (leaf_type) { case type_id::STRING: { auto str = s->col.leaf_column->element<string_view>(val_idx); len += str.size_bytes(); } break; case type_id::LIST: { auto list_element = get_element<statistics::byte_array_view>(*s->col.leaf_column, val_idx); len += list_element.size_bytes(); } break; default: CUDF_UNREACHABLE("Unsupported data type for leaf column"); } } } else { len = 0; } len = block_reduce(reduce_storage).Sum(len); if (t == 0) { s->frag.fragment_data_size += len; } __syncthreads(); // page fragment size must fit in a 32-bit signed integer if (s->frag.fragment_data_size > std::numeric_limits<int32_t>::max()) { CUDF_UNREACHABLE("page fragment size exceeds maximum for i32"); } } } /** * @brief Determine the correct page encoding for the given page parameters. * * This is only used by the plain and dictionary encoders. Delta encoders will set the page * encoding directly. 
*/ Encoding __device__ determine_encoding(PageType page_type, Type physical_type, bool use_dictionary, bool write_v2_headers) { // NOTE: For dictionary encoding, parquet v2 recommends using PLAIN in dictionary page and // RLE_DICTIONARY in data page, but parquet v1 uses PLAIN_DICTIONARY in both dictionary and // data pages (actual encoding is identical). switch (page_type) { case PageType::DATA_PAGE: return use_dictionary ? Encoding::PLAIN_DICTIONARY : Encoding::PLAIN; case PageType::DATA_PAGE_V2: return physical_type == BOOLEAN ? Encoding::RLE : use_dictionary ? Encoding::RLE_DICTIONARY : Encoding::PLAIN; case PageType::DICTIONARY_PAGE: return write_v2_headers ? Encoding::PLAIN : Encoding::PLAIN_DICTIONARY; default: CUDF_UNREACHABLE("unsupported page type"); } } // operator to use with warp_reduce. stolen from cub::Sum struct BitwiseOr { /// Binary OR operator, returns <tt>a | b</tt> template <typename T> __host__ __device__ __forceinline__ T operator()(T const& a, T const& b) const { return a | b; } }; // I is the column type from the input table template <typename I> __device__ uint8_t const* delta_encode(page_enc_state_s<0>* s, uint32_t valid_count, uint64_t* buffer, void* temp_space) { using output_type = std::conditional_t<std::is_signed_v<I>, zigzag128_t, uleb128_t>; __shared__ delta_binary_packer<output_type> packer; auto const t = threadIdx.x; if (t == 0) { packer.init(s->cur, valid_count, reinterpret_cast<output_type*>(buffer), temp_space); } __syncthreads(); // TODO(ets): in the plain encoder the scaling is a little different for INT32 than INT64. // might need to modify this if there's a big performance hit in the 32-bit case. int32_t const scale = s->col.ts_scale == 0 ? 
1 : s->col.ts_scale; for (uint32_t cur_val_idx = 0; cur_val_idx < s->page.num_leaf_values;) { uint32_t const nvals = min(s->page.num_leaf_values - cur_val_idx, delta::block_size); size_type const val_idx_in_block = cur_val_idx + t; size_type const val_idx = s->page_start_val + val_idx_in_block; bool const is_valid = (val_idx < s->col.leaf_column->size() && val_idx_in_block < s->page.num_leaf_values) ? s->col.leaf_column->is_valid(val_idx) : false; cur_val_idx += nvals; output_type v = is_valid ? s->col.leaf_column->element<I>(val_idx) : 0; if (scale < 0) { v /= -scale; } else { v *= scale; } packer.add_value(v, is_valid); } return packer.flush(); } } // anonymous namespace // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuInitRowGroupFragments(device_2dspan<PageFragment> frag, device_span<parquet_column_device_view const> col_desc, device_span<partition_info const> partitions, device_span<int const> part_frag_offset, uint32_t fragment_size) { __shared__ __align__(16) frag_init_state_s state_g; frag_init_state_s* const s = &state_g; auto const t = threadIdx.x; auto const num_fragments_per_column = frag.size().second; if (t == 0) { s->col = col_desc[blockIdx.x]; } __syncthreads(); for (uint32_t frag_y = blockIdx.y; frag_y < num_fragments_per_column; frag_y += gridDim.y) { if (t == 0) { // Find which partition this fragment came from auto it = thrust::upper_bound(thrust::seq, part_frag_offset.begin(), part_frag_offset.end(), frag_y); int const p = it - part_frag_offset.begin() - 1; int const part_end_row = partitions[p].start_row + partitions[p].num_rows; s->frag.start_row = (frag_y - part_frag_offset[p]) * fragment_size + partitions[p].start_row; s->frag.chunk = frag[blockIdx.x][frag_y].chunk; init_frag_state(s, fragment_size, part_end_row); } __syncthreads(); calculate_frag_size<block_size>(s, t); __syncthreads(); if (t == 0) { frag[blockIdx.x][frag_y] = s->frag; } } } // blockDim {512,1,1} template <int block_size> 
__global__ void __launch_bounds__(block_size) gpuCalculatePageFragments(device_span<PageFragment> frag, device_span<size_type const> column_frag_sizes) { __shared__ __align__(16) frag_init_state_s state_g; EncColumnChunk* const ck_g = frag[blockIdx.x].chunk; frag_init_state_s* const s = &state_g; uint32_t const t = threadIdx.x; auto const fragment_size = column_frag_sizes[ck_g->col_desc_id]; if (t == 0) { s->col = *ck_g->col_desc; } __syncthreads(); if (t == 0) { int const part_end_row = ck_g->start_row + ck_g->num_rows; s->frag.start_row = ck_g->start_row + (blockIdx.x - ck_g->first_fragment) * fragment_size; s->frag.chunk = ck_g; init_frag_state(s, fragment_size, part_end_row); } __syncthreads(); calculate_frag_size<block_size>(s, t); if (t == 0) { frag[blockIdx.x] = s->frag; } } // blockDim {128,1,1} __global__ void __launch_bounds__(128) gpuInitFragmentStats(device_span<statistics_group> groups, device_span<PageFragment const> fragments) { uint32_t const lane_id = threadIdx.x & WARP_MASK; uint32_t const frag_id = blockIdx.x * 4 + (threadIdx.x / cudf::detail::warp_size); if (frag_id < fragments.size()) { if (lane_id == 0) { statistics_group g; auto* const ck_g = fragments[frag_id].chunk; g.col = ck_g->col_desc; g.start_row = fragments[frag_id].start_value_idx; g.num_rows = fragments[frag_id].num_leaf_values; g.non_leaf_nulls = fragments[frag_id].num_values - g.num_rows; groups[frag_id] = g; } } } __device__ size_t delta_data_len(Type physical_type, cudf::type_id type_id, uint32_t num_values) { auto const dtype_len_out = physical_type_len(physical_type, type_id); auto const dtype_len = [&]() -> uint32_t { if (physical_type == INT32) { return int32_logical_len(type_id); } if (physical_type == INT96) { return sizeof(int64_t); } return dtype_len_out; }(); auto const vals_per_block = delta::block_size; size_t const num_blocks = util::div_rounding_up_unsafe(num_values, vals_per_block); // need max dtype_len + 1 bytes for min_delta (because we only encode 7 bits per 
byte) // one byte per mini block for the bitwidth auto const mini_block_header_size = dtype_len + 1 + delta::num_mini_blocks; // each encoded value can be at most sizeof(type) * 8 + 1 bits auto const max_bits = dtype_len * 8 + 1; // each data block will then be max_bits * values per block. vals_per_block is guaranteed to be // divisible by 128 (via static assert on delta::block_size), but do safe division anyway. auto const bytes_per_block = cudf::util::div_rounding_up_unsafe(max_bits * vals_per_block, 8); auto const block_size = mini_block_header_size + bytes_per_block; // delta header is 2 bytes for the block_size, 1 byte for number of mini-blocks, // max 5 bytes for number of values, and max dtype_len + 1 for first value. // TODO: if we ever allow configurable block sizes then this calculation will need to be // modified. auto const header_size = 2 + 1 + 5 + dtype_len + 1; return header_size + num_blocks * block_size; } // blockDim {128,1,1} __global__ void __launch_bounds__(128) gpuInitPages(device_2dspan<EncColumnChunk> chunks, device_span<EncPage> pages, device_span<size_type> page_sizes, device_span<size_type> comp_page_sizes, device_span<parquet_column_device_view const> col_desc, statistics_merge_group* page_grstats, statistics_merge_group* chunk_grstats, int32_t num_columns, size_t max_page_size_bytes, size_type max_page_size_rows, uint32_t page_align, bool write_v2_headers) { // TODO: All writing seems to be done by thread 0. Could be replaced by thrust foreach __shared__ __align__(8) parquet_column_device_view col_g; __shared__ __align__(8) EncColumnChunk ck_g; __shared__ __align__(8) PageFragment frag_g; __shared__ __align__(8) EncPage page_g; __shared__ __align__(8) statistics_merge_group pagestats_g; uint32_t const t = threadIdx.x; auto const data_page_type = write_v2_headers ? 
PageType::DATA_PAGE_V2 : PageType::DATA_PAGE; if (t == 0) { col_g = col_desc[blockIdx.x]; ck_g = chunks[blockIdx.y][blockIdx.x]; page_g = {}; } __syncthreads(); // if writing delta encoded values, we're going to need to know the data length to get a guess // at the worst case number of bytes needed to encode. auto const physical_type = col_g.physical_type; auto const type_id = col_g.leaf_column->type().id(); auto const is_use_delta = write_v2_headers && !ck_g.use_dictionary && (physical_type == INT32 || physical_type == INT64); if (t < 32) { uint32_t fragments_in_chunk = 0; uint32_t rows_in_page = 0; uint32_t values_in_page = 0; uint32_t leaf_values_in_page = 0; size_t page_size = 0; uint32_t num_pages = 0; uint32_t num_rows = 0; uint32_t page_start = 0; uint32_t page_offset = ck_g.ck_stat_size; uint32_t num_dict_entries = 0; uint32_t comp_page_offset = ck_g.ck_stat_size; uint32_t page_headers_size = 0; uint32_t max_page_data_size = 0; uint32_t cur_row = ck_g.start_row; uint32_t ck_max_stats_len = 0; uint32_t max_stats_len = 0; if (!t) { pagestats_g.col_dtype = col_g.leaf_column->type(); pagestats_g.stats_dtype = col_g.stats_dtype; pagestats_g.start_chunk = ck_g.first_fragment; pagestats_g.num_chunks = 0; } if (ck_g.use_dictionary) { if (!t) { page_g.page_data = ck_g.uncompressed_bfr + page_offset; page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset; page_g.num_fragments = 0; page_g.page_type = PageType::DICTIONARY_PAGE; page_g.chunk = &chunks[blockIdx.y][blockIdx.x]; page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x; page_g.hdr_size = 0; page_g.max_hdr_size = 32; page_g.max_data_size = ck_g.uniq_data_size; page_g.start_row = cur_row; page_g.num_rows = ck_g.num_dict_entries; page_g.num_leaf_values = ck_g.num_dict_entries; page_g.num_values = ck_g.num_dict_entries; // TODO: shouldn't matter for dict page page_offset += util::round_up_unsafe(page_g.max_hdr_size + page_g.max_data_size, page_align); if (not comp_page_sizes.empty()) { comp_page_offset 
+= page_g.max_hdr_size + comp_page_sizes[ck_g.first_page]; } page_headers_size += page_g.max_hdr_size; max_page_data_size = max(max_page_data_size, page_g.max_data_size); } __syncwarp(); if (t == 0) { if (not pages.empty()) { page_g.kernel_mask = encode_kernel_mask::PLAIN; pages[ck_g.first_page] = page_g; } if (not page_sizes.empty()) { page_sizes[ck_g.first_page] = page_g.max_data_size; } if (page_grstats) { page_grstats[ck_g.first_page] = pagestats_g; } } num_pages = 1; } __syncwarp(); // page padding needed for RLE encoded boolean data auto const rle_pad = write_v2_headers && col_g.physical_type == BOOLEAN ? RLE_LENGTH_FIELD_LEN : 0; // This loop goes over one page fragment at a time and adds it to page. // When page size crosses a particular limit, then it moves on to the next page and then next // page fragment gets added to that one. // This doesn't actually deal with data. It's agnostic. It only cares about number of rows and // page size. do { uint32_t minmax_len = 0; __syncwarp(); if (num_rows < ck_g.num_rows) { if (t == 0) { frag_g = ck_g.fragments[fragments_in_chunk]; } if (!t && ck_g.stats) { if (col_g.stats_dtype == dtype_string) { minmax_len = max(ck_g.stats[fragments_in_chunk].min_value.str_val.length, ck_g.stats[fragments_in_chunk].max_value.str_val.length); } else if (col_g.stats_dtype == dtype_byte_array) { minmax_len = max(ck_g.stats[fragments_in_chunk].min_value.byte_val.length, ck_g.stats[fragments_in_chunk].max_value.byte_val.length); } } } else if (!t) { frag_g.fragment_data_size = 0; frag_g.num_rows = 0; } __syncwarp(); uint32_t fragment_data_size = (ck_g.use_dictionary) ? 
frag_g.num_leaf_values * util::div_rounding_up_unsafe(ck_g.dict_rle_bits, 8) : frag_g.fragment_data_size; // page fragment size must fit in a 32-bit signed integer if (fragment_data_size > std::numeric_limits<int32_t>::max()) { CUDF_UNREACHABLE("page fragment size exceeds maximum for i32"); } // TODO (dm): this convoluted logic to limit page size needs refactoring size_t this_max_page_size = (values_in_page * 2 >= ck_g.num_values) ? 256 * 1024 : (values_in_page * 3 >= ck_g.num_values) ? 384 * 1024 : 512 * 1024; // override this_max_page_size if the requested size is smaller this_max_page_size = min(this_max_page_size, max_page_size_bytes); // subtract size of rep and def level vectors and RLE length field auto num_vals = values_in_page + frag_g.num_values; this_max_page_size = underflow_safe_subtract( this_max_page_size, max_RLE_page_size(col_g.num_def_level_bits(), num_vals) + max_RLE_page_size(col_g.num_rep_level_bits(), num_vals) + rle_pad); // checks to see when we need to close the current page and start a new one auto const is_last_chunk = num_rows >= ck_g.num_rows; auto const is_page_bytes_exceeded = page_size + fragment_data_size > this_max_page_size; auto const is_page_rows_exceeded = rows_in_page + frag_g.num_rows > max_page_size_rows; // only check for limit overflow if there's already at least one fragment for this page auto const is_page_too_big = values_in_page > 0 && (is_page_bytes_exceeded || is_page_rows_exceeded); if (is_last_chunk || is_page_too_big) { if (ck_g.use_dictionary) { // Additional byte to store entry bit width page_size = 1 + max_RLE_page_size(ck_g.dict_rle_bits, values_in_page); } if (!t) { page_g.num_fragments = fragments_in_chunk - page_start; page_g.chunk = &chunks[blockIdx.y][blockIdx.x]; page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x; page_g.page_type = data_page_type; page_g.hdr_size = 0; page_g.max_hdr_size = 32; // Max size excluding statistics if (ck_g.stats) { uint32_t stats_hdr_len = 16; if (col_g.stats_dtype == 
dtype_string || col_g.stats_dtype == dtype_byte_array) { stats_hdr_len += 5 * 3 + 2 * max_stats_len; } else { stats_hdr_len += ((col_g.stats_dtype >= dtype_int64) ? 10 : 5) * 3; } page_g.max_hdr_size += stats_hdr_len; } page_g.max_hdr_size = util::round_up_unsafe(page_g.max_hdr_size, page_align); page_g.page_data = ck_g.uncompressed_bfr + page_offset; if (not comp_page_sizes.empty()) { page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset; } page_g.start_row = cur_row; page_g.num_rows = rows_in_page; page_g.num_leaf_values = leaf_values_in_page; page_g.num_values = values_in_page; auto const def_level_size = max_RLE_page_size(col_g.num_def_level_bits(), values_in_page); auto const rep_level_size = max_RLE_page_size(col_g.num_rep_level_bits(), values_in_page); // get a different bound if using delta encoding if (is_use_delta) { page_size = max(page_size, delta_data_len(physical_type, type_id, page_g.num_leaf_values)); } auto const max_data_size = page_size + def_level_size + rep_level_size + rle_pad; // page size must fit in 32-bit signed integer if (max_data_size > std::numeric_limits<int32_t>::max()) { CUDF_UNREACHABLE("page size exceeds maximum for i32"); } page_g.max_data_size = static_cast<uint32_t>(max_data_size); pagestats_g.start_chunk = ck_g.first_fragment + page_start; pagestats_g.num_chunks = page_g.num_fragments; page_offset += util::round_up_unsafe(page_g.max_hdr_size + page_g.max_data_size, page_align); if (not comp_page_sizes.empty()) { comp_page_offset += page_g.max_hdr_size + comp_page_sizes[ck_g.first_page + num_pages]; } page_headers_size += page_g.max_hdr_size; max_page_data_size = max(max_page_data_size, page_g.max_data_size); cur_row += rows_in_page; ck_max_stats_len = max(ck_max_stats_len, max_stats_len); } __syncwarp(); if (t == 0) { if (not pages.empty()) { if (is_use_delta) { page_g.kernel_mask = encode_kernel_mask::DELTA_BINARY; } else if (ck_g.use_dictionary || physical_type == BOOLEAN) { page_g.kernel_mask = 
encode_kernel_mask::DICTIONARY; } else { page_g.kernel_mask = encode_kernel_mask::PLAIN; } pages[ck_g.first_page + num_pages] = page_g; } if (not page_sizes.empty()) { page_sizes[ck_g.first_page + num_pages] = page_g.max_data_size; } if (page_grstats) { page_grstats[ck_g.first_page + num_pages] = pagestats_g; } } num_pages++; page_size = 0; rows_in_page = 0; values_in_page = 0; leaf_values_in_page = 0; page_start = fragments_in_chunk; max_stats_len = 0; } max_stats_len = max(max_stats_len, minmax_len); num_dict_entries += frag_g.num_dict_vals; page_size += fragment_data_size; rows_in_page += frag_g.num_rows; values_in_page += frag_g.num_values; leaf_values_in_page += frag_g.num_leaf_values; num_rows += frag_g.num_rows; fragments_in_chunk++; } while (frag_g.num_rows != 0); __syncwarp(); if (!t) { if (ck_g.ck_stat_size == 0 && ck_g.stats) { uint32_t ck_stat_size = util::round_up_unsafe(48 + 2 * ck_max_stats_len, page_align); page_offset += ck_stat_size; comp_page_offset += ck_stat_size; ck_g.ck_stat_size = ck_stat_size; } ck_g.num_pages = num_pages; ck_g.bfr_size = page_offset; ck_g.page_headers_size = page_headers_size; ck_g.max_page_data_size = max_page_data_size; if (not comp_page_sizes.empty()) { ck_g.compressed_size = comp_page_offset; } pagestats_g.start_chunk = ck_g.first_page + ck_g.use_dictionary; // Exclude dictionary pagestats_g.num_chunks = num_pages - ck_g.use_dictionary; } } __syncthreads(); if (t == 0) { if (not pages.empty()) ck_g.pages = &pages[ck_g.first_page]; chunks[blockIdx.y][blockIdx.x] = ck_g; if (chunk_grstats) chunk_grstats[blockIdx.y * num_columns + blockIdx.x] = pagestats_g; } } /** * @brief Mask table representing how many consecutive repeats are needed to code a repeat run *[nbits-1] */ static __device__ __constant__ uint32_t kRleRunMask[24] = { 0x00ff'ffff, 0x0fff, 0x00ff, 0x3f, 0x0f, 0x0f, 0x7, 0x7, 0x3, 0x3, 0x3, 0x3, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}; /** * @brief Variable-length encode an integer */ inline 
__device__ uint8_t* VlqEncode(uint8_t* p, uint32_t v)
{
  // ULEB128-style encoding: 7 payload bits per byte, high bit set on continuation bytes
  while (v > 0x7f) {
    *p++ = (v | 0x80);
    v >>= 7;
  }
  *p++ = v;
  return p;
}

/**
 * @brief Pack literal values in output bitstream (1,2,3,4,5,6,8,10,12,16,20 or 24 bits per value)
 *
 * Per-thread symbols are merged into whole bytes with warp shuffles; the leader of each
 * 2/4/8-thread sub-group then stores the combined bytes to `dst`.
 *
 * @param dst output byte stream
 * @param v this thread's symbol value
 * @param count number of valid symbols
 * @param w bit width of each symbol
 * @param t thread id
 */
inline __device__ void PackLiteralsShuffle(
  uint8_t* dst, uint32_t v, uint32_t count, uint32_t w, uint32_t t)
{
  constexpr uint32_t MASK2T = 1;  // mask for 2 thread leader
  constexpr uint32_t MASK4T = 3;  // mask for 4 thread leader
  constexpr uint32_t MASK8T = 7;  // mask for 8 thread leader
  uint64_t v64;

  // threads in warps entirely past `count` have nothing to pack or store
  if (t > (count | 0x1f)) { return; }

  switch (w) {
    case 1:
      v |= shuffle_xor(v, 1) << 1;  // grab bit 1 from neighbor
      v |= shuffle_xor(v, 2) << 2;  // grab bits 2-3 from 2 lanes over
      v |= shuffle_xor(v, 4) << 4;  // grab bits 4-7 from 4 lanes over
      // sub-warp leader writes the combined bits
      if (t < count && !(t & MASK8T)) { dst[(t * w) >> 3] = v; }
      return;
    case 2:
      v |= shuffle_xor(v, 1) << 2;
      v |= shuffle_xor(v, 2) << 4;
      if (t < count && !(t & MASK4T)) { dst[(t * w) >> 3] = v; }
      return;
    case 3:
      // 8 symbols x 3 bits = 3 bytes per 8-thread group
      v |= shuffle_xor(v, 1) << 3;
      v |= shuffle_xor(v, 2) << 6;
      v |= shuffle_xor(v, 4) << 12;
      if (t < count && !(t & MASK8T)) {
        dst[(t >> 3) * 3 + 0] = v;
        dst[(t >> 3) * 3 + 1] = v >> 8;
        dst[(t >> 3) * 3 + 2] = v >> 16;
      }
      return;
    case 4:
      v |= shuffle_xor(v, 1) << 4;
      if (t < count && !(t & MASK2T)) { dst[(t * w) >> 3] = v; }
      return;
    case 5:
      // 8 symbols x 5 bits = 5 bytes per 8-thread group; needs 64-bit accumulation
      v |= shuffle_xor(v, 1) << 5;
      v |= shuffle_xor(v, 2) << 10;
      v64 = static_cast<uint64_t>(shuffle_xor(v, 4)) << 20 | v;
      if (t < count && !(t & MASK8T)) {
        dst[(t >> 3) * 5 + 0] = v64;
        dst[(t >> 3) * 5 + 1] = v64 >> 8;
        dst[(t >> 3) * 5 + 2] = v64 >> 16;
        dst[(t >> 3) * 5 + 3] = v64 >> 24;
        dst[(t >> 3) * 5 + 4] = v64 >> 32;
      }
      return;
    case 6:
      v |= shuffle_xor(v, 1) << 6;
      v |= shuffle_xor(v, 2) << 12;
      if (t < count && !(t & MASK4T)) {
        dst[(t >> 2) * 3 + 0] = v;
        dst[(t >> 2) * 3 + 1] = v >> 8;
        dst[(t >> 2) * 3 + 2] = v >> 16;
      }
      return;
    case 8:
      // byte-aligned: each thread stores its own symbol directly
      if (t < count) { dst[t] = v; }
      return;
    case 10:
      v |= shuffle_xor(v, 1) << 10;
      v64 = static_cast<uint64_t>(shuffle_xor(v, 2)) << 20 | v;
      if (t < count && !(t & MASK4T)) {
        dst[(t >> 2) * 5 + 0] = v64;
        dst[(t >> 2) * 5 + 1] = v64 >> 8;
        dst[(t >> 2) * 5 + 2] = v64 >> 16;
        dst[(t >> 2) * 5 + 3] = v64 >> 24;
        dst[(t >> 2) * 5 + 4] = v64 >> 32;
      }
      return;
    case 12:
      v |= shuffle_xor(v, 1) << 12;
      if (t < count && !(t & MASK2T)) {
        dst[(t >> 1) * 3 + 0] = v;
        dst[(t >> 1) * 3 + 1] = v >> 8;
        dst[(t >> 1) * 3 + 2] = v >> 16;
      }
      return;
    case 16:
      if (t < count) {
        dst[t * 2 + 0] = v;
        dst[t * 2 + 1] = v >> 8;
      }
      return;
    case 20:
      v64 = static_cast<uint64_t>(shuffle_xor(v, 1)) << 20 | v;
      if (t < count && !(t & MASK2T)) {
        dst[(t >> 1) * 5 + 0] = v64;
        dst[(t >> 1) * 5 + 1] = v64 >> 8;
        dst[(t >> 1) * 5 + 2] = v64 >> 16;
        dst[(t >> 1) * 5 + 3] = v64 >> 24;
        dst[(t >> 1) * 5 + 4] = v64 >> 32;
      }
      return;
    case 24:
      if (t < count) {
        dst[t * 3 + 0] = v;
        dst[t * 3 + 1] = v >> 8;
        dst[t * 3 + 2] = v >> 16;
      }
      return;
    default: CUDF_UNREACHABLE("Unsupported bit width");
  }
}

/**
 * @brief Pack literals of arbitrary bit-length in output bitstream.
 *
 * Each thread ORs its shifted symbol into a zeroed shared-memory scratch buffer with 32-bit
 * atomics, then the block copies the packed bytes out to `dst`. Handles bit widths that do not
 * fall on the shuffle-friendly boundaries of PackLiteralsShuffle.
 *
 * @param dst output byte stream
 * @param v this thread's symbol value
 * @param count number of valid symbols
 * @param w bit width of each symbol
 * @param t thread id
 */
inline __device__ void PackLiteralsRoundRobin(
  uint8_t* dst, uint32_t v, uint32_t count, uint32_t w, uint32_t t)
{
  // Scratch space to temporarily write to. Needed because we will use atomics to write 32 bit
  // words but the destination mem may not be a multiple of 4 bytes.
  // TODO (dm): This assumes blockdim = 128. Reduce magic numbers.
  constexpr uint32_t NUM_THREADS = 128;  // this needs to match gpuEncodePages block_size parameter
  constexpr uint32_t NUM_BYTES = (NUM_THREADS * MAX_DICT_BITS) >> 3;
  constexpr uint32_t SCRATCH_SIZE = NUM_BYTES / sizeof(uint32_t);
  __shared__ uint32_t scratch[SCRATCH_SIZE];
  for (uint32_t i = t; i < SCRATCH_SIZE; i += NUM_THREADS) {
    scratch[i] = 0;
  }
  __syncthreads();

  if (t <= count) {
    // shift symbol left by up to 31 bits
    uint64_t v64 = v;
    v64 <<= (t * w) & 0x1f;

    // Copy 64 bit word into two 32 bit words while following C++ strict aliasing rules.
    uint32_t v32[2];
    memcpy(&v32, &v64, sizeof(uint64_t));

    // Atomically write result to scratch
    if (v32[0]) { atomicOr(scratch + ((t * w) >> 5), v32[0]); }
    if (v32[1]) { atomicOr(scratch + ((t * w) >> 5) + 1, v32[1]); }
  }
  __syncthreads();

  // Copy scratch data to final destination
  auto available_bytes = (count * w + 7) / 8;
  auto scratch_bytes = reinterpret_cast<char*>(&scratch[0]);
  for (uint32_t i = t; i < available_bytes; i += NUM_THREADS) {
    dst[i] = scratch_bytes[i];
  }
  __syncthreads();
}

/**
 * @brief Pack literal values in output bitstream
 *
 * Dispatches to the fast shuffle-based packer for the bit widths it supports, and to the
 * atomics-based round-robin packer for everything else (up to 24 bits).
 */
inline __device__ void PackLiterals(
  uint8_t* dst, uint32_t v, uint32_t count, uint32_t w, uint32_t t)
{
  if (w > 24) { CUDF_UNREACHABLE("Unsupported bit width"); }

  switch (w) {
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 8:
    case 10:
    case 12:
    case 16:
    case 20:
    case 24:
      // bit widths that lie on easy boundaries can be handled either directly
      // (8, 16, 24) or through fast shuffle operations.
      PackLiteralsShuffle(dst, v, count, w, t);
      return;
    default:
      // bit packing that uses atomics, but can handle arbitrary bit widths up to 24.
      PackLiteralsRoundRobin(dst, v, count, w, t);
  }
}

/**
 * @brief RLE encoder
 *
 * `rle_run` mirrors the Parquet RLE/bit-packed hybrid run header: (count << 1) with the LSB clear
 * for a repeat run, (number-of-8-value-groups << 1) | 1 for a literal run.
 *
 * @param[in,out] s Page encode state
 * @param[in] numvals Total count of input values
 * @param[in] nbits number of bits per symbol (1..16)
 * @param[in] flush nonzero if last batch in block
 * @param[in] t thread id (0..127)
 */
static __device__ void RleEncode(
  rle_page_enc_state_s* s, uint32_t numvals, uint32_t nbits, uint32_t flush, uint32_t t)
{
  using cudf::detail::warp_size;
  auto const lane_id = t % warp_size;
  auto const warp_id = t / warp_size;
  uint32_t rle_pos = s->rle_pos;
  uint32_t rle_run = s->rle_run;
  while (rle_pos < numvals || (flush && rle_run)) {
    uint32_t pos = rle_pos + t;
    if (rle_run > 0 && !(rle_run & 1)) {
      // Currently in a long repeat run
      uint32_t mask = ballot(pos < numvals && s->vals[rolling_idx(pos)] == s->run_val);
      uint32_t rle_rpt_count, max_rpt_count;
      if (lane_id == 0) { s->rpt_map[warp_id] = mask; }
      __syncthreads();
      if (t < warp_size) {
        // find how many leading values in this batch continue the run
        uint32_t c32 = ballot(t >= 4 || s->rpt_map[t] != 0xffff'ffffu);
        if (t == 0) {
          uint32_t last_idx = __ffs(c32) - 1;
          s->rle_rpt_count =
            last_idx * warp_size + ((last_idx < 4) ? __ffs(~s->rpt_map[last_idx]) - 1 : 0);
        }
      }
      __syncthreads();
      max_rpt_count = min(numvals - rle_pos, encode_block_size);
      rle_rpt_count = s->rle_rpt_count;
      rle_run += rle_rpt_count << 1;
      rle_pos += rle_rpt_count;
      if (rle_rpt_count < max_rpt_count || (flush && rle_pos == numvals)) {
        // run has ended (or input exhausted on flush): emit header + repeated value
        if (t == 0) {
          uint32_t const run_val = s->run_val;
          uint8_t* dst = VlqEncode(s->rle_out, rle_run);
          *dst++ = run_val;
          if (nbits > 8) { *dst++ = run_val >> 8; }
          if (nbits > 16) { *dst++ = run_val >> 16; }
          s->rle_out = dst;
        }
        rle_run = 0;
      }
    } else {
      // New run or in a literal run
      uint32_t v0 = s->vals[rolling_idx(pos)];
      uint32_t v1 = s->vals[rolling_idx(pos + 1)];
      uint32_t mask = ballot(pos + 1 < numvals && v0 == v1);
      uint32_t maxvals = min(numvals - rle_pos, encode_block_size);
      uint32_t rle_lit_count, rle_rpt_count;
      if (lane_id == 0) { s->rpt_map[warp_id] = mask; }
      __syncthreads();
      if (t < warp_size) {
        // Repeat run can only start on a multiple of 8 values
        uint32_t idx8 = (t * 8) / warp_size;
        uint32_t pos8 = (t * 8) % warp_size;
        uint32_t m0 = (idx8 < 4) ? s->rpt_map[idx8] : 0;
        uint32_t m1 = (idx8 < 3) ? s->rpt_map[idx8 + 1] : 0;
        uint32_t needed_mask = kRleRunMask[nbits - 1];
        mask = ballot((__funnelshift_r(m0, m1, pos8) & needed_mask) == needed_mask);
        if (!t) {
          // literal section runs up to the first position where a repeat run can start
          uint32_t rle_run_start = (mask != 0) ? min((__ffs(mask) - 1) * 8, maxvals) : maxvals;
          uint32_t rpt_len = 0;
          if (rle_run_start < maxvals) {
            // measure the length of the repeat run that follows the literals
            uint32_t idx_cur = rle_run_start / warp_size;
            uint32_t idx_ofs = rle_run_start % warp_size;
            while (idx_cur < 4) {
              m0 = (idx_cur < 4) ? s->rpt_map[idx_cur] : 0;
              m1 = (idx_cur < 3) ? s->rpt_map[idx_cur + 1] : 0;
              mask = ~__funnelshift_r(m0, m1, idx_ofs);
              if (mask != 0) {
                rpt_len += __ffs(mask) - 1;
                break;
              }
              rpt_len += warp_size;
              idx_cur++;
            }
          }
          s->rle_lit_count = rle_run_start;
          s->rle_rpt_count = min(rpt_len, maxvals - rle_run_start);
        }
      }
      __syncthreads();
      rle_lit_count = s->rle_lit_count;
      rle_rpt_count = s->rle_rpt_count;
      if (rle_lit_count != 0 || (rle_run != 0 && rle_rpt_count != 0)) {
        uint32_t lit_div8;
        bool need_more_data = false;
        if (!flush && rle_pos + rle_lit_count == numvals) {
          // Wait for more data
          rle_lit_count -= min(rle_lit_count, 24);
          need_more_data = true;
        }
        if (rle_lit_count != 0) {
          lit_div8 = (rle_lit_count + ((flush && rle_pos + rle_lit_count == numvals) ? 7 : 0)) >> 3;
          if (rle_run + lit_div8 * 2 > 0x7f) {
            lit_div8 = 0x3f - (rle_run >> 1);  // Limit to fixed 1-byte header (504 literals)
            rle_rpt_count = 0;                 // Defer repeat run
          }
          if (lit_div8 != 0) {
            uint8_t* dst = s->rle_out + 1 + (rle_run >> 1) * nbits;
            PackLiterals(dst, (rle_pos + t < numvals) ? v0 : 0, lit_div8 * 8, nbits, t);
            rle_run = (rle_run + lit_div8 * 2) | 1;
            rle_pos = min(rle_pos + lit_div8 * 8, numvals);
          }
        }
        if (rle_run >= ((rle_rpt_count != 0 || (flush && rle_pos == numvals)) ? 0x03 : 0x7f)) {
          __syncthreads();
          // Complete literal run
          if (!t) {
            uint8_t* dst = s->rle_out;
            dst[0] = rle_run;  // At most 0x7f
            dst += 1 + nbits * (rle_run >> 1);
            s->rle_out = dst;
          }
          rle_run = 0;
        }
        if (need_more_data) { break; }
      }
      // Start a repeat run
      if (rle_rpt_count != 0) {
        if (t == s->rle_lit_count) { s->run_val = v0; }
        rle_run = rle_rpt_count * 2;
        rle_pos += rle_rpt_count;
        if (rle_pos + 1 == numvals && !flush) { break; }
      }
    }
    __syncthreads();
  }
  __syncthreads();
  // thread 0 persists the encoder state for the next batch
  if (!t) {
    s->rle_run = rle_run;
    s->rle_pos = rle_pos;
    s->rle_numvals = numvals;
  }
}

/**
 * @brief PLAIN bool encoder
 *
 * Packs one bit per value (8 values per output byte) using warp shuffles; every 8th
 * thread stores the combined byte.
 *
 * @param[in,out] s Page encode state
 * @param[in] numvals Total count of input values
 * @param[in] flush nonzero if last batch in block
 * @param[in] t thread id (0..127)
 */
static __device__ void PlainBoolEncode(rle_page_enc_state_s* s,
                                       uint32_t numvals,
                                       uint32_t flush,
                                       uint32_t t)
{
  uint32_t rle_pos = s->rle_pos;
  uint8_t* dst = s->rle_out;
  while (rle_pos < numvals) {
    uint32_t pos = rle_pos + t;
    uint32_t v = (pos < numvals) ? s->vals[rolling_idx(pos)] : 0;
    uint32_t n = min(numvals - rle_pos, 128);
    // only whole bytes are written until the final (flush) batch pads the last byte
    uint32_t nbytes = (n + ((flush) ? 7 : 0)) >> 3;
    if (!nbytes) { break; }
    v |= shuffle_xor(v, 1) << 1;
    v |= shuffle_xor(v, 2) << 2;
    v |= shuffle_xor(v, 4) << 4;
    if (t < n && !(t & 7)) { dst[t >> 3] = v; }
    rle_pos = min(rle_pos + nbytes * 8, numvals);
    dst += nbytes;
  }
  __syncthreads();
  if (!t) {
    s->rle_pos = rle_pos;
    s->rle_numvals = numvals;
    s->rle_out = dst;
  }
}

/**
 * @brief Determines the difference between the Proleptic Gregorian Calendar epoch (1970-01-01
 * 00:00:00 UTC) and the Julian date epoch (-4713-11-24 12:00:00 UTC).
 *
 * @return The difference between two epochs in `cuda::std::chrono::duration` format with a period
 * of hours.
 */
constexpr auto julian_calendar_epoch_diff()
{
  using namespace cuda::std::chrono;
  using namespace cuda::std::chrono_literals;
  return sys_days{January / 1 / 1970} - (sys_days{November / 24 / -4713} + 12h);
}

/**
 * @brief Converts number `v` of periods of type `PeriodT` into a pair with nanoseconds since
 * midnight and number of Julian days. Does not deal with time zones. Used by INT96 code.
 *
 * @tparam PeriodT a ratio representing the tick period in duration
 * @param v count of ticks since epoch
 * @return A pair of (nanoseconds, days) where nanoseconds is the number of nanoseconds
 * elapsed in the day and days is the number of days from Julian epoch.
 */
template <typename PeriodT>
__device__ auto julian_days_with_time(int64_t v)
{
  using namespace cuda::std::chrono;
  auto const dur_total = duration<int64_t, PeriodT>{v};
  auto const dur_days = floor<days>(dur_total);
  auto const dur_time_of_day = dur_total - dur_days;
  auto const dur_time_of_day_nanos = duration_cast<nanoseconds>(dur_time_of_day);
  auto const julian_days = dur_days + ceil<days>(julian_calendar_epoch_diff());
  return std::make_pair(dur_time_of_day_nanos, julian_days);
}

// this has been split out into its own kernel because of the amount of shared memory required
// for the state buffer. encode kernels that don't use the RLE buffer can get started while
// the level data is encoded.
// blockDim(128, 1, 1) template <int block_size> __global__ void __launch_bounds__(block_size, 8) gpuEncodePageLevels(device_span<EncPage> pages, bool write_v2_headers, encode_kernel_mask kernel_mask) { __shared__ __align__(8) rle_page_enc_state_s state_g; auto* const s = &state_g; uint32_t const t = threadIdx.x; if (t == 0) { state_g = rle_page_enc_state_s{}; s->page = pages[blockIdx.x]; s->ck = *s->page.chunk; s->col = *s->ck.col_desc; s->cur = s->page.page_data + s->page.max_hdr_size; // init V2 info s->page.def_lvl_bytes = 0; s->page.rep_lvl_bytes = 0; s->page.num_nulls = 0; s->rle_len_pos = nullptr; } __syncthreads(); if (BitAnd(s->page.kernel_mask, kernel_mask) == 0) { return; } auto const is_v2 = s->page.page_type == PageType::DATA_PAGE_V2; // Encode Repetition and Definition levels if (s->page.page_type != PageType::DICTIONARY_PAGE && (s->col.num_def_level_bits()) != 0 && // This means max definition level is not 0 (nullable) (s->col.num_rep_level_bits()) == 0 // This means there are no repetition levels (non-list) ) { // Calculate definition levels from validity uint32_t def_lvl_bits = s->col.num_def_level_bits(); if (def_lvl_bits != 0) { if (!t) { s->rle_run = 0; s->rle_pos = 0; s->rle_numvals = 0; s->rle_out = s->cur; if (not is_v2) { s->rle_out += 4; // save space for length } } __syncthreads(); while (s->rle_numvals < s->page.num_rows) { uint32_t rle_numvals = s->rle_numvals; uint32_t nrows = min(s->page.num_rows - rle_numvals, 128); auto row = s->page.start_row + rle_numvals + t; // Definition level encodes validity. 
Checks the valid map and if it is valid, then sets the // def_lvl accordingly and sets it in s->vals which is then given to RleEncode to encode uint32_t def_lvl = [&]() { bool within_bounds = rle_numvals + t < s->page.num_rows && row < s->col.num_rows; if (not within_bounds) { return 0u; } uint32_t def = 0; size_type l = 0; bool is_col_struct = false; auto col = *s->col.parent_column; do { // If col not nullable then it does not contribute to def levels if (s->col.nullability[l]) { if (col.is_valid(row)) { ++def; } else { // We have found the shallowest level at which this row is null break; } } is_col_struct = (col.type().id() == type_id::STRUCT); if (is_col_struct) { row += col.offset(); col = col.child(0); ++l; } } while (is_col_struct); return def; }(); s->vals[rolling_idx(rle_numvals + t)] = def_lvl; __syncthreads(); rle_numvals += nrows; RleEncode(s, rle_numvals, def_lvl_bits, (rle_numvals == s->page.num_rows), t); __syncthreads(); } if (t < 32) { uint8_t* const cur = s->cur; uint8_t* const rle_out = s->rle_out; // V2 does not write the RLE length field uint32_t const rle_bytes = static_cast<uint32_t>(rle_out - cur) - (is_v2 ? 0 : RLE_LENGTH_FIELD_LEN); if (not is_v2 && t < RLE_LENGTH_FIELD_LEN) { cur[t] = rle_bytes >> (t * 8); } __syncwarp(); if (t == 0) { s->cur = rle_out; s->page.def_lvl_bytes = rle_bytes; } } } } else if (s->page.page_type != PageType::DICTIONARY_PAGE && s->col.num_rep_level_bits() != 0 // This means there ARE repetition levels (has list) ) { auto encode_levels = [&](uint8_t const* lvl_val_data, uint32_t nbits, uint32_t& lvl_bytes) { // For list types, the repetition and definition levels are pre-calculated. We just need to // encode and write them now. 
if (!t) { s->rle_run = 0; s->rle_pos = 0; s->rle_numvals = 0; s->rle_out = s->cur; if (not is_v2) { s->rle_out += 4; // save space for length } } __syncthreads(); size_type page_first_val_idx = s->col.level_offsets[s->page.start_row]; size_type col_last_val_idx = s->col.level_offsets[s->col.num_rows]; while (s->rle_numvals < s->page.num_values) { uint32_t rle_numvals = s->rle_numvals; uint32_t nvals = min(s->page.num_values - rle_numvals, 128); uint32_t idx = page_first_val_idx + rle_numvals + t; uint32_t lvl_val = (rle_numvals + t < s->page.num_values && idx < col_last_val_idx) ? lvl_val_data[idx] : 0; s->vals[rolling_idx(rle_numvals + t)] = lvl_val; __syncthreads(); rle_numvals += nvals; RleEncode(s, rle_numvals, nbits, (rle_numvals == s->page.num_values), t); __syncthreads(); } if (t < 32) { uint8_t* const cur = s->cur; uint8_t* const rle_out = s->rle_out; // V2 does not write the RLE length field uint32_t const rle_bytes = static_cast<uint32_t>(rle_out - cur) - (is_v2 ? 0 : RLE_LENGTH_FIELD_LEN); if (not is_v2 && t < RLE_LENGTH_FIELD_LEN) { cur[t] = rle_bytes >> (t * 8); } __syncwarp(); if (t == 0) { s->cur = rle_out; lvl_bytes = rle_bytes; } } }; encode_levels(s->col.rep_values, s->col.num_rep_level_bits(), s->page.rep_lvl_bytes); __syncthreads(); encode_levels(s->col.def_values, s->col.num_def_level_bits(), s->page.def_lvl_bytes); } if (t == 0) { pages[blockIdx.x] = s->page; } } template <int block_size, typename state_buf> __device__ void finish_page_encode(state_buf* s, uint32_t valid_count, uint8_t const* end_ptr, device_span<EncPage> pages, device_span<device_span<uint8_t const>> comp_in, device_span<device_span<uint8_t>> comp_out, device_span<compression_result> comp_results, bool write_v2_headers) { auto const t = threadIdx.x; // V2 does not compress rep and def level data size_t const skip_comp_size = write_v2_headers ? 
s->page.def_lvl_bytes + s->page.rep_lvl_bytes : 0; if (t == 0) { // only need num_nulls for v2 data page headers if (write_v2_headers) { s->page.num_nulls = s->page.num_values - valid_count; } uint8_t const* const base = s->page.page_data + s->page.max_hdr_size; auto const actual_data_size = static_cast<uint32_t>(end_ptr - base); if (actual_data_size > s->page.max_data_size) { // FIXME(ets): this needs to do error propagation back to the host CUDF_UNREACHABLE("detected possible page data corruption"); } s->page.max_data_size = actual_data_size; if (not comp_in.empty()) { comp_in[blockIdx.x] = {base + skip_comp_size, actual_data_size - skip_comp_size}; comp_out[blockIdx.x] = {s->page.compressed_data + s->page.max_hdr_size + skip_comp_size, 0}; // size is unused } pages[blockIdx.x] = s->page; if (not comp_results.empty()) { comp_results[blockIdx.x] = {0, compression_status::FAILURE}; pages[blockIdx.x].comp_res = &comp_results[blockIdx.x]; } } // copy uncompressed bytes over if (skip_comp_size != 0 && not comp_in.empty()) { uint8_t* const src = s->page.page_data + s->page.max_hdr_size; uint8_t* const dst = s->page.compressed_data + s->page.max_hdr_size; for (int i = t; i < skip_comp_size; i += block_size) { dst[i] = src[i]; } } } // PLAIN page data encoder // blockDim(128, 1, 1) template <int block_size> __global__ void __launch_bounds__(block_size, 8) gpuEncodePages(device_span<EncPage> pages, device_span<device_span<uint8_t const>> comp_in, device_span<device_span<uint8_t>> comp_out, device_span<compression_result> comp_results, bool write_v2_headers) { __shared__ __align__(8) page_enc_state_s<0> state_g; using block_reduce = cub::BlockReduce<uint32_t, block_size>; using block_scan = cub::BlockScan<uint32_t, block_size>; __shared__ union { typename block_reduce::TempStorage reduce_storage; typename block_scan::TempStorage scan_storage; } temp_storage; auto* const s = &state_g; uint32_t t = threadIdx.x; if (t == 0) { state_g = page_enc_state_s<0>{}; s->page = 
pages[blockIdx.x]; s->ck = *s->page.chunk; s->col = *s->ck.col_desc; s->rle_len_pos = nullptr; // get s->cur back to where it was at the end of encoding the rep and def level data s->cur = s->page.page_data + s->page.max_hdr_size + s->page.def_lvl_bytes + s->page.rep_lvl_bytes; // if V1 data page, need space for the RLE length fields if (s->page.page_type == PageType::DATA_PAGE) { if (s->col.num_def_level_bits() != 0) { s->cur += RLE_LENGTH_FIELD_LEN; } if (s->col.num_rep_level_bits() != 0) { s->cur += RLE_LENGTH_FIELD_LEN; } } } __syncthreads(); if (BitAnd(s->page.kernel_mask, encode_kernel_mask::PLAIN) == 0) { return; } // Encode data values __syncthreads(); auto const physical_type = s->col.physical_type; auto const type_id = s->col.leaf_column->type().id(); auto const dtype_len_out = physical_type_len(physical_type, type_id); auto const dtype_len_in = [&]() -> uint32_t { if (physical_type == INT32) { return int32_logical_len(type_id); } if (physical_type == INT96) { return sizeof(int64_t); } return dtype_len_out; }(); if (t == 0) { uint8_t* dst = s->cur; s->rle_run = 0; s->rle_pos = 0; s->rle_numvals = 0; s->rle_out = dst; s->page.encoding = determine_encoding(s->page.page_type, physical_type, s->ck.use_dictionary, write_v2_headers); s->page_start_val = row_to_value_idx(s->page.start_row, s->col); s->chunk_start_val = row_to_value_idx(s->ck.start_row, s->col); } __syncthreads(); uint32_t num_valid = 0; for (uint32_t cur_val_idx = 0; cur_val_idx < s->page.num_leaf_values;) { uint32_t nvals = min(s->page.num_leaf_values - cur_val_idx, block_size); uint32_t len, pos; auto [is_valid, val_idx] = [&]() { uint32_t val_idx; uint32_t is_valid; size_type const val_idx_in_block = cur_val_idx + t; if (s->page.page_type == PageType::DICTIONARY_PAGE) { val_idx = val_idx_in_block; is_valid = (val_idx < s->page.num_leaf_values); if (is_valid) { val_idx = s->ck.dict_data[val_idx]; } } else { size_type const val_idx_in_leaf_col = s->page_start_val + val_idx_in_block; is_valid = 
(val_idx_in_leaf_col < s->col.leaf_column->size() && val_idx_in_block < s->page.num_leaf_values) ? s->col.leaf_column->is_valid(val_idx_in_leaf_col) : 0; val_idx = val_idx_in_leaf_col; } return std::make_tuple(is_valid, val_idx); }(); if (is_valid) { num_valid++; } cur_val_idx += nvals; // Non-dictionary encoding uint8_t* dst = s->cur; if (is_valid) { len = dtype_len_out; if (physical_type == BYTE_ARRAY) { if (type_id == type_id::STRING) { len += s->col.leaf_column->element<string_view>(val_idx).size_bytes(); } else if (s->col.output_as_byte_array && type_id == type_id::LIST) { len += get_element<statistics::byte_array_view>(*s->col.leaf_column, val_idx).size_bytes(); } } } else { len = 0; } uint32_t total_len = 0; block_scan(temp_storage.scan_storage).ExclusiveSum(len, pos, total_len); __syncthreads(); if (t == 0) { s->cur = dst + total_len; } if (is_valid) { switch (physical_type) { case INT32: [[fallthrough]]; case FLOAT: { auto const v = [dtype_len = dtype_len_in, idx = val_idx, col = s->col.leaf_column, scale = s->col.ts_scale == 0 ? 
1 : s->col.ts_scale]() -> int32_t { switch (dtype_len) { case 8: return col->element<int64_t>(idx) * scale; case 4: return col->element<int32_t>(idx) * scale; case 2: return col->element<int16_t>(idx) * scale; default: return col->element<int8_t>(idx) * scale; } }(); dst[pos + 0] = v; dst[pos + 1] = v >> 8; dst[pos + 2] = v >> 16; dst[pos + 3] = v >> 24; } break; case INT64: { int64_t v = s->col.leaf_column->element<int64_t>(val_idx); int32_t ts_scale = s->col.ts_scale; if (ts_scale != 0) { if (ts_scale < 0) { v /= -ts_scale; } else { v *= ts_scale; } } dst[pos + 0] = v; dst[pos + 1] = v >> 8; dst[pos + 2] = v >> 16; dst[pos + 3] = v >> 24; dst[pos + 4] = v >> 32; dst[pos + 5] = v >> 40; dst[pos + 6] = v >> 48; dst[pos + 7] = v >> 56; } break; case INT96: { int64_t v = s->col.leaf_column->element<int64_t>(val_idx); int32_t ts_scale = s->col.ts_scale; if (ts_scale != 0) { if (ts_scale < 0) { v /= -ts_scale; } else { v *= ts_scale; } } auto const [last_day_nanos, julian_days] = [&] { using namespace cuda::std::chrono; switch (s->col.leaf_column->type().id()) { case type_id::TIMESTAMP_SECONDS: case type_id::TIMESTAMP_MILLISECONDS: { return julian_days_with_time<cuda::std::milli>(v); } break; case type_id::TIMESTAMP_MICROSECONDS: case type_id::TIMESTAMP_NANOSECONDS: { return julian_days_with_time<cuda::std::micro>(v); } break; } return julian_days_with_time<cuda::std::nano>(0); }(); // the 12 bytes of fixed length data. 
v = last_day_nanos.count(); dst[pos + 0] = v; dst[pos + 1] = v >> 8; dst[pos + 2] = v >> 16; dst[pos + 3] = v >> 24; dst[pos + 4] = v >> 32; dst[pos + 5] = v >> 40; dst[pos + 6] = v >> 48; dst[pos + 7] = v >> 56; uint32_t w = julian_days.count(); dst[pos + 8] = w; dst[pos + 9] = w >> 8; dst[pos + 10] = w >> 16; dst[pos + 11] = w >> 24; } break; case DOUBLE: { auto v = s->col.leaf_column->element<double>(val_idx); memcpy(dst + pos, &v, 8); } break; case BYTE_ARRAY: { auto const bytes = [](cudf::type_id const type_id, column_device_view const* leaf_column, uint32_t const val_idx) -> void const* { switch (type_id) { case type_id::STRING: return reinterpret_cast<void const*>( leaf_column->element<string_view>(val_idx).data()); case type_id::LIST: return reinterpret_cast<void const*>( get_element<statistics::byte_array_view>(*(leaf_column), val_idx).data()); default: CUDF_UNREACHABLE("invalid type id for byte array writing!"); } }(type_id, s->col.leaf_column, val_idx); uint32_t v = len - 4; // string length dst[pos + 0] = v; dst[pos + 1] = v >> 8; dst[pos + 2] = v >> 16; dst[pos + 3] = v >> 24; if (v != 0) memcpy(dst + pos + 4, bytes, v); } break; case FIXED_LEN_BYTE_ARRAY: { if (type_id == type_id::DECIMAL128) { // When using FIXED_LEN_BYTE_ARRAY for decimals, the rep is encoded in big-endian auto const v = s->col.leaf_column->element<numeric::decimal128>(val_idx).value(); auto const v_char_ptr = reinterpret_cast<char const*>(&v); thrust::copy(thrust::seq, thrust::make_reverse_iterator(v_char_ptr + sizeof(v)), thrust::make_reverse_iterator(v_char_ptr), dst + pos); } } break; } } __syncthreads(); } uint32_t const valid_count = block_reduce(temp_storage.reduce_storage).Sum(num_valid); finish_page_encode<block_size>( s, valid_count, s->cur, pages, comp_in, comp_out, comp_results, write_v2_headers); } // DICTIONARY page data encoder // blockDim(128, 1, 1) template <int block_size> __global__ void __launch_bounds__(block_size, 8) gpuEncodeDictPages(device_span<EncPage> 
pages, device_span<device_span<uint8_t const>> comp_in, device_span<device_span<uint8_t>> comp_out, device_span<compression_result> comp_results, bool write_v2_headers) { __shared__ __align__(8) rle_page_enc_state_s state_g; using block_reduce = cub::BlockReduce<uint32_t, block_size>; using block_scan = cub::BlockScan<uint32_t, block_size>; __shared__ union { typename block_reduce::TempStorage reduce_storage; typename block_scan::TempStorage scan_storage; } temp_storage; auto* const s = &state_g; uint32_t t = threadIdx.x; if (t == 0) { state_g = rle_page_enc_state_s{}; s->page = pages[blockIdx.x]; s->ck = *s->page.chunk; s->col = *s->ck.col_desc; s->rle_len_pos = nullptr; // get s->cur back to where it was at the end of encoding the rep and def level data s->cur = s->page.page_data + s->page.max_hdr_size + s->page.def_lvl_bytes + s->page.rep_lvl_bytes; // if V1 data page, need space for the RLE length fields if (s->page.page_type == PageType::DATA_PAGE) { if (s->col.num_def_level_bits() != 0) { s->cur += RLE_LENGTH_FIELD_LEN; } if (s->col.num_rep_level_bits() != 0) { s->cur += RLE_LENGTH_FIELD_LEN; } } } __syncthreads(); if (BitAnd(s->page.kernel_mask, encode_kernel_mask::DICTIONARY) == 0) { return; } // Encode data values __syncthreads(); auto const physical_type = s->col.physical_type; auto const type_id = s->col.leaf_column->type().id(); auto const dtype_len_out = physical_type_len(physical_type, type_id); auto const dtype_len_in = [&]() -> uint32_t { if (physical_type == INT32) { return int32_logical_len(type_id); } if (physical_type == INT96) { return sizeof(int64_t); } return dtype_len_out; }(); // TODO assert dict_bits >= 0 auto const dict_bits = (physical_type == BOOLEAN) ? 1 : (s->ck.use_dictionary and s->page.page_type != PageType::DICTIONARY_PAGE) ? 
s->ck.dict_rle_bits : -1; if (t == 0) { uint8_t* dst = s->cur; s->rle_run = 0; s->rle_pos = 0; s->rle_numvals = 0; s->rle_out = dst; s->page.encoding = determine_encoding(s->page.page_type, physical_type, s->ck.use_dictionary, write_v2_headers); if (dict_bits >= 0 && physical_type != BOOLEAN) { dst[0] = dict_bits; s->rle_out = dst + 1; } else if (write_v2_headers && physical_type == BOOLEAN) { // save space for RLE length. we don't know the total length yet. s->rle_out = dst + RLE_LENGTH_FIELD_LEN; s->rle_len_pos = dst; } s->page_start_val = row_to_value_idx(s->page.start_row, s->col); s->chunk_start_val = row_to_value_idx(s->ck.start_row, s->col); } __syncthreads(); uint32_t num_valid = 0; for (uint32_t cur_val_idx = 0; cur_val_idx < s->page.num_leaf_values;) { uint32_t nvals = min(s->page.num_leaf_values - cur_val_idx, block_size); auto [is_valid, val_idx] = [&]() { size_type const val_idx_in_block = cur_val_idx + t; size_type const val_idx_in_leaf_col = s->page_start_val + val_idx_in_block; uint32_t const is_valid = (val_idx_in_leaf_col < s->col.leaf_column->size() && val_idx_in_block < s->page.num_leaf_values) ? s->col.leaf_column->is_valid(val_idx_in_leaf_col) : 0; // need to test for use_dictionary because it might be boolean uint32_t const val_idx = (s->ck.use_dictionary) ? 
val_idx_in_leaf_col - s->chunk_start_val : val_idx_in_leaf_col;
      return std::make_tuple(is_valid, val_idx);
    }();

    if (is_valid) { num_valid++; }
    cur_val_idx += nvals;

    // Dictionary encoding
    if (dict_bits > 0) {
      uint32_t rle_numvals;
      uint32_t rle_numvals_in_block;
      uint32_t pos;
      // Compact valid values: exclusive scan gives each thread its slot in the
      // shared staging buffer, and the block total of valid values this wave.
      block_scan(temp_storage.scan_storage).ExclusiveSum(is_valid, pos, rle_numvals_in_block);
      rle_numvals = s->rle_numvals;
      if (is_valid) {
        uint32_t v;
        if (physical_type == BOOLEAN) {
          v = s->col.leaf_column->element<uint8_t>(val_idx);
        } else {
          v = s->ck.dict_index[val_idx];
        }
        s->vals[rolling_idx(rle_numvals + pos)] = v;
      }
      rle_numvals += rle_numvals_in_block;
      __syncthreads();
      // V1 boolean pages use PLAIN bit-packing; everything else goes through RLE.
      if ((!write_v2_headers) && (physical_type == BOOLEAN)) {
        PlainBoolEncode(s, rle_numvals, (cur_val_idx == s->page.num_leaf_values), t);
      } else {
        RleEncode(s, rle_numvals, dict_bits, (cur_val_idx == s->page.num_leaf_values), t);
      }
      __syncthreads();
    }
    if (t == 0) { s->cur = s->rle_out; }
    __syncthreads();
  }

  uint32_t const valid_count = block_reduce(temp_storage.reduce_storage).Sum(num_valid);

  // save RLE length if necessary
  if (s->rle_len_pos != nullptr && t < 32) {
    // size doesn't include the 4 bytes for the length
    auto const rle_size = static_cast<uint32_t>(s->cur - s->rle_len_pos) - RLE_LENGTH_FIELD_LEN;
    // little-endian store of the length, one byte per lane
    if (t < RLE_LENGTH_FIELD_LEN) { s->rle_len_pos[t] = rle_size >> (t * 8); }
    __syncwarp();
  }

  finish_page_encode<block_size>(
    s, valid_count, s->cur, pages, comp_in, comp_out, comp_results, write_v2_headers);
}

// DELTA_BINARY_PACKED page data encoder
// blockDim(128, 1, 1)
template <int block_size>
__global__ void __launch_bounds__(block_size, 8)
  gpuEncodeDeltaBinaryPages(device_span<EncPage> pages,
                            device_span<device_span<uint8_t const>> comp_in,
                            device_span<device_span<uint8_t>> comp_out,
                            device_span<compression_result> comp_results)
{
  // block of shared memory for value storage and bit packing
  __shared__ uleb128_t delta_shared[delta::buffer_size + delta::block_size];
  __shared__ __align__(8) page_enc_state_s<0> state_g;
  using block_reduce = cub::BlockReduce<uint32_t, block_size>;
  __shared__ union {
    typename block_reduce::TempStorage reduce_storage;
    typename delta::index_scan::TempStorage delta_index_tmp;
    typename delta::block_reduce::TempStorage delta_reduce_tmp;
    typename delta::warp_reduce::TempStorage delta_warp_red_tmp[delta::num_mini_blocks];
  } temp_storage;

  auto* const s = &state_g;
  uint32_t t    = threadIdx.x;

  if (t == 0) {
    state_g        = page_enc_state_s<0>{};
    s->page        = pages[blockIdx.x];
    s->ck          = *s->page.chunk;
    s->col         = *s->ck.col_desc;
    s->rle_len_pos = nullptr;
    // get s->cur back to where it was at the end of encoding the rep and def level data
    s->cur =
      s->page.page_data + s->page.max_hdr_size + s->page.def_lvl_bytes + s->page.rep_lvl_bytes;
  }
  __syncthreads();

  // Only pages flagged for DELTA_BINARY_PACKED are handled here.
  if (BitAnd(s->page.kernel_mask, encode_kernel_mask::DELTA_BINARY) == 0) { return; }

  // Encode data values
  __syncthreads();
  auto const physical_type = s->col.physical_type;
  auto const type_id       = s->col.leaf_column->type().id();
  auto const dtype_len_out = physical_type_len(physical_type, type_id);
  // input element width; INT32 may be backed by narrower logical types
  auto const dtype_len_in  = [&]() -> uint32_t {
    if (physical_type == INT32) { return int32_logical_len(type_id); }
    if (physical_type == INT96) { return sizeof(int64_t); }
    return dtype_len_out;
  }();

  if (t == 0) {
    uint8_t* dst       = s->cur;
    s->rle_run         = 0;
    s->rle_pos         = 0;
    s->rle_numvals     = 0;
    s->rle_out         = dst;
    s->page.encoding   = Encoding::DELTA_BINARY_PACKED;
    s->page_start_val  = row_to_value_idx(s->page.start_row, s->col);
    s->chunk_start_val = row_to_value_idx(s->ck.start_row, s->col);
  }
  __syncthreads();

  // need to know the number of valid values for the null values calculation and to size
  // the delta binary encoder.
  uint32_t valid_count = 0;
  if (not s->col.leaf_column->nullable()) {
    valid_count = s->page.num_leaf_values;
  } else {
    // count valid leaf values in block-wide waves, then reduce across the block
    uint32_t num_valid = 0;
    for (uint32_t cur_val_idx = 0; cur_val_idx < s->page.num_leaf_values;) {
      uint32_t const nvals = min(s->page.num_leaf_values - cur_val_idx, block_size);
      size_type const val_idx_in_block    = cur_val_idx + t;
      size_type const val_idx_in_leaf_col = s->page_start_val + val_idx_in_block;
      if (val_idx_in_leaf_col < s->col.leaf_column->size() &&
          val_idx_in_block < s->page.num_leaf_values &&
          s->col.leaf_column->is_valid(val_idx_in_leaf_col)) {
        num_valid++;
      }
      cur_val_idx += nvals;
    }
    valid_count = block_reduce(temp_storage.reduce_storage).Sum(num_valid);
  }

  uint8_t const* delta_ptr = nullptr;  // this will be the end of delta block pointer

  // Dispatch to the delta encoder instantiated for the input's actual width and
  // signedness (deltas must be computed in the logical type to wrap correctly).
  if (physical_type == INT32) {
    switch (dtype_len_in) {
      case 8: {
        // only DURATIONS map to 8 bytes, so safe to just use signed here?
        delta_ptr = delta_encode<int64_t>(s, valid_count, delta_shared, &temp_storage);
        break;
      }
      case 4: {
        if (type_id == type_id::UINT32) {
          delta_ptr = delta_encode<uint32_t>(s, valid_count, delta_shared, &temp_storage);
        } else {
          delta_ptr = delta_encode<int32_t>(s, valid_count, delta_shared, &temp_storage);
        }
        break;
      }
      case 2: {
        if (type_id == type_id::UINT16) {
          delta_ptr = delta_encode<uint16_t>(s, valid_count, delta_shared, &temp_storage);
        } else {
          delta_ptr = delta_encode<int16_t>(s, valid_count, delta_shared, &temp_storage);
        }
        break;
      }
      case 1: {
        if (type_id == type_id::UINT8) {
          delta_ptr = delta_encode<uint8_t>(s, valid_count, delta_shared, &temp_storage);
        } else {
          delta_ptr = delta_encode<int8_t>(s, valid_count, delta_shared, &temp_storage);
        }
        break;
      }
      default: CUDF_UNREACHABLE("invalid dtype_len_in when encoding DELTA_BINARY_PACKED");
    }
  } else {
    if (type_id == type_id::UINT64) {
      delta_ptr = delta_encode<uint64_t>(s, valid_count, delta_shared, &temp_storage);
    } else {
      delta_ptr = delta_encode<int64_t>(s, valid_count, delta_shared, &temp_storage);
    }
  }

  // DELTA_BINARY_PACKED is only written with V2 page headers, hence `true`.
  finish_page_encode<block_size>(
    s, valid_count, delta_ptr, pages, comp_in, comp_out, comp_results, true);
}

constexpr int decide_compression_warps_in_block = 4;
constexpr int decide_compression_block_size =
  decide_compression_warps_in_block * cudf::detail::warp_size;

// blockDim(decide_compression_block_size, 1, 1)
// One warp per column chunk: decides whether the compressed form of the chunk
// is actually smaller (and error-free) and records sizes/encodings accordingly.
__global__ void __launch_bounds__(decide_compression_block_size)
  gpuDecideCompression(device_span<EncColumnChunk> chunks)
{
  __shared__ __align__(8) EncColumnChunk ck_g[decide_compression_warps_in_block];
  __shared__ __align__(4) unsigned int compression_error[decide_compression_warps_in_block];
  using warp_reduce = cub::WarpReduce<uint32_t>;
  __shared__ typename warp_reduce::TempStorage temp_storage[decide_compression_warps_in_block][2];

  auto const lane_id  = threadIdx.x % cudf::detail::warp_size;
  auto const warp_id  = threadIdx.x / cudf::detail::warp_size;
  auto const chunk_id = blockIdx.x * decide_compression_warps_in_block + warp_id;

  if (chunk_id >= chunks.size()) { return; }

  if (lane_id == 0) {
    ck_g[warp_id]              = chunks[chunk_id];
    compression_error[warp_id] = 0;
  }
  __syncwarp();

  // Each lane accumulates sizes over a strided subset of the chunk's pages.
  uint32_t uncompressed_data_size = 0;
  uint32_t compressed_data_size   = 0;
  uint32_t encodings              = 0;
  auto const num_pages            = ck_g[warp_id].num_pages;
  for (auto page_id = lane_id; page_id < num_pages; page_id += cudf::detail::warp_size) {
    auto const& curr_page     = ck_g[warp_id].pages[page_id];
    auto const page_data_size = curr_page.max_data_size;
    auto const is_v2          = curr_page.page_type == PageType::DATA_PAGE_V2;
    auto const lvl_bytes      = is_v2 ?
curr_page.def_lvl_bytes + curr_page.rep_lvl_bytes : 0;
    uncompressed_data_size += page_data_size;
    // V2 level data is stored uncompressed, so add it back to the compressed total.
    if (auto comp_res = curr_page.comp_res; comp_res != nullptr) {
      compressed_data_size += comp_res->bytes_written + lvl_bytes;
      if (comp_res->status != compression_status::SUCCESS) {
        atomicOr(&compression_error[warp_id], 1);
      }
    }
    // collect encoding info for the chunk metadata
    encodings |= encoding_to_mask(curr_page.encoding);
  }
  uncompressed_data_size = warp_reduce(temp_storage[warp_id][0]).Sum(uncompressed_data_size);
  compressed_data_size   = warp_reduce(temp_storage[warp_id][1]).Sum(compressed_data_size);
  __syncwarp();
  encodings = warp_reduce(temp_storage[warp_id][0]).Reduce(encodings, BitwiseOr{});
  __syncwarp();

  if (lane_id == 0) {
    // only write compressed data if compression succeeded AND actually saved space
    auto const write_compressed = compressed_data_size != 0 and compression_error[warp_id] == 0 and
                                  compressed_data_size < uncompressed_data_size;
    chunks[chunk_id].is_compressed = write_compressed;
    chunks[chunk_id].bfr_size      = uncompressed_data_size;
    chunks[chunk_id].compressed_size =
      write_compressed ? compressed_data_size : uncompressed_data_size;

    // if there is repetition or definition level data add RLE encoding
    auto const rle_bits =
      ck_g[warp_id].col_desc->num_def_level_bits() + ck_g[warp_id].col_desc->num_rep_level_bits();
    if (rle_bits > 0) { encodings |= encoding_to_mask(Encoding::RLE); }
    chunks[chunk_id].encodings = encodings;
  }
}

/**
 * Minimal thrift compact protocol support
 */

// Write a single byte; returns the advanced output pointer.
inline __device__ uint8_t* cpw_put_uint8(uint8_t* p, uint8_t v)
{
  *p++ = v;
  return p;
}

// Write an unsigned 32-bit value as a ULEB128 varint.
inline __device__ uint8_t* cpw_put_uint32(uint8_t* p, uint32_t v)
{
  while (v > 0x7f) {
    *p++ = v | 0x80;
    v >>= 7;
  }
  *p++ = v;
  return p;
}

// Write an unsigned 64-bit value as a ULEB128 varint.
inline __device__ uint8_t* cpw_put_uint64(uint8_t* p, uint64_t v)
{
  while (v > 0x7f) {
    *p++ = v | 0x80;
    v >>= 7;
  }
  *p++ = v;
  return p;
}

// Write a signed 32-bit value: zigzag-encode, then varint.
inline __device__ uint8_t* cpw_put_int32(uint8_t* p, int32_t v)
{
  int32_t s = (v < 0);
  return cpw_put_uint32(p, (v ^ -s) * 2 + s);
}

// Write a signed 64-bit value: zigzag-encode, then varint.
inline __device__ uint8_t* cpw_put_int64(uint8_t* p, int64_t v)
{
  int64_t s = (v < 0);
  return cpw_put_uint64(p, (v ^ -s) * 2 + s);
}

// Write a thrift compact field header: short form (delta from previous field id
// packed with the type) when the delta fits in 4 bits, else long form.
inline __device__ uint8_t* cpw_put_fldh(uint8_t* p, int f, int cur, int t)
{
  if (f > cur && f <= cur + 15) {
    *p++ = ((f - cur) << 4) | t;
    return p;
  } else {
    *p++ = t;
    return cpw_put_int32(p, f);
  }
}

/**
 * @brief Stateful device-side writer for thrift compact protocol structures.
 *
 * Tracks the output position and the previous field id so field headers can use
 * the compact delta form. Used to emit Parquet page headers, statistics, and
 * column index blobs.
 */
class header_encoder {
  uint8_t* current_header_ptr;  // next byte to be written
  int current_field_index;      // previous field id, for delta-encoded headers

 public:
  inline __device__ header_encoder(uint8_t* header_start)
    : current_header_ptr(header_start), current_field_index(0)
  {
  }

  inline __device__ void field_struct_begin(int field)
  {
    current_header_ptr =
      cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_STRUCT);
    current_field_index = 0;  // nested struct starts its own field numbering
  }

  inline __device__ void field_struct_end(int field)
  {
    *current_header_ptr++ = 0;  // STOP byte terminates the struct
    current_field_index   = field;
  }

  inline __device__ void field_list_begin(int field, size_t len, int type)
  {
    current_header_ptr = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_LIST);
    // list header: size in high nibble (0xF means "size follows as varint"), type in low nibble
    current_header_ptr = cpw_put_uint8(
      current_header_ptr, static_cast<uint8_t>((std::min(len, size_t{0xfu}) << 4) |
type));
    if (len >= 0xf) { current_header_ptr = cpw_put_uint32(current_header_ptr, len); }
    current_field_index = 0;
  }

  inline __device__ void field_list_end(int field) { current_field_index = field; }

  // Bare boolean element (inside a list): encoded as a TRUE/FALSE type byte.
  inline __device__ void put_bool(bool value)
  {
    current_header_ptr = cpw_put_uint8(current_header_ptr, value ? ST_FLD_TRUE : ST_FLD_FALSE);
  }

  // Bare binary element: varint length followed by the raw bytes.
  inline __device__ void put_binary(void const* value, uint32_t length)
  {
    current_header_ptr = cpw_put_uint32(current_header_ptr, length);
    memcpy(current_header_ptr, value, length);
    current_header_ptr += length;
  }

  // Bare int64 element: zigzag varint.
  template <typename T>
  inline __device__ void put_int64(T value)
  {
    current_header_ptr = cpw_put_int64(current_header_ptr, static_cast<int64_t>(value));
  }

  // Boolean field: the value is carried in the field header's type nibble.
  inline __device__ void field_bool(int field, bool value)
  {
    current_header_ptr = cpw_put_fldh(
      current_header_ptr, field, current_field_index, value ? ST_FLD_TRUE : ST_FLD_FALSE);
    current_field_index = field;
  }

  template <typename T>
  inline __device__ void field_int32(int field, T value)
  {
    current_header_ptr  = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I32);
    current_header_ptr  = cpw_put_int32(current_header_ptr, static_cast<int32_t>(value));
    current_field_index = field;
  }

  template <typename T>
  inline __device__ void field_int64(int field, T value)
  {
    current_header_ptr  = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I64);
    current_header_ptr  = cpw_put_int64(current_header_ptr, static_cast<int64_t>(value));
    current_field_index = field;
  }

  inline __device__ void field_binary(int field, void const* value, uint32_t length)
  {
    current_header_ptr =
      cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_BINARY);
    current_header_ptr = cpw_put_uint32(current_header_ptr, length);
    memcpy(current_header_ptr, value, length);
    current_header_ptr += length;
    current_field_index = field;
  }

  // Finish encoding; writes a STOP byte unless termination_flag is set, and
  // returns the one-past-the-end pointer through header_end.
  inline __device__ void end(uint8_t** header_end, bool termination_flag = true)
  {
    if (not termination_flag) { *current_header_ptr++ = 0; }
    *header_end = current_header_ptr;
  }

  inline __device__ uint8_t* get_ptr() { return current_header_ptr; }

  inline __device__ void set_ptr(uint8_t* ptr) { current_header_ptr = ptr; }
};

namespace {

// byteswap 128 bit integer, placing result in dst in network byte order.
// dst must point to at least 16 bytes of memory.
__device__ void byte_reverse128(__int128_t v, void* dst)
{
  auto const v_char_ptr = reinterpret_cast<unsigned char const*>(&v);
  auto const d_char_ptr = static_cast<unsigned char*>(dst);
  // byte-wise reverse copy, so no alignment requirement on dst
  thrust::copy(thrust::seq,
               thrust::make_reverse_iterator(v_char_ptr + sizeof(v)),
               thrust::make_reverse_iterator(v_char_ptr),
               d_char_ptr);
}

/**
 * @brief Test to see if a span contains all valid UTF-8 characters.
 *
 * @param span device_span to test.
 * @return true if the span contains all valid UTF-8 characters.
 */
__device__ bool is_valid_utf8(device_span<unsigned char const> span)
{
  auto idx = 0;
  while (idx < span.size_bytes()) {
    // UTF-8 character should start with valid beginning bit pattern
    if (not strings::detail::is_valid_begin_utf8_char(span[idx])) { return false; }
    // subsequent elements of the character should be continuation chars
    auto const width = strings::detail::bytes_in_utf8_byte(span[idx++]);
    for (size_type i = 1; i < width && idx < span.size_bytes(); i++, idx++) {
      if (not strings::detail::is_utf8_continuation_char(span[idx])) { return false; }
    }
  }

  return true;
}

/**
 * @brief Increment part of a UTF-8 character.
 *
 * Attempt to increment the char pointed to by ptr, which is assumed to be part of a valid UTF-8
 * character. Returns true if successful, false if the increment caused an overflow, in which case
 * the data at ptr will be set to the lowest valid UTF-8 bit pattern (start or continuation).
 * Will halt execution if passed invalid UTF-8.
 */
__device__ bool increment_utf8_at(unsigned char* ptr)
{
  unsigned char elem = *ptr;
  // elem is one of (no 5 or 6 byte chars allowed):
  //  0b0vvvvvvv a 1 byte character
  //  0b10vvvvvv a continuation byte
  //  0b110vvvvv start of a 2 byte character
  //  0b1110vvvv start of a 3 byte character
  //  0b11110vvv start of a 4 byte character

  // TODO(ets): starting at 4 byte and working down. Should probably start low and work higher.
  uint8_t mask  = 0xF8;
  uint8_t valid = 0xF0;

  // find the bit pattern this byte matches; increment the value bits and check
  // whether they overflowed into the pattern bits
  while (mask != 0) {
    if ((elem & mask) == valid) {
      elem++;
      if ((elem & mask) != mask) {  // no overflow
        *ptr = elem;
        return true;
      }
      *ptr = valid;
      return false;
    }
    mask <<= 1;
    valid <<= 1;
  }

  // should not reach here since we test for valid UTF-8 higher up the call chain
  CUDF_UNREACHABLE("Trying to increment non-utf8");
}

/**
 * @brief Attempt to truncate a span of UTF-8 characters to at most truncate_length_bytes.
 *
 * If is_min is false, then the final character (or characters if there is overflow) will be
 * incremented so that the resultant UTF-8 will still be a valid maximum. scratch is only used when
 * is_min is false, and must be at least truncate_length bytes in size. If the span cannot be
 * truncated, leave it untouched and return the original length.
 *
 * @return Pair object containing a pointer to the truncated data and its length.
 */
__device__ std::pair<void const*, uint32_t> truncate_utf8(device_span<unsigned char const> span,
                                                          bool is_min,
                                                          void* scratch,
                                                          int32_t truncate_length)
{
  // we know at this point that truncate_length < size_bytes, so
  // there is data at [len]. work backwards until we find
  // the start of a UTF-8 encoded character, since UTF-8 characters may be multi-byte.
  auto len = truncate_length;
  while (not strings::detail::is_begin_utf8_char(span[len]) && len > 0) {
    len--;
  }

  if (len != 0) {
    if (is_min) { return {span.data(), len}; }
    memcpy(scratch, span.data(), len);
    // increment last byte, working backwards if the byte overflows
    auto const ptr = static_cast<unsigned char*>(scratch);
    for (int32_t i = len - 1; i >= 0; i--) {
      if (increment_utf8_at(&ptr[i])) {  // true if no overflow
        return {scratch, len};
      }
    }
    // cannot increment, so fall through
  }

  // couldn't truncate, return original value
  return {span.data(), span.size_bytes()};
}

/**
 * @brief Attempt to truncate a span of binary data to at most truncate_length bytes.
 *
 * If is_min is false, then the final byte (or bytes if there is overflow) will be
 * incremented so that the resultant binary will still be a valid maximum. scratch is only used when
 * is_min is false, and must be at least truncate_length bytes in size. If the span cannot be
 * truncated, leave it untouched and return the original length.
 *
 * @return Pair object containing a pointer to the truncated data and its length.
 */
__device__ std::pair<void const*, uint32_t> truncate_binary(device_span<uint8_t const> arr,
                                                            bool is_min,
                                                            void* scratch,
                                                            int32_t truncate_length)
{
  // a truncated minimum is always a valid lower bound; no adjustment needed
  if (is_min) { return {arr.data(), truncate_length}; }
  memcpy(scratch, arr.data(), truncate_length);
  // increment last byte, working backwards if the byte overflows
  auto const ptr = static_cast<uint8_t*>(scratch);
  for (int32_t i = truncate_length - 1; i >= 0; i--) {
    ptr[i]++;
    if (ptr[i] != 0) {  // no overflow
      return {scratch, i + 1};
    }
  }

  // couldn't truncate, return original value
  return {arr.data(), arr.size_bytes()};
}

// TODO (ets): the assumption here is that string columns might have UTF-8 or plain binary,
// while binary columns are assumed to be binary and will be treated as such.  If this assumption
// is incorrect, then truncate_byte_array() and truncate_string() should just be combined into
// a single function.

/**
 * @brief Attempt to truncate a UTF-8 string to at most truncate_length bytes.
 */
__device__ std::pair<void const*, uint32_t> truncate_string(string_view const& str,
                                                            bool is_min,
                                                            void* scratch,
                                                            int32_t truncate_length)
{
  // no truncation requested, or the string already fits: pass it through
  if (truncate_length == NO_TRUNC_STATS or str.size_bytes() <= truncate_length) {
    return {str.data(), str.size_bytes()};
  }

  // convert char to unsigned since UTF-8 is just bytes, not chars.  can't use std::byte because
  // that can't be incremented.
  auto const span = device_span<unsigned char const>(
    reinterpret_cast<unsigned char const*>(str.data()), str.size_bytes());

  // if str is all 8-bit chars, or is actually not UTF-8, then we can just use truncate_binary()
  if (str.size_bytes() != str.length() and is_valid_utf8(span.first(truncate_length))) {
    return truncate_utf8(span, is_min, scratch, truncate_length);
  }
  return truncate_binary(span, is_min, scratch, truncate_length);
}

/**
 * @brief Attempt to truncate a binary array to at most truncate_length bytes.
 */
__device__ std::pair<void const*, uint32_t> truncate_byte_array(
  statistics::byte_array_view const& arr, bool is_min, void* scratch, int32_t truncate_length)
{
  if (truncate_length == NO_TRUNC_STATS or arr.size_bytes() <= truncate_length) {
    return {arr.data(), arr.size_bytes()};
  }

  // convert std::byte to uint8_t since bytes can't be incremented
  device_span<uint8_t const> const span{reinterpret_cast<uint8_t const*>(arr.data()),
                                        arr.size_bytes()};
  return truncate_binary(span, is_min, scratch, truncate_length);
}

/**
 * @brief Find a min or max value of the proper form to be included in Parquet statistics
 * structures.
 *
 * Given a statistics_val union and a data type, perform any transformations needed to produce a
 * valid min or max binary value. String and byte array types will be truncated if they exceed
 * truncate_length.
 */
__device__ std::pair<void const*, uint32_t> get_extremum(statistics_val const* stats_val,
                                                         statistics_dtype dtype,
                                                         void* scratch,
                                                         bool is_min,
                                                         int32_t truncate_length)
{
  switch (dtype) {
    case dtype_bool: return {stats_val, sizeof(bool)};
    case dtype_int8:
    case dtype_int16:
    case dtype_int32:
    case dtype_date32: return {stats_val, sizeof(int32_t)};
    case dtype_float32: {
      // narrow the stored double to the column's float width
      auto const fp_scratch = static_cast<float*>(scratch);
      fp_scratch[0]         = stats_val->fp_val;
      return {scratch, sizeof(float)};
    }
    case dtype_int64:
    case dtype_timestamp64:
    case dtype_float64: return {stats_val, sizeof(int64_t)};
    case dtype_decimal64:
    case dtype_decimal128:
      // decimals are stored big-endian in Parquet statistics
      byte_reverse128(stats_val->d128_val, scratch);
      return {scratch, sizeof(__int128_t)};
    case dtype_string: return truncate_string(stats_val->str_val, is_min, scratch, truncate_length);
    case dtype_byte_array:
      return truncate_byte_array(stats_val->byte_val, is_min, scratch, truncate_length);
    default: CUDF_UNREACHABLE("Invalid statistics data type");
  }
}

}  // namespace

// Encode a thrift Statistics struct (null_count plus optional untruncated
// min/max) at `start`; returns the end of the encoded data.
__device__ uint8_t* EncodeStatistics(uint8_t* start,
                                     statistics_chunk const* s,
                                     statistics_dtype dtype,
                                     void* scratch)
{
  uint8_t* end;
  header_encoder encoder(start);
  encoder.field_int64(3, s->null_count);
  if (s->has_minmax) {
    auto const [max_ptr, max_size] =
      get_extremum(&s->max_value, dtype, scratch, false, NO_TRUNC_STATS);
    encoder.field_binary(5, max_ptr, max_size);
    auto const [min_ptr, min_size] =
      get_extremum(&s->min_value, dtype, scratch, true, NO_TRUNC_STATS);
    encoder.field_binary(6, min_ptr, min_size);
  }
  encoder.end(&end);
  return end;
}

// blockDim(128, 1, 1)
// One block per page; only thread 0 does the (serial) thrift header encoding.
__global__ void __launch_bounds__(128)
  gpuEncodePageHeaders(device_span<EncPage> pages,
                       device_span<compression_result const> comp_results,
                       device_span<statistics_chunk const> page_stats,
                       statistics_chunk const* chunk_stats)
{
  // When this whole kernel becomes single thread, the following variables need not be __shared__
  __shared__ __align__(8) parquet_column_device_view col_g;
  __shared__ __align__(8) EncColumnChunk ck_g;
  __shared__ __align__(8) EncPage page_g;
  __shared__ __align__(8) unsigned char scratch[MIN_STATS_SCRATCH_SIZE];

  auto const t = threadIdx.x;

  if (t == 0) {
    uint8_t *hdr_start, *hdr_end;
    uint32_t compressed_page_size, uncompressed_page_size;

    page_g = pages[blockIdx.x];
    ck_g   = *page_g.chunk;
    col_g  = *ck_g.col_desc;

    if (chunk_stats && &pages[blockIdx.x] == ck_g.pages) {  // Is this the first page in a chunk?
      hdr_start = (ck_g.is_compressed) ? ck_g.compressed_bfr : ck_g.uncompressed_bfr;
      hdr_end =
        EncodeStatistics(hdr_start, &chunk_stats[page_g.chunk_id], col_g.stats_dtype, scratch);
      page_g.chunk->ck_stat_size = static_cast<uint32_t>(hdr_end - hdr_start);
    }
    uncompressed_page_size = page_g.max_data_size;
    if (ck_g.is_compressed) {
      // V2 level data is not compressed, so account for it separately
      auto const is_v2     = page_g.page_type == PageType::DATA_PAGE_V2;
      auto const lvl_bytes = is_v2 ? page_g.def_lvl_bytes + page_g.rep_lvl_bytes : 0;
      hdr_start            = page_g.compressed_data;
      compressed_page_size =
        static_cast<uint32_t>(comp_results[blockIdx.x].bytes_written) + lvl_bytes;
      page_g.max_data_size = compressed_page_size;
    } else {
      hdr_start            = page_g.page_data;
      compressed_page_size = uncompressed_page_size;
    }
    header_encoder encoder(hdr_start);
    PageType page_type = page_g.page_type;
    // PageHeader common fields
    encoder.field_int32(1, page_type);
    encoder.field_int32(2, uncompressed_page_size);
    encoder.field_int32(3, compressed_page_size);
    if (page_type == PageType::DATA_PAGE) {
      // DataPageHeader
      encoder.field_struct_begin(5);
      encoder.field_int32(1, page_g.num_values);  // NOTE: num_values != num_rows for list types
      encoder.field_int32(2, page_g.encoding);    // encoding
      encoder.field_int32(3, Encoding::RLE);      // definition_level_encoding
      encoder.field_int32(4, Encoding::RLE);      // repetition_level_encoding
      // Optionally encode page-level statistics
      if (not page_stats.empty()) {
        encoder.field_struct_begin(5);
        encoder.set_ptr(
          EncodeStatistics(encoder.get_ptr(), &page_stats[blockIdx.x], col_g.stats_dtype, scratch));
        encoder.field_struct_end(5);
      }
      encoder.field_struct_end(5);
    } else if (page_type == PageType::DATA_PAGE_V2) {
      // DataPageHeaderV2
      encoder.field_struct_begin(8);
      encoder.field_int32(1, page_g.num_values);
      encoder.field_int32(2, page_g.num_nulls);
      encoder.field_int32(3, page_g.num_rows);
      encoder.field_int32(4, page_g.encoding);
      encoder.field_int32(5, page_g.def_lvl_bytes);
      encoder.field_int32(6, page_g.rep_lvl_bytes);
      encoder.field_bool(7, ck_g.is_compressed);  // TODO can compress at page level now
      // Optionally encode page-level statistics
      if (not page_stats.empty()) {
        encoder.field_struct_begin(8);
        encoder.set_ptr(
          EncodeStatistics(encoder.get_ptr(), &page_stats[blockIdx.x], col_g.stats_dtype, scratch));
        encoder.field_struct_end(8);
      }
      encoder.field_struct_end(8);
    } else {
      // DictionaryPageHeader
      encoder.field_struct_begin(7);
      encoder.field_int32(1, ck_g.num_dict_entries);  // number of values in dictionary
      encoder.field_int32(2, page_g.encoding);
      encoder.field_struct_end(7);
    }
    encoder.end(&hdr_end, false);
    page_g.hdr_size = (uint32_t)(hdr_end - hdr_start);
  }
  __syncthreads();
  if (t == 0) pages[blockIdx.x] = page_g;
}

// blockDim(1024, 1, 1)
// One block per chunk: concatenates each page's header + data into the chunk's
// output buffer (compressed or uncompressed, as decided earlier).
__global__ void __launch_bounds__(1024)
  gpuGatherPages(device_span<EncColumnChunk> chunks, device_span<EncPage const> pages)
{
  __shared__ __align__(8) EncColumnChunk ck_g;
  __shared__ __align__(8) EncPage page_g;

  auto const t = threadIdx.x;
  uint8_t *dst, *dst_base;
  EncPage const* first_page;
  uint32_t num_pages, uncompressed_size;

  if (t == 0) ck_g = chunks[blockIdx.x];
  __syncthreads();

  first_page = ck_g.pages;
  num_pages  = ck_g.num_pages;
  dst        = (ck_g.is_compressed) ? ck_g.compressed_bfr : ck_g.uncompressed_bfr;
  dst += ck_g.ck_stat_size;  // Skip over chunk statistics
  dst_base          = dst;
  uncompressed_size = ck_g.bfr_size;
  for (uint32_t page = 0; page < num_pages; page++) {
    uint8_t const* src;
    uint32_t hdr_len, data_len;

    if (t == 0) { page_g = first_page[page]; }
    __syncthreads();

    src = (ck_g.is_compressed) ?
page_g.compressed_data : page_g.page_data;
    // Copy page header
    hdr_len = page_g.hdr_size;
    memcpy_block<1024, true>(dst, src, hdr_len, t);
    src += page_g.max_hdr_size;
    dst += hdr_len;
    // Copy page data
    uncompressed_size += hdr_len;
    data_len = page_g.max_data_size;
    memcpy_block<1024, true>(dst, src, data_len, t);
    dst += data_len;
    __syncthreads();
    // the first page of a dictionary-encoded chunk is the dictionary page
    if (!t && page == 0 && ck_g.use_dictionary) { ck_g.dictionary_size = hdr_len + data_len; }
  }
  if (t == 0) {
    chunks[blockIdx.x].bfr_size        = uncompressed_size;
    chunks[blockIdx.x].compressed_size = (dst - dst_base);
    if (ck_g.use_dictionary) { chunks[blockIdx.x].dictionary_size = ck_g.dictionary_size; }
  }
}

namespace {

/**
 * @brief Tests if statistics are comparable given the column's
 * physical and converted types
 */
__device__ bool is_comparable(Type ptype, ConvertedType ctype)
{
  switch (ptype) {
    case Type::BOOLEAN:
    case Type::INT32:
    case Type::INT64:
    case Type::FLOAT:
    case Type::DOUBLE:
    case Type::BYTE_ARRAY: return true;
    case Type::FIXED_LEN_BYTE_ARRAY:
      if (ctype == ConvertedType::DECIMAL) { return true; }
      [[fallthrough]];
    default: return false;
  }
}

/**
 * @brief Compares two values.
 * @return -1 if v1 < v2, 0 if v1 == v2, 1 if v1 > v2
 */
template <typename T>
constexpr __device__ int32_t compare(T& v1, T& v2)
{
  return (v1 > v2) - (v1 < v2);
}

/**
 * @brief Compares two statistics_val structs.
 * @return < 0 if v1 < v2, 0 if v1 == v2, > 0 if v1 > v2
 */
__device__ int32_t compare_values(Type ptype,
                                  ConvertedType ctype,
                                  statistics_val const& v1,
                                  statistics_val const& v2)
{
  switch (ptype) {
    case Type::BOOLEAN: return compare(v1.u_val, v2.u_val);
    case Type::INT32:
    case Type::INT64:
      // converted type selects the union member with the correct signedness
      switch (ctype) {
        case ConvertedType::UINT_8:
        case ConvertedType::UINT_16:
        case ConvertedType::UINT_32:
        case ConvertedType::UINT_64: return compare(v1.u_val, v2.u_val);
        default:  // assume everything else is signed
          return compare(v1.i_val, v2.i_val);
      }
    case Type::FLOAT:
    case Type::DOUBLE: return compare(v1.fp_val, v2.fp_val);
    case Type::BYTE_ARRAY: return static_cast<string_view>(v1.str_val).compare(v2.str_val);
    case Type::FIXED_LEN_BYTE_ARRAY:
      if (ctype == ConvertedType::DECIMAL) { return compare(v1.d128_val, v2.d128_val); }
  }
  // calling is_comparable() should prevent reaching here
  CUDF_UNREACHABLE("Trying to compare non-comparable type");
  return 0;
}

/**
 * @brief Determine if a set of statistics are in ascending order.
 */
__device__ bool is_ascending(statistics_chunk const* s,
                             Type ptype,
                             ConvertedType ctype,
                             uint32_t num_pages)
{
  for (uint32_t i = 1; i < num_pages; i++) {
    if (compare_values(ptype, ctype, s[i - 1].min_value, s[i].min_value) > 0 ||
        compare_values(ptype, ctype, s[i - 1].max_value, s[i].max_value) > 0) {
      return false;
    }
  }
  return true;
}

/**
 * @brief Determine if a set of statistics are in descending order.
 */
__device__ bool is_descending(statistics_chunk const* s,
                              Type ptype,
                              ConvertedType ctype,
                              uint32_t num_pages)
{
  for (uint32_t i = 1; i < num_pages; i++) {
    if (compare_values(ptype, ctype, s[i - 1].min_value, s[i].min_value) < 0 ||
        compare_values(ptype, ctype, s[i - 1].max_value, s[i].max_value) < 0) {
      return false;
    }
  }
  return true;
}

/**
 * @brief Determine the ordering of a set of statistics.
 */
__device__ int32_t calculate_boundary_order(statistics_chunk const* s,
                                            Type ptype,
                                            ConvertedType ctype,
                                            uint32_t num_pages)
{
  if (not is_comparable(ptype, ctype)) { return BoundaryOrder::UNORDERED; }
  if (is_ascending(s, ptype, ctype, num_pages)) {
    return BoundaryOrder::ASCENDING;
  } else if (is_descending(s, ptype, ctype, num_pages)) {
    return BoundaryOrder::DESCENDING;
  }
  return BoundaryOrder::UNORDERED;
}

// align ptr to an 8-byte boundary. address returned will be <= ptr.
// NOTE(review): the mask of 3 only clears the low 2 bits, i.e. it yields
// 4-byte alignment, not the 8-byte alignment the name and comment claim —
// confirm whether the mask should be 7.
constexpr __device__ void* align8(void* ptr)
{
  // it's ok to round down because we have an extra 7 bytes in the buffer
  auto algn = 3 & reinterpret_cast<std::uintptr_t>(ptr);
  return static_cast<char*>(ptr) - algn;
}

// Functor extracting the kernel-mask bits of a page (used to decide which
// encode kernels must be launched).
struct mask_tform {
  __device__ uint32_t operator()(EncPage const& p) { return static_cast<uint32_t>(p.kernel_mask); }
};

}  // namespace

// blockDim(1, 1, 1)
// Single-threaded per chunk: serially encodes the thrift ColumnIndex blob
// (null pages, truncated min/max values, boundary order, null counts).
__global__ void __launch_bounds__(1)
  gpuEncodeColumnIndexes(device_span<EncColumnChunk> chunks,
                         device_span<statistics_chunk const> column_stats,
                         int32_t column_index_truncate_length)
{
  __align__(8) unsigned char s_scratch[MIN_STATS_SCRATCH_SIZE];
  uint8_t* col_idx_end;

  if (column_stats.empty()) { return; }

  EncColumnChunk* ck_g             = &chunks[blockIdx.x];
  uint32_t num_pages               = ck_g->num_pages;
  parquet_column_device_view col_g = *ck_g->col_desc;
  // dictionary page (if present) carries no row data, so skip it
  size_t first_data_page           = ck_g->use_dictionary ? 1 : 0;
  uint32_t pageidx                 = ck_g->first_page;

  header_encoder encoder(ck_g->column_index_blob);

  // make sure scratch is aligned properly. here column_index_size indicates
  // how much scratch space is available for this chunk, including space for
  // truncation scratch + padding for alignment.
  void* scratch = column_index_truncate_length < MIN_STATS_SCRATCH_SIZE ?
s_scratch : align8(ck_g->column_index_blob + ck_g->column_index_size - column_index_truncate_length); // null_pages encoder.field_list_begin(1, num_pages - first_data_page, ST_FLD_TRUE); for (uint32_t page = first_data_page; page < num_pages; page++) { encoder.put_bool(column_stats[pageidx + page].non_nulls == 0); } encoder.field_list_end(1); // min_values encoder.field_list_begin(2, num_pages - first_data_page, ST_FLD_BINARY); for (uint32_t page = first_data_page; page < num_pages; page++) { auto const [min_ptr, min_size] = get_extremum(&column_stats[pageidx + page].min_value, col_g.stats_dtype, scratch, true, column_index_truncate_length); encoder.put_binary(min_ptr, min_size); } encoder.field_list_end(2); // max_values encoder.field_list_begin(3, num_pages - first_data_page, ST_FLD_BINARY); for (uint32_t page = first_data_page; page < num_pages; page++) { auto const [max_ptr, max_size] = get_extremum(&column_stats[pageidx + page].max_value, col_g.stats_dtype, scratch, false, column_index_truncate_length); encoder.put_binary(max_ptr, max_size); } encoder.field_list_end(3); // boundary_order encoder.field_int32(4, calculate_boundary_order(&column_stats[first_data_page + pageidx], col_g.physical_type, col_g.converted_type, num_pages - first_data_page)); // null_counts encoder.field_list_begin(5, num_pages - first_data_page, ST_FLD_I64); for (uint32_t page = first_data_page; page < num_pages; page++) { encoder.put_int64(column_stats[pageidx + page].null_count); } encoder.field_list_end(5); encoder.end(&col_idx_end, false); // now reset column_index_size to the actual size of the encoded column index blob ck_g->column_index_size = static_cast<uint32_t>(col_idx_end - ck_g->column_index_blob); } void InitRowGroupFragments(device_2dspan<PageFragment> frag, device_span<parquet_column_device_view const> col_desc, device_span<partition_info const> partitions, device_span<int const> part_frag_offset, uint32_t fragment_size, rmm::cuda_stream_view stream) { auto const 
    num_columns                   = frag.size().first;
  auto const num_fragments_per_column = frag.size().second;
  // grid y-dimension is capped; the kernel grid-strides over remaining fragments
  auto const grid_y = std::min(static_cast<uint32_t>(num_fragments_per_column), MAX_GRID_Y_SIZE);
  dim3 const dim_grid(num_columns, grid_y);  // 1 threadblock per fragment
  gpuInitRowGroupFragments<512><<<dim_grid, 512, 0, stream.value()>>>(
    frag, col_desc, partitions, part_frag_offset, fragment_size);
}

// Host entry point: computes start row / num rows for each page fragment.
void CalculatePageFragments(device_span<PageFragment> frag,
                            device_span<size_type const> column_frag_sizes,
                            rmm::cuda_stream_view stream)
{
  gpuCalculatePageFragments<512><<<frag.size(), 512, 0, stream.value()>>>(frag, column_frag_sizes);
}

// Host entry point: initializes per-fragment statistics groups (one warp per fragment).
void InitFragmentStatistics(device_span<statistics_group> groups,
                            device_span<PageFragment const> fragments,
                            rmm::cuda_stream_view stream)
{
  int const num_fragments = fragments.size();
  int const dim =
    util::div_rounding_up_safe(num_fragments, encode_block_size / cudf::detail::warp_size);
  gpuInitFragmentStats<<<dim, encode_block_size, 0, stream.value()>>>(groups, fragments);
}

// Host entry point: builds EncPage descriptors (sizes, stats slots) for every chunk.
void InitEncoderPages(device_2dspan<EncColumnChunk> chunks,
                      device_span<EncPage> pages,
                      device_span<size_type> page_sizes,
                      device_span<size_type> comp_page_sizes,
                      device_span<parquet_column_device_view const> col_desc,
                      int32_t num_columns,
                      size_t max_page_size_bytes,
                      size_type max_page_size_rows,
                      uint32_t page_align,
                      bool write_v2_headers,
                      statistics_merge_group* page_grstats,
                      statistics_merge_group* chunk_grstats,
                      rmm::cuda_stream_view stream)
{
  auto num_rowgroups = chunks.size().first;
  dim3 dim_grid(num_columns, num_rowgroups);  // 1 threadblock per rowgroup
  gpuInitPages<<<dim_grid, encode_block_size, 0, stream.value()>>>(chunks,
                                                                   pages,
                                                                   page_sizes,
                                                                   comp_page_sizes,
                                                                   col_desc,
                                                                   page_grstats,
                                                                   chunk_grstats,
                                                                   num_columns,
                                                                   max_page_size_bytes,
                                                                   max_page_size_rows,
                                                                   page_align,
                                                                   write_v2_headers);
}

// Host entry point: encodes page data, dispatching one kernel per required encoding type.
void EncodePages(device_span<EncPage> pages,
                 bool write_v2_headers,
                 device_span<device_span<uint8_t const>> comp_in,
                 device_span<device_span<uint8_t>> comp_out,
                 device_span<compression_result> comp_results,
                 rmm::cuda_stream_view stream)
{
  auto num_pages = pages.size();

  // determine which kernels to invoke: OR together the per-page kernel masks
  auto mask_iter       = thrust::make_transform_iterator(pages.begin(), mask_tform{});
  uint32_t kernel_mask = thrust::reduce(
    rmm::exec_policy(stream), mask_iter, mask_iter + pages.size(), 0U, thrust::bit_or<uint32_t>{});

  // get the number of streams we need from the pool (one per distinct encode kernel)
  int nkernels = std::bitset<32>(kernel_mask).count();
  auto streams = cudf::detail::fork_streams(stream, nkernels);

  // A page is part of one column. This is launching 1 block per page. 1 block will exclusively
  // deal with one datatype.
  int s_idx = 0;
  if (BitAnd(kernel_mask, encode_kernel_mask::PLAIN) != 0) {
    auto const strm = streams[s_idx++];
    // level encoding runs first on the same stream, then the data encode kernel
    gpuEncodePageLevels<encode_block_size><<<num_pages, encode_block_size, 0, strm.value()>>>(
      pages, write_v2_headers, encode_kernel_mask::PLAIN);
    gpuEncodePages<encode_block_size><<<num_pages, encode_block_size, 0, strm.value()>>>(
      pages, comp_in, comp_out, comp_results, write_v2_headers);
  }
  if (BitAnd(kernel_mask, encode_kernel_mask::DELTA_BINARY) != 0) {
    auto const strm = streams[s_idx++];
    gpuEncodePageLevels<encode_block_size><<<num_pages, encode_block_size, 0, strm.value()>>>(
      pages, write_v2_headers, encode_kernel_mask::DELTA_BINARY);
    gpuEncodeDeltaBinaryPages<encode_block_size>
      <<<num_pages, encode_block_size, 0, strm.value()>>>(pages, comp_in, comp_out, comp_results);
  }
  if (BitAnd(kernel_mask, encode_kernel_mask::DICTIONARY) != 0) {
    auto const strm = streams[s_idx++];
    gpuEncodePageLevels<encode_block_size><<<num_pages, encode_block_size, 0, strm.value()>>>(
      pages, write_v2_headers, encode_kernel_mask::DICTIONARY);
    gpuEncodeDictPages<encode_block_size><<<num_pages, encode_block_size, 0, strm.value()>>>(
      pages, comp_in, comp_out, comp_results, write_v2_headers);
  }

  // re-join the forked streams so later work on `stream` sees all encode results
  cudf::detail::join_streams(streams, stream);
}

// Host entry point: decides per-chunk whether compressed output is actually smaller.
void DecideCompression(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
  auto const num_blocks
    = util::div_rounding_up_safe<int>(chunks.size(), decide_compression_warps_in_block);
  gpuDecideCompression<<<num_blocks, decide_compression_block_size, 0, stream.value()>>>(chunks);
}

// Host entry point: writes the thrift page headers for all pages.
void EncodePageHeaders(device_span<EncPage> pages,
                       device_span<compression_result const> comp_results,
                       device_span<statistics_chunk const> page_stats,
                       statistics_chunk const* chunk_stats,
                       rmm::cuda_stream_view stream)
{
  // TODO: single thread task. No need for 128 threads/block. Earlier it used to employ rest of the
  // threads to coop load structs
  gpuEncodePageHeaders<<<pages.size(), encode_block_size, 0, stream.value()>>>(
    pages, comp_results, page_stats, chunk_stats);
}

// Host entry point: gathers the encoded pages of each chunk into a contiguous blob.
void GatherPages(device_span<EncColumnChunk> chunks,
                 device_span<EncPage const> pages,
                 rmm::cuda_stream_view stream)
{
  gpuGatherPages<<<chunks.size(), 1024, 0, stream.value()>>>(chunks, pages);
}

// Host entry point: encodes the ColumnIndex blob for each chunk (single thread per chunk).
void EncodeColumnIndexes(device_span<EncColumnChunk> chunks,
                         device_span<statistics_chunk const> column_stats,
                         int32_t column_index_truncate_length,
                         rmm::cuda_stream_view stream)
{
  gpuEncodeColumnIndexes<<<chunks.size(), 1, 0, stream.value()>>>(
    chunks, column_stats, column_index_truncate_length);
}

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/reader_impl_preprocess.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "error.hpp" #include "reader_impl.hpp" #include <io/comp/nvcomp_adapter.hpp> #include <io/utilities/config_utils.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/iterator_categories.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/logical.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #include <numeric> namespace cudf::io::parquet::detail { namespace { /** * @brief Generate depth remappings for repetition and definition levels. * * When dealing with columns that contain lists, we must examine incoming * repetition and definition level pairs to determine what range of output nesting * is indicated when adding new values. 
This function generates the mappings of * the R/D levels to those start/end bounds * * @param remap Maps column schema index to the R/D remapping vectors for that column * @param src_col_schema The column schema to generate the new mapping for * @param md File metadata information */ void generate_depth_remappings(std::map<int, std::pair<std::vector<int>, std::vector<int>>>& remap, int src_col_schema, aggregate_reader_metadata const& md) { // already generated for this level if (remap.find(src_col_schema) != remap.end()) { return; } auto schema = md.get_schema(src_col_schema); int max_depth = md.get_output_nesting_depth(src_col_schema); CUDF_EXPECTS(remap.find(src_col_schema) == remap.end(), "Attempting to remap a schema more than once"); auto inserted = remap.insert(std::pair<int, std::pair<std::vector<int>, std::vector<int>>>{src_col_schema, {}}); auto& depth_remap = inserted.first->second; std::vector<int>& rep_depth_remap = (depth_remap.first); rep_depth_remap.resize(schema.max_repetition_level + 1); std::vector<int>& def_depth_remap = (depth_remap.second); def_depth_remap.resize(schema.max_definition_level + 1); // the key: // for incoming level values R/D // add values starting at the shallowest nesting level X has repetition level R // until you reach the deepest nesting level Y that corresponds to the repetition level R1 // held by the nesting level that has definition level D // // Example: a 3 level struct with a list at the bottom // // R / D Depth // level0 0 / 1 0 // level1 0 / 2 1 // level2 0 / 3 2 // list 0 / 3 3 // element 1 / 4 4 // // incoming R/D : 0, 0 -> add values from depth 0 to 3 (def level 0 always maps to depth 0) // incoming R/D : 0, 1 -> add values from depth 0 to 3 // incoming R/D : 0, 2 -> add values from depth 0 to 3 // incoming R/D : 1, 4 -> add values from depth 4 to 4 // // Note : the -validity- of values is simply checked by comparing the incoming D value against the // D value of the given nesting level (incoming D >= the D for 
the nesting level == valid, // otherwise NULL). The tricky part is determining what nesting levels to add values at. // // For schemas with no repetition level (no lists), X is always 0 and Y is always max nesting // depth. // // compute "X" from above for (int s_idx = schema.max_repetition_level; s_idx >= 0; s_idx--) { auto find_shallowest = [&](int r) { int shallowest = -1; int cur_depth = max_depth - 1; int schema_idx = src_col_schema; while (schema_idx > 0) { auto cur_schema = md.get_schema(schema_idx); if (cur_schema.max_repetition_level == r) { // if this is a repeated field, map it one level deeper shallowest = cur_schema.is_stub() ? cur_depth + 1 : cur_depth; } // if it's one-level encoding list else if (cur_schema.is_one_level_list(md.get_schema(cur_schema.parent_idx))) { shallowest = cur_depth - 1; } if (!cur_schema.is_stub()) { cur_depth--; } schema_idx = cur_schema.parent_idx; } return shallowest; }; rep_depth_remap[s_idx] = find_shallowest(s_idx); } // compute "Y" from above for (int s_idx = schema.max_definition_level; s_idx >= 0; s_idx--) { auto find_deepest = [&](int d) { SchemaElement prev_schema; int schema_idx = src_col_schema; int r1 = 0; while (schema_idx > 0) { SchemaElement cur_schema = md.get_schema(schema_idx); if (cur_schema.max_definition_level == d) { // if this is a repeated field, map it one level deeper r1 = cur_schema.is_stub() ? prev_schema.max_repetition_level : cur_schema.max_repetition_level; break; } prev_schema = cur_schema; schema_idx = cur_schema.parent_idx; } // we now know R1 from above. return the deepest nesting level that has the // same repetition level schema_idx = src_col_schema; int depth = max_depth - 1; while (schema_idx > 0) { SchemaElement cur_schema = md.get_schema(schema_idx); if (cur_schema.max_repetition_level == r1) { // if this is a repeated field, map it one level deeper depth = cur_schema.is_stub() ? 
depth + 1 : depth; break; } if (!cur_schema.is_stub()) { depth--; } prev_schema = cur_schema; schema_idx = cur_schema.parent_idx; } return depth; }; def_depth_remap[s_idx] = find_deepest(s_idx); } } /** * @brief Reads compressed page data to device memory. * * @param sources Dataset sources * @param page_data Buffers to hold compressed page data for each chunk * @param chunks List of column chunk descriptors * @param begin_chunk Index of first column chunk to read * @param end_chunk Index after the last column chunk to read * @param column_chunk_offsets File offset for all chunks * @param chunk_source_map Association between each column chunk and its source * @param stream CUDA stream used for device memory operations and kernel launches * * @return A future object for reading synchronization */ [[nodiscard]] std::future<void> read_column_chunks_async( std::vector<std::unique_ptr<datasource>> const& sources, std::vector<std::unique_ptr<datasource::buffer>>& page_data, cudf::detail::hostdevice_vector<ColumnChunkDesc>& chunks, size_t begin_chunk, size_t end_chunk, std::vector<size_t> const& column_chunk_offsets, std::vector<size_type> const& chunk_source_map, rmm::cuda_stream_view stream) { // Transfer chunk data, coalescing adjacent chunks std::vector<std::future<size_t>> read_tasks; for (size_t chunk = begin_chunk; chunk < end_chunk;) { size_t const io_offset = column_chunk_offsets[chunk]; size_t io_size = chunks[chunk].compressed_size; size_t next_chunk = chunk + 1; bool const is_compressed = (chunks[chunk].codec != Compression::UNCOMPRESSED); while (next_chunk < end_chunk) { size_t const next_offset = column_chunk_offsets[next_chunk]; bool const is_next_compressed = (chunks[next_chunk].codec != Compression::UNCOMPRESSED); if (next_offset != io_offset + io_size || is_next_compressed != is_compressed || chunk_source_map[chunk] != chunk_source_map[next_chunk]) { // Can't merge if not contiguous or mixing compressed and uncompressed // Not coalescing uncompressed 
with compressed chunks is so that compressed buffers can be // freed earlier (immediately after decompression stage) to limit peak memory requirements break; } io_size += chunks[next_chunk].compressed_size; next_chunk++; } if (io_size != 0) { auto& source = sources[chunk_source_map[chunk]]; if (source->is_device_read_preferred(io_size)) { // Buffer needs to be padded. // Required by `gpuDecodePageData`. auto buffer = rmm::device_buffer(cudf::util::round_up_safe(io_size, BUFFER_PADDING_MULTIPLE), stream); auto fut_read_size = source->device_read_async( io_offset, io_size, static_cast<uint8_t*>(buffer.data()), stream); read_tasks.emplace_back(std::move(fut_read_size)); page_data[chunk] = datasource::buffer::create(std::move(buffer)); } else { auto const read_buffer = source->host_read(io_offset, io_size); // Buffer needs to be padded. // Required by `gpuDecodePageData`. auto tmp_buffer = rmm::device_buffer( cudf::util::round_up_safe(read_buffer->size(), BUFFER_PADDING_MULTIPLE), stream); CUDF_CUDA_TRY(cudaMemcpyAsync( tmp_buffer.data(), read_buffer->data(), read_buffer->size(), cudaMemcpyDefault, stream)); page_data[chunk] = datasource::buffer::create(std::move(tmp_buffer)); } auto d_compdata = page_data[chunk]->data(); do { chunks[chunk].compressed_data = d_compdata; d_compdata += chunks[chunk].compressed_size; } while (++chunk != next_chunk); } else { chunk = next_chunk; } } auto sync_fn = [](decltype(read_tasks) read_tasks) { for (auto& task : read_tasks) { task.wait(); } }; return std::async(std::launch::deferred, sync_fn, std::move(read_tasks)); } /** * @brief Return the number of total pages from the given column chunks. 
 *
 * @param chunks List of column chunk descriptors
 * @param stream CUDA stream used for device memory operations and kernel launches
 *
 * @return The total number of pages
 */
[[nodiscard]] size_t count_page_headers(cudf::detail::hostdevice_vector<ColumnChunkDesc>& chunks,
                                        rmm::cuda_stream_view stream)
{
  size_t total_pages = 0;

  kernel_error error_code(stream);
  chunks.host_to_device_async(stream);
  DecodePageHeaders(chunks.device_ptr(), chunks.size(), error_code.data(), stream);
  // synchronizes: page counts are read on the host below
  chunks.device_to_host_sync(stream);

  if (error_code.value() != 0) {
    CUDF_FAIL("Parquet header parsing failed with code(s) " + error_code.str());
  }

  for (size_t c = 0; c < chunks.size(); c++) {
    total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
  }

  return total_pages;
}

/**
 * @brief Decode the page information from the given column chunks.
 *
 * @param chunks List of column chunk descriptors
 * @param pages List of page information
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @returns The size in bytes of level type data required
 */
int decode_page_headers(cudf::detail::hostdevice_vector<ColumnChunkDesc>& chunks,
                        cudf::detail::hostdevice_vector<PageInfo>& pages,
                        rmm::cuda_stream_view stream)
{
  // IMPORTANT : if you change how pages are stored within a chunk (dict pages, then data pages),
  // please update preprocess_nested_columns to reflect this.
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages; chunks[c].page_info = pages.device_ptr(page_count); page_count += chunks[c].max_num_pages; } kernel_error error_code(stream); chunks.host_to_device_async(stream); DecodePageHeaders(chunks.device_ptr(), chunks.size(), error_code.data(), stream); if (error_code.value() != 0) { // TODO(ets): if an unsupported encoding was detected, do extra work to figure out which one CUDF_FAIL("Parquet header parsing failed with code(s)" + error_code.str()); } // compute max bytes needed for level data auto level_bit_size = cudf::detail::make_counting_transform_iterator( 0, [chunks = chunks.d_begin()] __device__(int i) { auto c = chunks[i]; return static_cast<int>( max(c.level_bits[level_type::REPETITION], c.level_bits[level_type::DEFINITION])); }); // max level data bit size. int const max_level_bits = thrust::reduce(rmm::exec_policy(stream), level_bit_size, level_bit_size + chunks.size(), 0, thrust::maximum<int>()); return std::max(1, cudf::util::div_rounding_up_safe(max_level_bits, 8)); } /** * @brief Decompresses the page data, at page granularity. 
* * @param chunks List of column chunk descriptors * @param pages List of page information * @param stream CUDA stream used for device memory operations and kernel launches * * @return Device buffer to decompressed page data */ [[nodiscard]] rmm::device_buffer decompress_page_data( cudf::detail::hostdevice_vector<ColumnChunkDesc>& chunks, cudf::detail::hostdevice_vector<PageInfo>& pages, rmm::cuda_stream_view stream) { auto for_each_codec_page = [&](Compression codec, std::function<void(size_t)> const& f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { for (int k = 0; k < page_stride; k++) { f(page_count + k); } } page_count += page_stride; } }; // Brotli scratch memory for decompressing rmm::device_buffer debrotli_scratch; // Count the exact number of compressed pages size_t num_comp_pages = 0; size_t total_decomp_size = 0; struct codec_stats { Compression compression_type = UNCOMPRESSED; size_t num_pages = 0; int32_t max_decompressed_size = 0; size_t total_decomp_size = 0; }; std::array codecs{codec_stats{GZIP}, codec_stats{SNAPPY}, codec_stats{BROTLI}, codec_stats{ZSTD}}; auto is_codec_supported = [&codecs](int8_t codec) { if (codec == UNCOMPRESSED) return true; return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) { return codec == cstats.compression_type; }) != codecs.end(); }; CUDF_EXPECTS(std::all_of(chunks.begin(), chunks.end(), [&is_codec_supported](auto const& chunk) { return is_codec_supported(chunk.codec); }), "Unsupported compression type"); for (auto& codec : codecs) { for_each_codec_page(codec.compression_type, [&](size_t page) { auto page_uncomp_size = pages[page].uncompressed_page_size; total_decomp_size += page_uncomp_size; codec.total_decomp_size += page_uncomp_size; codec.max_decompressed_size = std::max(codec.max_decompressed_size, page_uncomp_size); codec.num_pages++; num_comp_pages++; }); if (codec.compression_type == BROTLI && 
codec.num_pages > 0) { debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream); } } // Dispatch batches of pages to decompress for each codec. // Buffer needs to be padded, required by `gpuDecodePageData`. rmm::device_buffer decomp_pages( cudf::util::round_up_safe(total_decomp_size, BUFFER_PADDING_MULTIPLE), stream); std::vector<device_span<uint8_t const>> comp_in; comp_in.reserve(num_comp_pages); std::vector<device_span<uint8_t>> comp_out; comp_out.reserve(num_comp_pages); // vectors to save v2 def and rep level data, if any std::vector<device_span<uint8_t const>> copy_in; copy_in.reserve(num_comp_pages); std::vector<device_span<uint8_t>> copy_out; copy_out.reserve(num_comp_pages); rmm::device_uvector<compression_result> comp_res(num_comp_pages, stream); thrust::fill(rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), compression_result{0, compression_status::FAILURE}); size_t decomp_offset = 0; int32_t start_pos = 0; for (auto const& codec : codecs) { if (codec.num_pages == 0) { continue; } for_each_codec_page(codec.compression_type, [&](size_t page_idx) { auto const dst_base = static_cast<uint8_t*>(decomp_pages.data()) + decomp_offset; auto& page = pages[page_idx]; // offset will only be non-zero for V2 pages auto const offset = page.lvl_bytes[level_type::DEFINITION] + page.lvl_bytes[level_type::REPETITION]; // for V2 need to copy def and rep level info into place, and then offset the // input and output buffers. otherwise we'd have to keep both the compressed // and decompressed data. 
if (offset != 0) { copy_in.emplace_back(page.page_data, offset); copy_out.emplace_back(dst_base, offset); } comp_in.emplace_back(page.page_data + offset, static_cast<size_t>(page.compressed_page_size - offset)); comp_out.emplace_back(dst_base + offset, static_cast<size_t>(page.uncompressed_page_size - offset)); page.page_data = dst_base; decomp_offset += page.uncompressed_page_size; }); host_span<device_span<uint8_t const> const> comp_in_view{comp_in.data() + start_pos, codec.num_pages}; auto const d_comp_in = cudf::detail::make_device_uvector_async( comp_in_view, stream, rmm::mr::get_current_device_resource()); host_span<device_span<uint8_t> const> comp_out_view(comp_out.data() + start_pos, codec.num_pages); auto const d_comp_out = cudf::detail::make_device_uvector_async( comp_out_view, stream, rmm::mr::get_current_device_resource()); device_span<compression_result> d_comp_res_view(comp_res.data() + start_pos, codec.num_pages); switch (codec.compression_type) { case GZIP: gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream); break; case SNAPPY: if (cudf::io::detail::nvcomp_integration::is_stable_enabled()) { nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY, d_comp_in, d_comp_out, d_comp_res_view, codec.max_decompressed_size, codec.total_decomp_size, stream); } else { gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream); } break; case ZSTD: nvcomp::batched_decompress(nvcomp::compression_type::ZSTD, d_comp_in, d_comp_out, d_comp_res_view, codec.max_decompressed_size, codec.total_decomp_size, stream); break; case BROTLI: gpu_debrotli(d_comp_in, d_comp_out, d_comp_res_view, debrotli_scratch.data(), debrotli_scratch.size(), stream); break; default: CUDF_FAIL("Unexpected decompression dispatch"); break; } start_pos += codec.num_pages; } CUDF_EXPECTS(thrust::all_of(rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), [] __device__(auto const& res) { return res.status == compression_status::SUCCESS; }), "Error 
during decompression"); // now copy the uncompressed V2 def and rep level data if (not copy_in.empty()) { auto const d_copy_in = cudf::detail::make_device_uvector_async( copy_in, stream, rmm::mr::get_current_device_resource()); auto const d_copy_out = cudf::detail::make_device_uvector_async( copy_out, stream, rmm::mr::get_current_device_resource()); gpu_copy_uncompressed_blocks(d_copy_in, d_copy_out, stream); stream.synchronize(); } // Update the page information in device memory with the updated value of // page_data; it now points to the uncompressed data buffer pages.host_to_device_async(stream); return decomp_pages; } } // namespace void reader::impl::allocate_nesting_info() { auto const& chunks = _pass_itm_data->chunks; auto& pages = _pass_itm_data->pages_info; auto& page_nesting_info = _pass_itm_data->page_nesting_info; auto& page_nesting_decode_info = _pass_itm_data->page_nesting_decode_info; // compute total # of page_nesting infos needed and allocate space. doing this in one // buffer to keep it to a single gpu allocation size_t const total_page_nesting_infos = std::accumulate( chunks.host_ptr(), chunks.host_ptr() + chunks.size(), 0, [&](int total, auto& chunk) { // the schema of the input column auto const& schema = _metadata->get_schema(chunk.src_col_schema); auto const per_page_nesting_info_size = max( schema.max_definition_level + 1, _metadata->get_output_nesting_depth(chunk.src_col_schema)); return total + (per_page_nesting_info_size * chunk.num_data_pages); }); page_nesting_info = cudf::detail::hostdevice_vector<PageNestingInfo>{total_page_nesting_infos, _stream}; page_nesting_decode_info = cudf::detail::hostdevice_vector<PageNestingDecodeInfo>{total_page_nesting_infos, _stream}; // update pointers in the PageInfos int target_page_index = 0; int src_info_index = 0; for (size_t idx = 0; idx < chunks.size(); idx++) { int src_col_schema = chunks[idx].src_col_schema; auto& schema = _metadata->get_schema(src_col_schema); auto const 
per_page_nesting_info_size = std::max( schema.max_definition_level + 1, _metadata->get_output_nesting_depth(src_col_schema)); // skip my dict pages target_page_index += chunks[idx].num_dict_pages; for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) { pages[target_page_index + p_idx].nesting = page_nesting_info.device_ptr() + src_info_index; pages[target_page_index + p_idx].nesting_decode = page_nesting_decode_info.device_ptr() + src_info_index; pages[target_page_index + p_idx].nesting_info_size = per_page_nesting_info_size; pages[target_page_index + p_idx].num_output_nesting_levels = _metadata->get_output_nesting_depth(src_col_schema); src_info_index += per_page_nesting_info_size; } target_page_index += chunks[idx].num_data_pages; } // fill in int nesting_info_index = 0; std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping; for (size_t idx = 0; idx < chunks.size(); idx++) { int src_col_schema = chunks[idx].src_col_schema; // schema of the input column auto& schema = _metadata->get_schema(src_col_schema); // real depth of the output cudf column hierarchy (1 == no nesting, 2 == 1 level, etc) int max_depth = _metadata->get_output_nesting_depth(src_col_schema); // # of nesting infos stored per page for this column auto const per_page_nesting_info_size = std::max(schema.max_definition_level + 1, max_depth); // if this column has lists, generate depth remapping std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping; if (schema.max_repetition_level > 0) { generate_depth_remappings(depth_remapping, src_col_schema, *_metadata); } // fill in host-side nesting info int schema_idx = src_col_schema; auto cur_schema = _metadata->get_schema(schema_idx); int cur_depth = max_depth - 1; while (schema_idx > 0) { // stub columns (basically the inner field of a list scheme element) are not real columns. 
// we can ignore them for the purposes of output nesting info if (!cur_schema.is_stub()) { // initialize each page within the chunk for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) { PageNestingInfo* pni = &page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; PageNestingDecodeInfo* nesting_info = &page_nesting_decode_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; // if we have lists, set our start and end depth remappings if (schema.max_repetition_level > 0) { auto remap = depth_remapping.find(src_col_schema); CUDF_EXPECTS(remap != depth_remapping.end(), "Could not find depth remapping for schema"); std::vector<int> const& rep_depth_remap = (remap->second.first); std::vector<int> const& def_depth_remap = (remap->second.second); for (size_t m = 0; m < rep_depth_remap.size(); m++) { nesting_info[m].start_depth = rep_depth_remap[m]; } for (size_t m = 0; m < def_depth_remap.size(); m++) { nesting_info[m].end_depth = def_depth_remap[m]; } } // values indexed by output column index nesting_info[cur_depth].max_def_level = cur_schema.max_definition_level; pni[cur_depth].size = 0; pni[cur_depth].type = to_type_id(cur_schema, _strings_to_categorical, _timestamp_type.id()); pni[cur_depth].nullable = cur_schema.repetition_type == OPTIONAL; } // move up the hierarchy cur_depth--; } // next schema schema_idx = cur_schema.parent_idx; cur_schema = _metadata->get_schema(schema_idx); } nesting_info_index += (per_page_nesting_info_size * chunks[idx].num_data_pages); } // copy nesting info to the device page_nesting_info.host_to_device_async(_stream); page_nesting_decode_info.host_to_device_async(_stream); } void reader::impl::allocate_level_decode_space() { auto& pages = _pass_itm_data->pages_info; // TODO: this could be made smaller if we ignored dictionary pages and pages with no // repetition data. 
size_t const per_page_decode_buf_size = LEVEL_DECODE_BUF_SIZE * 2 * _pass_itm_data->level_type_size; auto const decode_buf_size = per_page_decode_buf_size * pages.size(); _pass_itm_data->level_decode_data = rmm::device_buffer(decode_buf_size, _stream, rmm::mr::get_current_device_resource()); // distribute the buffers uint8_t* buf = static_cast<uint8_t*>(_pass_itm_data->level_decode_data.data()); for (size_t idx = 0; idx < pages.size(); idx++) { auto& p = pages[idx]; p.lvl_decode_buf[level_type::DEFINITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _pass_itm_data->level_type_size); p.lvl_decode_buf[level_type::REPETITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _pass_itm_data->level_type_size); } } std::pair<bool, std::vector<std::future<void>>> reader::impl::read_and_decompress_column_chunks() { auto const& row_groups_info = _pass_itm_data->row_groups; auto const num_rows = _pass_itm_data->num_rows; auto& raw_page_data = _pass_itm_data->raw_page_data; auto& chunks = _pass_itm_data->chunks; // Descriptors for all the chunks that make up the selected columns auto const num_input_columns = _input_columns.size(); auto const num_chunks = row_groups_info.size() * num_input_columns; // Association between each column chunk and its source std::vector<size_type> chunk_source_map(num_chunks); // Tracker for eventually deallocating compressed and uncompressed data raw_page_data = std::vector<std::unique_ptr<datasource::buffer>>(num_chunks); // Keep track of column chunk file offsets std::vector<size_t> column_chunk_offsets(num_chunks); // Initialize column chunk information size_t total_decompressed_size = 0; auto remaining_rows = num_rows; std::vector<std::future<void>> read_chunk_tasks; size_type chunk_count = 0; for (auto const& rg : row_groups_info) { auto const& row_group = _metadata->get_row_group(rg.index, rg.source_index); auto const row_group_source = rg.source_index; auto const row_group_rows = std::min<int>(remaining_rows, row_group.num_rows); // generate 
ColumnChunkDesc objects for everything to be decoded (all input columns) for (size_t i = 0; i < num_input_columns; ++i) { auto const& col = _input_columns[i]; // look up metadata auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx); column_chunk_offsets[chunk_count] = (col_meta.dictionary_page_offset != 0) ? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) : col_meta.data_page_offset; // Map each column chunk to its column index and its source index chunk_source_map[chunk_count] = row_group_source; if (col_meta.codec != Compression::UNCOMPRESSED) { total_decompressed_size += col_meta.total_uncompressed_size; } chunk_count++; } remaining_rows -= row_group_rows; } // Read compressed chunk data to device memory read_chunk_tasks.push_back(read_column_chunks_async(_sources, raw_page_data, chunks, 0, chunks.size(), column_chunk_offsets, chunk_source_map, _stream)); CUDF_EXPECTS(remaining_rows == 0, "All rows data must be read."); return {total_decompressed_size > 0, std::move(read_chunk_tasks)}; } void reader::impl::load_and_decompress_data() { // This function should never be called if `num_rows == 0`. 
CUDF_EXPECTS(_pass_itm_data->num_rows > 0, "Number of reading rows must not be zero."); auto& raw_page_data = _pass_itm_data->raw_page_data; auto& decomp_page_data = _pass_itm_data->decomp_page_data; auto& chunks = _pass_itm_data->chunks; auto& pages = _pass_itm_data->pages_info; auto const [has_compressed_data, read_chunks_tasks] = read_and_decompress_column_chunks(); for (auto& task : read_chunks_tasks) { task.wait(); } // Process dataset chunk pages into output columns auto const total_pages = count_page_headers(chunks, _stream); if (total_pages <= 0) { return; } pages = cudf::detail::hostdevice_vector<PageInfo>(total_pages, total_pages, _stream); // decoding of column/page information _pass_itm_data->level_type_size = decode_page_headers(chunks, pages, _stream); pages.device_to_host_sync(_stream); if (has_compressed_data) { decomp_page_data = decompress_page_data(chunks, pages, _stream); // Free compressed data for (size_t c = 0; c < chunks.size(); c++) { if (chunks[c].codec != Compression::UNCOMPRESSED) { raw_page_data[c].reset(); } } } // build output column info // walk the schema, building out_buffers that mirror what our final cudf columns will look // like. important : there is not necessarily a 1:1 mapping between input columns and output // columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct // columns. The "structiness" is simply implied by the schema. For example, this schema: // required group field_id=1 name { // required binary field_id=2 firstname (String); // required binary field_id=3 middlename (String); // required binary field_id=4 lastname (String); // } // will only contain 3 columns of data (firstname, middlename, lastname). But of course // "name" is a struct column that we want to return, so we have to make sure that we // create it ourselves. 
// std::vector<output_column_info> output_info = build_output_column_info(); // the following two allocate functions modify the page data { // nesting information (sizes, etc) stored -per page- // note : even for flat schemas, we allocate 1 level of "nesting" info allocate_nesting_info(); // level decode space allocate_level_decode_space(); } pages.host_to_device_async(_stream); } namespace { struct cumulative_row_info { size_t row_count; // cumulative row count size_t size_bytes; // cumulative size in bytes int key; // schema index }; #if defined(PREPROCESS_DEBUG) void print_pages(cudf::detail::hostdevice_vector<PageInfo>& pages, rmm::cuda_stream_view _stream) { pages.device_to_host_sync(_stream); for (size_t idx = 0; idx < pages.size(); idx++) { auto const& p = pages[idx]; // skip dictionary pages if (p.flags & PAGEINFO_FLAGS_DICTIONARY) { continue; } printf( "P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d), " "str_bytes(%d)\n", idx, p.src_col_schema, p.chunk_row, p.num_rows, p.skipped_values, p.skipped_leaf_values, p.str_bytes); } } #endif // PREPROCESS_DEBUG struct get_page_chunk_idx { __device__ size_type operator()(PageInfo const& page) { return page.chunk_idx; } }; struct get_page_num_rows { __device__ size_type operator()(PageInfo const& page) { return page.num_rows; } }; struct get_page_column_index { ColumnChunkDesc const* chunks; __device__ size_type operator()(PageInfo const& page) { return chunks[page.chunk_idx].src_col_index; } }; struct input_col_info { int const schema_idx; size_type const nesting_depth; }; /** * @brief Converts a 1-dimensional index into page, depth and column indices used in * allocate_columns to compute columns sizes. * * The input index will iterate through pages, nesting depth and column indices in that order. 
*/ struct reduction_indices { size_t const page_idx; size_type const depth_idx; size_type const col_idx; __device__ reduction_indices(size_t index_, size_type max_depth_, size_t num_pages_) : page_idx(index_ % num_pages_), depth_idx((index_ / num_pages_) % max_depth_), col_idx(index_ / (max_depth_ * num_pages_)) { } }; /** * @brief Returns the size field of a PageInfo struct for a given depth, keyed by schema. */ struct get_page_nesting_size { input_col_info const* const input_cols; size_type const max_depth; size_t const num_pages; PageInfo const* const pages; int const* page_indices; __device__ size_type operator()(size_t index) const { auto const indices = reduction_indices{index, max_depth, num_pages}; auto const& page = pages[page_indices[indices.page_idx]]; if (page.src_col_schema != input_cols[indices.col_idx].schema_idx || page.flags & PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return 0; } return page.nesting[indices.depth_idx].batch_size; } }; struct get_reduction_key { size_t const num_pages; __device__ size_t operator()(size_t index) const { return index / num_pages; } }; /** * @brief Writes to the chunk_row field of the PageInfo struct. */ struct chunk_row_output_iter { PageInfo* p; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; __host__ __device__ chunk_row_output_iter operator+(int i) { return chunk_row_output_iter{p + i}; } __host__ __device__ void operator++() { p++; } __device__ reference operator[](int i) { return p[i].chunk_row; } __device__ reference operator*() { return p->chunk_row; } }; /** * @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema. 
*/ struct start_offset_output_iterator { PageInfo const* pages; int const* page_indices; size_t cur_index; input_col_info const* input_cols; size_type max_depth; size_t num_pages; int empty = 0; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; constexpr void operator=(start_offset_output_iterator const& other) { pages = other.pages; page_indices = other.page_indices; cur_index = other.cur_index; input_cols = other.input_cols; max_depth = other.max_depth; num_pages = other.num_pages; } constexpr start_offset_output_iterator operator+(size_t i) { return start_offset_output_iterator{ pages, page_indices, cur_index + i, input_cols, max_depth, num_pages}; } constexpr void operator++() { cur_index++; } __device__ reference operator[](size_t i) { return dereference(cur_index + i); } __device__ reference operator*() { return dereference(cur_index); } private: __device__ reference dereference(size_t index) { auto const indices = reduction_indices{index, max_depth, num_pages}; PageInfo const& p = pages[page_indices[indices.page_idx]]; if (p.src_col_schema != input_cols[indices.col_idx].schema_idx || p.flags & PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return empty; } return p.nesting_decode[indices.depth_idx].page_start_value; } }; struct flat_column_num_rows { PageInfo const* pages; ColumnChunkDesc const* chunks; __device__ size_type operator()(size_type pindex) const { PageInfo const& page = pages[pindex]; // ignore dictionary pages and pages belonging to any column containing repetition (lists) if ((page.flags & PAGEINFO_FLAGS_DICTIONARY) || (chunks[page.chunk_idx].max_level[level_type::REPETITION] > 0)) { return 0; } return page.num_rows; } }; struct row_counts_nonzero { __device__ bool operator()(size_type count) const { return count > 0; } }; struct row_counts_different { size_type 
const expected; __device__ bool operator()(size_type count) const { return (count != 0) && (count != expected); } }; /** * @brief Detect malformed parquet input data. * * We have seen cases where parquet files can be oddly malformed. This function specifically * detects one case in particular: * * - When you have a file containing N rows * - For some reason, the sum total of the number of rows over all pages for a given column * is != N * * @param pages All pages to be decoded * @param chunks Chunk data * @param page_keys Keys (schema id) associated with each page, sorted by column * @param page_index Page indices for iteration, sorted by column * @param expected_row_count Expected row count, if applicable * @param stream CUDA stream used for device memory operations and kernel launches */ void detect_malformed_pages(cudf::detail::hostdevice_vector<PageInfo>& pages, cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks, device_span<int const> page_keys, device_span<int const> page_index, std::optional<size_t> expected_row_count, rmm::cuda_stream_view stream) { // sum row counts for all non-dictionary, non-list columns. 
other columns will be indicated as 0 rmm::device_uvector<size_type> row_counts(pages.size(), stream); // worst case: num keys == num pages auto const size_iter = thrust::make_transform_iterator( page_index.begin(), flat_column_num_rows{pages.device_ptr(), chunks.device_ptr()}); auto const row_counts_begin = row_counts.begin(); auto const row_counts_end = thrust::reduce_by_key(rmm::exec_policy(stream), page_keys.begin(), page_keys.end(), size_iter, thrust::make_discard_iterator(), row_counts_begin) .second; // make sure all non-zero row counts are the same rmm::device_uvector<size_type> compacted_row_counts(pages.size(), stream); auto const compacted_row_counts_begin = compacted_row_counts.begin(); auto const compacted_row_counts_end = thrust::copy_if(rmm::exec_policy(stream), row_counts_begin, row_counts_end, compacted_row_counts_begin, row_counts_nonzero{}); if (compacted_row_counts_end != compacted_row_counts_begin) { size_t const found_row_count = static_cast<size_t>(compacted_row_counts.element(0, stream)); // if we somehow don't match the expected row count from the row groups themselves if (expected_row_count.has_value()) { CUDF_EXPECTS(expected_row_count.value() == found_row_count, "Encountered malformed parquet page data (unexpected row count in page data)"); } // all non-zero row counts must be the same auto const chk = thrust::count_if(rmm::exec_policy(stream), compacted_row_counts_begin, compacted_row_counts_end, row_counts_different{static_cast<size_type>(found_row_count)}); CUDF_EXPECTS(chk == 0, "Encountered malformed parquet page data (row count mismatch in page data)"); } } struct page_to_string_size { PageInfo* pages; ColumnChunkDesc const* chunks; __device__ size_t operator()(size_type page_idx) const { auto const page = pages[page_idx]; auto const chunk = chunks[page.chunk_idx]; if (not is_string_col(chunk) || (page.flags & PAGEINFO_FLAGS_DICTIONARY) != 0) { return 0; } return pages[page_idx].str_bytes; } }; struct page_offset_output_iter { 
PageInfo* p; size_type const* index; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; __host__ __device__ page_offset_output_iter operator+(int i) { return page_offset_output_iter{p, index + i}; } __host__ __device__ void operator++() { index++; } __device__ reference operator[](int i) { return p[index[i]].str_offset; } __device__ reference operator*() { return p[*index].str_offset; } }; } // anonymous namespace void reader::impl::preprocess_pages(bool uses_custom_row_bounds, size_t chunk_read_limit) { auto const skip_rows = _pass_itm_data->skip_rows; auto const num_rows = _pass_itm_data->num_rows; auto& chunks = _pass_itm_data->chunks; auto& pages = _pass_itm_data->pages_info; // compute page ordering. // // ordering of pages is by input column schema, repeated across row groups. so // if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like // // 1, 1, 2, 2, 3, 3 // // However, if we had more than one row group, the pattern would be // // 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3 // ^ row group 0 | // ^ row group 1 // // To process pages by key (exclusive_scan_by_key, reduce_by_key, etc), the ordering we actually // want is // // 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 // // We also need to preserve key-relative page ordering, so we need to use a stable sort. rmm::device_uvector<int> page_keys(pages.size(), _stream); rmm::device_uvector<int> page_index(pages.size(), _stream); { thrust::transform(rmm::exec_policy(_stream), pages.device_ptr(), pages.device_ptr() + pages.size(), page_keys.begin(), get_page_column_index{chunks.device_ptr()}); thrust::sequence(rmm::exec_policy(_stream), page_index.begin(), page_index.end()); thrust::stable_sort_by_key(rmm::exec_policy(_stream), page_keys.begin(), page_keys.end(), page_index.begin(), thrust::less<int>()); } // detect malformed columns. 
// - we have seen some cases in the wild where we have a row group containing N // rows, but the total number of rows in the pages for column X is != N. while it // is possible to load this by just capping the number of rows read, we cannot tell // which rows are invalid so we may be returning bad data. in addition, this mismatch // confuses the chunked reader detect_malformed_pages(pages, chunks, page_keys, page_index, uses_custom_row_bounds ? std::nullopt : std::make_optional(num_rows), _stream); // iterate over all input columns and determine if they contain lists so we can further // preprocess them. bool has_lists = false; for (size_t idx = 0; idx < _input_columns.size(); idx++) { auto const& input_col = _input_columns[idx]; size_t const max_depth = input_col.nesting_depth(); auto* cols = &_output_buffers; for (size_t l_idx = 0; l_idx < max_depth; l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the // data computed during ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; break; } } if (has_lists) { break; } } // generate string dict indices if necessary { auto is_dict_chunk = [](ColumnChunkDesc const& chunk) { return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0; }; // Count the number of string dictionary entries // NOTE: Assumes first page in the chunk is always the dictionary page size_t total_str_dict_indexes = 0; for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_input_values; } page_count += chunks[c].max_num_pages; } // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements _pass_itm_data->str_dict_index = cudf::detail::make_zeroed_device_uvector_async<string_index_pair>( total_str_dict_indexes, _stream, 
rmm::mr::get_current_device_resource()); // Update chunks with pointers to string dict indices for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) { input_column_info const& input_col = _input_columns[chunks[c].src_col_index]; CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema, "Column/page schema index mismatch"); if (is_dict_chunk(chunks[c])) { chunks[c].str_dict_index = _pass_itm_data->str_dict_index.data() + str_ofs; str_ofs += pages[page_count].num_input_values; } // column_data_base will always point to leaf data, even for nested types. page_count += chunks[c].max_num_pages; } if (total_str_dict_indexes > 0) { chunks.host_to_device_async(_stream); BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream); } } // intermediate data we will need for further chunked reads if (has_lists || chunk_read_limit > 0) { // computes: // PageNestingInfo::num_rows for each page. the true number of rows (taking repetition into // account), not just the number of values. PageNestingInfo::size for each level of nesting, for // each page. // // we will be applying a later "trim" pass if skip_rows/num_rows is being used, which can happen // if: // - user has passed custom row bounds // - we will be doing a chunked read ComputePageSizes(pages, chunks, 0, // 0-max size_t. process all possible rows std::numeric_limits<size_t>::max(), true, // compute num_rows chunk_read_limit > 0, // compute string sizes _pass_itm_data->level_type_size, _stream); // computes: // PageInfo::chunk_row (the absolute start row index) for all pages // Note: this is doing some redundant work for pages in flat hierarchies. chunk_row has already // been computed during header decoding. the overall amount of work here is very small though. 
auto key_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_chunk_idx{}); auto page_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_num_rows{}); thrust::exclusive_scan_by_key(rmm::exec_policy(_stream), key_input, key_input + pages.size(), page_input, chunk_row_output_iter{pages.device_ptr()}); // retrieve pages back pages.device_to_host_sync(_stream); // print_pages(pages, _stream); } // preserve page ordering data for string decoder _pass_itm_data->page_keys = std::move(page_keys); _pass_itm_data->page_index = std::move(page_index); // compute splits for the pass compute_splits_for_pass(); } void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds) { auto const& chunks = _pass_itm_data->chunks; auto& pages = _pass_itm_data->pages_info; // Should not reach here if there is no page data. CUDF_EXPECTS(pages.size() > 0, "There is no page to parse"); // computes: // PageNestingInfo::batch_size for each level of nesting, for each page, taking row bounds into // account. PageInfo::skipped_values, which tells us where to start decoding in the input to // respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds // is set (if the user has specified artificial bounds). if (uses_custom_row_bounds) { ComputePageSizes(pages, chunks, skip_rows, num_rows, false, // num_rows is already computed false, // no need to compute string sizes _pass_itm_data->level_type_size, _stream); // print_pages(pages, _stream); } // iterate over all input columns and allocate any associated output // buffers if they are not part of a list hierarchy. mark down // if we have any list columns that need further processing. 
bool has_lists = false; for (size_t idx = 0; idx < _input_columns.size(); idx++) { auto const& input_col = _input_columns[idx]; size_t const max_depth = input_col.nesting_depth(); auto* cols = &_output_buffers; for (size_t l_idx = 0; l_idx < max_depth; l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the // data computed during ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; } // if we haven't already processed this column because it is part of a struct hierarchy else if (out_buf.size == 0) { // add 1 for the offset if this is a list column out_buf.create( out_buf.type.id() == type_id::LIST && l_idx < max_depth ? num_rows + 1 : num_rows, _stream, _mr); } } } // compute output column sizes by examining the pages of the -input- columns if (has_lists) { auto& page_index = _pass_itm_data->page_index; std::vector<input_col_info> h_cols_info; h_cols_info.reserve(_input_columns.size()); std::transform(_input_columns.cbegin(), _input_columns.cend(), std::back_inserter(h_cols_info), [](auto& col) -> input_col_info { return {col.schema_idx, static_cast<size_type>(col.nesting_depth())}; }); auto const max_depth = (*std::max_element(h_cols_info.cbegin(), h_cols_info.cend(), [](auto& l, auto& r) { return l.nesting_depth < r.nesting_depth; })) .nesting_depth; auto const d_cols_info = cudf::detail::make_device_uvector_async( h_cols_info, _stream, rmm::mr::get_current_device_resource()); auto const num_keys = _input_columns.size() * max_depth * pages.size(); // size iterator. 
indexes pages by sorted order rmm::device_uvector<size_type> size_input{num_keys, _stream}; thrust::transform( rmm::exec_policy(_stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(num_keys), size_input.begin(), get_page_nesting_size{ d_cols_info.data(), max_depth, pages.size(), pages.device_ptr(), page_index.begin()}); auto const reduction_keys = cudf::detail::make_counting_transform_iterator(0, get_reduction_key{pages.size()}); cudf::detail::hostdevice_vector<size_t> sizes{_input_columns.size() * max_depth, _stream}; // find the size of each column thrust::reduce_by_key(rmm::exec_policy(_stream), reduction_keys, reduction_keys + num_keys, size_input.cbegin(), thrust::make_discard_iterator(), sizes.d_begin()); // for nested hierarchies, compute per-page start offset thrust::exclusive_scan_by_key( rmm::exec_policy(_stream), reduction_keys, reduction_keys + num_keys, size_input.cbegin(), start_offset_output_iterator{ pages.device_ptr(), page_index.begin(), 0, d_cols_info.data(), max_depth, pages.size()}); sizes.device_to_host_sync(_stream); for (size_type idx = 0; idx < static_cast<size_type>(_input_columns.size()); idx++) { auto const& input_col = _input_columns[idx]; auto* cols = &_output_buffers; for (size_type l_idx = 0; l_idx < static_cast<size_type>(input_col.nesting_depth()); l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this buffer is part of a list hierarchy, we need to determine it's // final size and allocate it here. // // for struct columns, higher levels of the output columns are shared between input // columns. so don't compute any given level more than once. 
if ((out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) && out_buf.size == 0) { auto size = sizes[(idx * max_depth) + l_idx]; // if this is a list column add 1 for non-leaf levels for the terminating offset if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; } // allocate out_buf.create(size, _stream, _mr); } } } } } std::vector<size_t> reader::impl::calculate_page_string_offsets() { auto& chunks = _pass_itm_data->chunks; auto& pages = _pass_itm_data->pages_info; auto const& page_keys = _pass_itm_data->page_keys; auto const& page_index = _pass_itm_data->page_index; std::vector<size_t> col_sizes(_input_columns.size(), 0L); rmm::device_uvector<size_t> d_col_sizes(col_sizes.size(), _stream); // use page_index to fetch page string sizes in the proper order auto val_iter = thrust::make_transform_iterator( page_index.begin(), page_to_string_size{pages.device_ptr(), chunks.device_ptr()}); // do scan by key to calculate string offsets for each page thrust::exclusive_scan_by_key(rmm::exec_policy_nosync(_stream), page_keys.begin(), page_keys.end(), val_iter, page_offset_output_iter{pages.device_ptr(), page_index.data()}); // now sum up page sizes rmm::device_uvector<int> reduce_keys(col_sizes.size(), _stream); thrust::reduce_by_key(rmm::exec_policy_nosync(_stream), page_keys.begin(), page_keys.end(), val_iter, reduce_keys.begin(), d_col_sizes.begin()); cudaMemcpyAsync(col_sizes.data(), d_col_sizes.data(), sizeof(size_t) * col_sizes.size(), cudaMemcpyDeviceToHost, _stream); _stream.synchronize(); return col_sizes; } } // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp/src/io
rapidsai_public_repos/cudf/cpp/src/io/parquet/parquet_gpu.hpp
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "io/comp/gpuinflate.hpp"
#include "io/parquet/parquet.hpp"
#include "io/parquet/parquet_common.hpp"
#include "io/statistics/statistics.cuh"
#include "io/utilities/column_buffer.hpp"
#include "io/utilities/hostdevice_vector.hpp"

#include <cudf/io/datasource.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>

#include <cuda/atomic>
#include <cuda_runtime.h>

#include <string>
#include <type_traits>
#include <utility>
#include <vector>

namespace cudf::io::parquet::detail {

using cudf::io::detail::string_index_pair;

// Largest number of bits to use for dictionary keys
constexpr int MAX_DICT_BITS = 24;

// Total number of unsigned 24 bit values
constexpr size_type MAX_DICT_SIZE = (1 << MAX_DICT_BITS) - 1;

// level decode buffer size.
constexpr int LEVEL_DECODE_BUF_SIZE = 2048;

// Maps a monotonically increasing index into a circular buffer of `rolling_size` slots.
template <int rolling_size>
constexpr int rolling_index(int index)
{
  return index % rolling_size;
}

// see setupLocalPageInfo() in page_decode.cuh for supported page encodings
constexpr bool is_supported_encoding(Encoding enc)
{
  switch (enc) {
    case Encoding::PLAIN:
    case Encoding::PLAIN_DICTIONARY:
    case Encoding::RLE:
    case Encoding::RLE_DICTIONARY:
    case Encoding::DELTA_BINARY_PACKED:
    case Encoding::DELTA_BYTE_ARRAY: return true;
    default: return false;
  }
}

/**
 * @brief Atomically OR `error` into `error_code`.
*/
constexpr void set_error(int32_t error, int32_t* error_code)
{
  if (error != 0) {
    cuda::atomic_ref<int32_t, cuda::thread_scope_device> ref{*error_code};
    ref.fetch_or(error, cuda::std::memory_order_relaxed);
  }
}

/**
 * @brief Enum for the different types of errors that can occur during decoding.
 *
 * These values are used as bitmasks, so they must be powers of 2.
 */
enum class decode_error : int32_t {
  DATA_STREAM_OVERRUN      = 0x1,
  LEVEL_STREAM_OVERRUN     = 0x2,
  UNSUPPORTED_ENCODING     = 0x4,
  INVALID_LEVEL_RUN        = 0x8,
  INVALID_DATA_TYPE        = 0x10,
  EMPTY_PAGE               = 0x20,
  INVALID_DICT_WIDTH       = 0x40,
  DELTA_PARAM_MISMATCH     = 0x80,
  DELTA_PARAMS_UNSUPPORTED = 0x100,
};

/**
 * @brief Struct representing an input column in the file.
 */
struct input_column_info {
  int schema_idx;        // index of this column in the file's schema
  std::string name;      // column name
  bool has_repetition;   // true if any level of this column's hierarchy is a list

  // size == nesting depth. the associated real output
  // buffer index in the dest column for each level of nesting.
  std::vector<int> nesting;

  // _name is taken by value as a sink parameter; move it into place rather
  // than copying (the previous mem-initializer `name(_name)` made a copy).
  input_column_info(int _schema_idx, std::string _name, bool _has_repetition)
    : schema_idx(_schema_idx), name(std::move(_name)), has_repetition(_has_repetition)
  {
  }

  // Number of nesting levels this input column maps to in the output.
  auto nesting_depth() const { return nesting.size(); }
};

// The delta encodings use ULEB128 integers, but parquet only uses max 64 bits.
using uleb128_t = uint64_t; using zigzag128_t = int64_t; // this is in C++23 #if !defined(__cpp_lib_is_scoped_enum) template <typename Enum, bool = std::is_enum_v<Enum>> struct is_scoped_enum { static const bool value = not std::is_convertible_v<Enum, std::underlying_type_t<Enum>>; }; template <typename Enum> struct is_scoped_enum<Enum, false> { static const bool value = false; }; #else using std::is_scoped_enum; #endif // helpers to do bit operations on scoped enums template <class T1, class T2, typename std::enable_if_t<(is_scoped_enum<T1>::value and std::is_same_v<T1, T2>) or (is_scoped_enum<T1>::value and std::is_same_v<uint32_t, T2>) or (is_scoped_enum<T2>::value and std::is_same_v<uint32_t, T1>)>* = nullptr> constexpr uint32_t BitAnd(T1 a, T2 b) { return static_cast<uint32_t>(a) & static_cast<uint32_t>(b); } template <class T1, class T2, typename std::enable_if_t<(is_scoped_enum<T1>::value and std::is_same_v<T1, T2>) or (is_scoped_enum<T1>::value and std::is_same_v<uint32_t, T2>) or (is_scoped_enum<T2>::value and std::is_same_v<uint32_t, T1>)>* = nullptr> constexpr uint32_t BitOr(T1 a, T2 b) { return static_cast<uint32_t>(a) | static_cast<uint32_t>(b); } /** * @brief Enums for the flags in the page header */ enum { PAGEINFO_FLAGS_DICTIONARY = (1 << 0), // Indicates a dictionary page PAGEINFO_FLAGS_V2 = (1 << 1), // V2 page header }; /** * @brief Enum for the two encoding streams */ enum level_type { DEFINITION = 0, REPETITION, NUM_LEVEL_TYPES }; /** * @brief Enum of mask bits for the PageInfo kernel_mask * * Used to control which decode kernels to run. */ enum class decode_kernel_mask { NONE = 0, GENERAL = (1 << 0), // Run catch-all decode kernel STRING = (1 << 1), // Run decode kernel for string data DELTA_BINARY = (1 << 2), // Run decode kernel for DELTA_BINARY_PACKED data DELTA_BYTE_ARRAY = (1 << 3) // Run decode kernel for DELTA_BYTE_ARRAY encoded data }; /** * @brief Nesting information specifically needed by the decode and preprocessing * kernels. 
 *
 * This data is kept separate from PageNestingInfo to keep it as small as possible.
 * It is used in a cached form in shared memory when possible.
 */
struct PageNestingDecodeInfo {
  // set up prior to decoding
  int32_t max_def_level;
  // input repetition/definition levels are remapped with these values
  // into the corresponding real output nesting depths.
  int32_t start_depth;
  int32_t end_depth;

  // computed during preprocessing
  int32_t page_start_value;

  // computed during decoding
  int32_t null_count;

  // used internally during decoding
  int32_t valid_map_offset;
  int32_t valid_count;
  int32_t value_count;
  uint8_t* data_out;
  uint8_t* string_out;
  bitmask_type* valid_map;
};

// Use up to 512 bytes of shared memory as a cache for nesting information.
// As of 1/20/23, this gives us a max nesting depth of 10 (after which it falls back to
// global memory). This handles all but the most extreme cases.
constexpr int max_cacheable_nesting_decode_info = (512) / sizeof(PageNestingDecodeInfo);

/**
 * @brief Nesting information
 *
 * This struct serves two purposes:
 *
 * - It stores information about output (cudf) columns
 * - It provides a mapping from input column depth to output column depth via
 *   the start_depth and end_depth fields.
 *
 */
struct PageNestingInfo {
  // set at initialization (see start_offset_output_iterator in reader_impl_preprocess.cu)
  cudf::type_id type;  // type of the corresponding cudf output column
  bool nullable;

  // TODO: these fields might make sense to move into PageNestingDecodeInfo for memory performance
  // reasons.
  int32_t size;  // this page/nesting-level's row count contribution to the output column, if fully
                 // decoded
  int32_t batch_size;  // the size of the page for this batch
};

/**
 * @brief Struct describing a particular page of column chunk data
 */
struct PageInfo {
  uint8_t* page_data;  // Compressed page data before decompression, or uncompressed data after
                       // decompression
  int32_t compressed_page_size;    // compressed data size in bytes
  int32_t uncompressed_page_size;  // uncompressed data size in bytes
  // for V2 pages, the def and rep level data is not compressed, and lacks the 4-byte length
  // indicator. instead the lengths for these are stored in the header.
  int32_t lvl_bytes[level_type::NUM_LEVEL_TYPES];  // length of the rep/def levels (V2 header)
  // Number of values in this data page or dictionary.
  // Important : the # of input values does not necessarily
  // correspond to the number of rows in the output. It just reflects the number
  // of values in the input stream.
  // - In the case of a flat schema, it will correspond to the # of output rows
  // - In the case of a nested schema, you have to decode the repetition and definition
  //   levels to extract actual column values
  int32_t num_input_values;
  int32_t chunk_row;  // starting row of this page relative to the start of the chunk
  int32_t num_rows;   // number of rows in this page
  // the next four are calculated in gpuComputePageStringSizes
  int32_t num_nulls;       // number of null values (V2 header), but recalculated for string cols
  int32_t num_valids;      // number of non-null values, taking into account skip_rows/num_rows
  int32_t start_val;       // index of first value of the string data stream to use
  int32_t end_val;         // index of last value in string data stream
  int32_t chunk_idx;       // column chunk this page belongs to
  int32_t src_col_schema;  // schema index of this column
  uint8_t flags;           // PAGEINFO_FLAGS_XXX
  Encoding encoding;                   // Encoding for data or dictionary page
  Encoding definition_level_encoding;  // Encoding used for definition levels (data page)
  Encoding repetition_level_encoding;  // Encoding used for repetition levels (data page)

  // for nested types, we run a preprocess step in order to determine output
  // column sizes. Because of this, we can jump directly to the position in the
  // input data to start decoding instead of reading all of the data and discarding
  // rows we don't care about.
  //
  // NOTE: for flat hierarchies we do not do the preprocess step, so skipped_values and
  // skipped_leaf_values will always be 0.
  //
  // # of values skipped in the repetition/definition level stream
  int32_t skipped_values;
  // # of values skipped in the actual data stream.
  int32_t skipped_leaf_values;
  // for string columns only, the size of all the chars in the string for
  // this page. only valid/computed during the base preprocess pass
  int32_t str_bytes;
  int32_t str_offset;  // offset into string data for this page

  // nesting information (input/output) for each page. this array contains
  // input column nesting information, output column nesting information and
  // mappings between the two. the length of the array, nesting_info_size is
  // max(num_output_nesting_levels, max_definition_levels + 1)
  int32_t num_output_nesting_levels;
  int32_t nesting_info_size;
  PageNestingInfo* nesting;
  PageNestingDecodeInfo* nesting_decode;

  // level decode buffers
  uint8_t* lvl_decode_buf[level_type::NUM_LEVEL_TYPES];

  // temporary space for decoding DELTA_BYTE_ARRAY encoded strings
  int64_t temp_string_size;
  uint8_t* temp_string_buf;

  decode_kernel_mask kernel_mask;  // which decode kernel(s) must process this page
};

/**
 * @brief Struct describing a particular chunk of column data
 */
struct ColumnChunkDesc {
  constexpr ColumnChunkDesc() noexcept {};
  explicit ColumnChunkDesc(size_t compressed_size_,
                           uint8_t* compressed_data_,
                           size_t num_values_,
                           uint16_t datatype_,
                           uint16_t datatype_length_,
                           size_t start_row_,
                           uint32_t num_rows_,
                           int16_t max_definition_level_,
                           int16_t max_repetition_level_,
                           int16_t max_nesting_depth_,
                           uint8_t def_level_bits_,
                           uint8_t rep_level_bits_,
                           int8_t codec_,
                           int8_t converted_type_,
                           thrust::optional<LogicalType> logical_type_,
                           int8_t decimal_precision_,
                           int32_t ts_clock_rate_,
                           int32_t src_col_index_,
                           int32_t src_col_schema_)
    : compressed_data(compressed_data_),
      compressed_size(compressed_size_),
      num_values(num_values_),
      start_row(start_row_),
      num_rows(num_rows_),
      max_level{max_definition_level_, max_repetition_level_},
      max_nesting_depth{max_nesting_depth_},
      // physical type and its length are packed into a single field; see data_type below
      data_type(datatype_ | (datatype_length_ << 3)),
      level_bits{def_level_bits_, rep_level_bits_},
      num_data_pages(0),
      num_dict_pages(0),
      max_num_pages(0),
      page_info(nullptr),
      str_dict_index(nullptr),
      valid_map_base{nullptr},
      column_data_base{nullptr},
      column_string_base{nullptr},
      codec(codec_),
      converted_type(converted_type_),
      logical_type(logical_type_),
      decimal_precision(decimal_precision_),
      ts_clock_rate(ts_clock_rate_),
      src_col_index(src_col_index_),
      src_col_schema(src_col_schema_)
  {
  }

  uint8_t const* compressed_data{};  // pointer to compressed column chunk data
  size_t compressed_size{};          // total compressed data size for this chunk
  size_t num_values{};               // total number of values in this column
  size_t start_row{};                // starting row of this chunk
  uint32_t num_rows{};               // number of rows in this chunk
  int16_t max_level[level_type::NUM_LEVEL_TYPES]{};  // max definition/repetition level
  int16_t max_nesting_depth{};                       // max nesting depth of the output
  uint16_t data_type{};  // basic column data type, ((type_length << 3) |
                         // parquet::Type)
  uint8_t
    level_bits[level_type::NUM_LEVEL_TYPES]{};  // bits to encode max definition/repetition levels
  int32_t num_data_pages{};                     // number of data pages
  int32_t num_dict_pages{};                     // number of dictionary pages
  int32_t max_num_pages{};                      // size of page_info array
  PageInfo* page_info{};                        // output page info for up to num_dict_pages +
                                                // num_data_pages (dictionary pages first)
  string_index_pair* str_dict_index{};          // index for string dictionary
  bitmask_type** valid_map_base{};              // base pointers of valid bit map for this column
  void** column_data_base{};                    // base pointers of column data
  void** column_string_base{};                  // base pointers of column string data
  int8_t codec{};                               // compressed codec enum
  int8_t converted_type{};                      // converted type enum
  thrust::optional<LogicalType> logical_type{};  // logical type
  int8_t decimal_precision{};                    // Decimal precision
  int32_t ts_clock_rate{};  // output timestamp clock frequency (0=default, 1000=ms, 1000000000=ns)

  int32_t src_col_index{};   // my input column index
  int32_t src_col_schema{};  // my schema index in the file
};

/**
 * @brief Struct describing an encoder column
 */
struct parquet_column_device_view : stats_column_desc {
  Type physical_type;            //!< physical data type
  ConvertedType converted_type;  //!< logical data type
  uint8_t level_bits;  //!< bits to encode max definition (lower nibble) & repetition (upper nibble)
                       //!< levels
  constexpr uint8_t num_def_level_bits() const { return level_bits & 0xf; }
  constexpr uint8_t num_rep_level_bits() const { return level_bits >> 4; }

  size_type const* const* nesting_offsets;  //!< If column is a nested type, contains offset array
                                            //!< of each nesting level

  size_type const* level_offsets;  //!< Offset array for per-row pre-calculated rep/def level values
  uint8_t const* rep_values;       //!< Pre-calculated repetition level values
  uint8_t const* def_values;       //!< Pre-calculated definition level values
  uint8_t const* nullability;  //!< Array of nullability of each nesting level. e.g. nullable[0] is
                               //!< nullability of parent_column. May be different from
                               //!< col.nullable() in case of chunked writing.
  bool output_as_byte_array;   //!< Indicates this list column is being written as a byte array
};

struct EncColumnChunk;

/**
 * @brief Struct describing an encoder page fragment
 */
struct PageFragment {
  uint32_t fragment_data_size;  //!< Size of fragment data in bytes
  uint32_t dict_data_size;      //!< Size of dictionary for this fragment
  uint32_t num_values;  //!< Number of values in fragment. Different from num_rows for nested type
  uint32_t start_value_idx;
  uint32_t num_leaf_values;  //!< Number of leaf values in fragment. Does not include nulls at
                             //!< non-leaf level
  size_type start_row;       //!< First row in fragment
  uint16_t num_rows;         //!< Number of rows in fragment
  uint16_t num_dict_vals;    //!< Number of unique dictionary entries
  EncColumnChunk* chunk;     //!< The chunk that this fragment belongs to
};

/// Size of hash used for building dictionaries
constexpr unsigned int kDictHashBits = 16;
constexpr size_t kDictScratchSize    = (1 << kDictHashBits) * sizeof(uint32_t);

struct EncPage;
struct slot_type;

// convert Encoding to a mask value
constexpr uint32_t encoding_to_mask(Encoding encoding)
{
  return 1 << static_cast<uint32_t>(encoding);
}

/**
 * @brief Enum of mask bits for the EncPage kernel_mask
 *
 * Used to control which encode kernels to run.
 */
enum class encode_kernel_mask {
  PLAIN        = (1 << 0),  // Run plain encoding kernel
  DICTIONARY   = (1 << 1),  // Run dictionary encoding kernel
  DELTA_BINARY = (1 << 2)   // Run DELTA_BINARY_PACKED encoding kernel
};

/**
 * @brief Struct describing an encoder column chunk
 */
struct EncColumnChunk {
  parquet_column_device_view const* col_desc;  //!< Column description
  size_type col_desc_id;
  PageFragment* fragments;        //!< First fragment in chunk
  uint8_t* uncompressed_bfr;      //!< Uncompressed page data
  uint8_t* compressed_bfr;        //!< Compressed page data
  statistics_chunk const* stats;  //!< Fragment statistics
  uint32_t bfr_size;              //!< Uncompressed buffer size
  uint32_t compressed_size;       //!< Compressed buffer size
  uint32_t max_page_data_size;    //!< Max data size (excluding header) of any page in this chunk
  uint32_t page_headers_size;     //!< Sum of size of all page headers
  size_type start_row;            //!< First row of chunk
  uint32_t num_rows;              //!< Number of rows in chunk
  size_type num_values;     //!< Number of values in chunk. Different from num_rows for nested types
  uint32_t first_fragment;  //!< First fragment of chunk
  EncPage* pages;           //!< Ptr to pages that belong to this chunk
  uint32_t first_page;      //!< First page of chunk
  uint32_t num_pages;       //!< Number of pages in chunk
  uint8_t is_compressed;    //!< Nonzero if the chunk uses compression
  uint32_t dictionary_size;    //!< Size of dictionary page including header
  uint32_t ck_stat_size;       //!< Size of chunk-level statistics (included in 1st page header)
  slot_type* dict_map_slots;   //!< Hash map storage for calculating dict encoding for this chunk
  size_type dict_map_size;     //!< Size of dict_map_slots
  size_type num_dict_entries;  //!< Total number of entries in dictionary
  size_type
    uniq_data_size;  //!< Size of dictionary page (set of all unique values) if dict enc is used
  size_type plain_data_size;  //!< Size of data in this chunk if plain encoding is used
  size_type* dict_data;       //!< Dictionary data (unique row indices)
  size_type* dict_index;  //!< Index of value in dictionary page. column[dict_data[dict_index[row]]]
  uint8_t dict_rle_bits;  //!< Bit size for encoding dictionary indices
  bool use_dictionary;    //!< True if the chunk uses dictionary encoding
  uint8_t* column_index_blob;  //!< Binary blob containing encoded column index for this chunk
  uint32_t column_index_size;  //!< Size of column index blob
  uint32_t encodings;          //!< Mask representing the set of encodings used for this chunk
};

/**
 * @brief Struct describing an encoder data page
 */
struct EncPage {
  uint8_t* page_data;        //!< Ptr to uncompressed page
  uint8_t* compressed_data;  //!< Ptr to compressed page
  uint16_t num_fragments;    //!< Number of fragments in page
  PageType page_type;        //!< Page type
  Encoding encoding;         //!< Encoding used for page data
  EncColumnChunk* chunk;     //!< Chunk that this page belongs to
  uint32_t chunk_id;         //!< Index in chunk array
  uint32_t hdr_size;         //!< Size of page header
  uint32_t max_hdr_size;     //!< Maximum size of page header
  uint32_t max_data_size;    //!< Maximum size of coded page data (excluding header)
  uint32_t start_row;        //!< First row of page
  uint32_t num_rows;         //!< Rows in page
  uint32_t num_leaf_values;  //!< Values in page. Different from num_rows in case of nested types
  uint32_t num_values;  //!< Number of def/rep level values in page. Includes null/empty elements in
                        //!< non-leaf levels
  uint32_t def_lvl_bytes;       //!< Number of bytes of encoded definition level data (V2 only)
  uint32_t rep_lvl_bytes;       //!< Number of bytes of encoded repetition level data (V2 only)
  compression_result* comp_res; //!< Ptr to compression result
  uint32_t num_nulls;           //!< Number of null values (V2 only) (down here for alignment)
  encode_kernel_mask kernel_mask;  //!< Mask used to control which encoding kernels to run
};

/**
 * @brief Test if the given column chunk is in a string column
 */
constexpr bool is_string_col(ColumnChunkDesc const& chunk)
{
  auto const not_converted_to_decimal = chunk.converted_type != DECIMAL;
  // data_type packs ((type_length << 3) | parquet::Type); a BYTE_ARRAY with
  // type_length 4 is excluded here (per the variable name, presumably a hashed
  // byte array — TODO confirm against the reader's hashing path)
  auto const non_hashed_byte_array =
    (chunk.data_type & 7) == BYTE_ARRAY and (chunk.data_type >> 3) != 4;
  auto const fixed_len_byte_array = (chunk.data_type & 7) == FIXED_LEN_BYTE_ARRAY;
  return not_converted_to_decimal and (non_hashed_byte_array or fixed_len_byte_array);
}

/**
 * @brief Launches kernel for parsing the page headers in the column chunks
 *
 * @param[in] chunks List of column chunks
 * @param[in] num_chunks Number of column chunks
 * @param[out] error_code Error code for kernel failures
 * @param[in] stream CUDA stream to use
 */
void DecodePageHeaders(ColumnChunkDesc* chunks,
                       int32_t num_chunks,
                       int32_t* error_code,
                       rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for building the dictionary index for the column
 * chunks
 *
 * @param[in] chunks List of column chunks
 * @param[in] num_chunks Number of column chunks
 * @param[in] stream CUDA stream to use
 */
void BuildStringDictionaryIndex(ColumnChunkDesc* chunks,
                                int32_t num_chunks,
                                rmm::cuda_stream_view stream);

/**
 * @brief Get the set of kernels that need to be invoked on these pages as a bitmask.
 *
 * This function performs a bitwise OR on all of the individual `kernel_mask` fields on the pages
 * passed in.
 *
 * @param[in] pages List of pages to aggregate
 * @param[in] stream CUDA stream to use
 * @return Bitwise OR of all page `kernel_mask` values
 */
uint32_t GetAggregatedDecodeKernelMask(cudf::detail::hostdevice_vector<PageInfo>& pages,
                                       rmm::cuda_stream_view stream);

/**
 * @brief Compute page output size information.
 *
 * When dealing with nested hierarchies (those that contain lists), or when doing a chunked
 * read, we need to obtain more information up front than we have with just the row counts.
 *
 * - We need to determine the sizes of each output cudf column per page
 * - We need to determine information about where to start decoding the value stream
 *   if we are using custom user bounds (skip_rows / num_rows)
 * - We need to determine actual number of top level rows per page
 * - If we are doing a chunked read, we need to determine the total string size per page
 *
 *
 * @param pages All pages to be decoded
 * @param chunks All chunks to be decoded
 * @param min_row crop all rows below min_row
 * @param num_rows Maximum number of rows to read
 * @param compute_num_rows If set to true, the num_rows field in PageInfo will be
 * computed
 * @param compute_string_sizes If set to true, the str_bytes field in PageInfo will
 * be computed
 * @param level_type_size Size in bytes of the type for level decoding
 * @param stream CUDA stream to use
 */
void ComputePageSizes(cudf::detail::hostdevice_vector<PageInfo>& pages,
                      cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                      size_t min_row,
                      size_t num_rows,
                      bool compute_num_rows,
                      bool compute_string_sizes,
                      int level_type_size,
                      rmm::cuda_stream_view stream);

/**
 * @brief Compute string page output size information.
 *
 * String columns need accurate data size information to preallocate memory in the column buffer to
 * store the char data. This calls a kernel to calculate information needed by the string decoding
 * kernel. On exit, the `str_bytes`, `num_nulls`, `num_valids`, and `str_offset` fields of the
 * PageInfo struct are updated. This call ignores non-string columns.
 *
 * @param[in,out] pages All pages to be decoded
 * @param[in] chunks All chunks to be decoded
 * @param[out] temp_string_buf Temporary space needed for decoding DELTA_BYTE_ARRAY strings
 * @param[in] min_row crop all rows below min_row
 * @param[in] num_rows Maximum number of rows to read
 * @param[in] level_type_size Size in bytes of the type for level decoding
 * @param[in] kernel_mask Mask of kernels to run
 * @param[in] stream CUDA stream to use
 */
void ComputePageStringSizes(cudf::detail::hostdevice_vector<PageInfo>& pages,
                            cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                            rmm::device_uvector<uint8_t>& temp_string_buf,
                            size_t min_row,
                            size_t num_rows,
                            int level_type_size,
                            uint32_t kernel_mask,
                            rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for reading the column data stored in the pages
 *
 * The page data will be written to the output pointed to in the page's
 * associated column chunk.
 *
 * @param[in,out] pages All pages to be decoded
 * @param[in] chunks All chunks to be decoded
 * @param[in] num_rows Total number of rows to read
 * @param[in] min_row crop all rows below min_row
 * @param[in] level_type_size Size in bytes of the type for level decoding
 * @param[out] error_code Error code for kernel failures
 * @param[in] stream CUDA stream to use
 */
void DecodePageData(cudf::detail::hostdevice_vector<PageInfo>& pages,
                    cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                    size_t num_rows,
                    size_t min_row,
                    int level_type_size,
                    int32_t* error_code,
                    rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for reading the string column data stored in the pages
 *
 * The page data will be written to the output pointed to in the page's
 * associated column chunk.
 *
 * @param[in,out] pages All pages to be decoded
 * @param[in] chunks All chunks to be decoded
 * @param[in] num_rows Total number of rows to read
 * @param[in] min_row crop all rows below min_row
 * @param[in] level_type_size Size in bytes of the type for level decoding
 * @param[out] error_code Error code for kernel failures
 * @param[in] stream CUDA stream to use
 */
void DecodeStringPageData(cudf::detail::hostdevice_vector<PageInfo>& pages,
                          cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                          size_t num_rows,
                          size_t min_row,
                          int level_type_size,
                          int32_t* error_code,
                          rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for reading the DELTA_BINARY_PACKED column data stored in the pages
 *
 * The page data will be written to the output pointed to in the page's
 * associated column chunk.
 *
 * @param[in,out] pages All pages to be decoded
 * @param[in] chunks All chunks to be decoded
 * @param[in] num_rows Total number of rows to read
 * @param[in] min_row crop all rows below min_row
 * @param[in] level_type_size Size in bytes of the type for level decoding
 * @param[out] error_code Error code for kernel failures
 * @param[in] stream CUDA stream to use
 */
void DecodeDeltaBinary(cudf::detail::hostdevice_vector<PageInfo>& pages,
                       cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                       size_t num_rows,
                       size_t min_row,
                       int level_type_size,
                       int32_t* error_code,
                       rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for reading the DELTA_BYTE_ARRAY column data stored in the pages
 *
 * The page data will be written to the output pointed to in the page's
 * associated column chunk.
 *
 * @param[in,out] pages All pages to be decoded
 * @param[in] chunks All chunks to be decoded
 * @param[in] num_rows Total number of rows to read
 * @param[in] min_row crop all rows below min_row
 * @param[in] level_type_size Size in bytes of the type for level decoding
 * @param[out] error_code Error code for kernel failures
 * @param[in] stream CUDA stream to use
 */
void DecodeDeltaByteArray(cudf::detail::hostdevice_vector<PageInfo>& pages,
                          cudf::detail::hostdevice_vector<ColumnChunkDesc> const& chunks,
                          size_t num_rows,
                          size_t min_row,
                          int level_type_size,
                          int32_t* error_code,
                          rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for initializing encoder row group fragments
 *
 * These fragments are used to calculate row group boundaries.
 * Based on the number of rows in each fragment, populates the value count, the size of data in the
 * fragment, the number of unique values, and the data size of unique values.
 *
 * @param[out] frag Fragment array [column_id][fragment_id]
 * @param[in] col_desc Column description array [column_id]
 * @param[in] partitions Information about partitioning of table
 * @param[in] first_frag_in_part A Partition's offset into fragment array
 * @param[in] fragment_size Number of rows per fragment
 * @param[in] stream CUDA stream to use
 */
void InitRowGroupFragments(cudf::detail::device_2dspan<PageFragment> frag,
                           device_span<parquet_column_device_view const> col_desc,
                           device_span<partition_info const> partitions,
                           device_span<int const> first_frag_in_part,
                           uint32_t fragment_size,
                           rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for calculating encoder page fragments with variable fragment sizes
 *
 * Based on the number of rows in each fragment, populates the value count, the size of data in the
 * fragment, the number of unique values, and the data size of unique values.
 *
 * This assumes an initial call to InitRowGroupFragments has been made.
 *
 * @param[out] frag Fragment array [fragment_id]
 * @param[in] column_frag_sizes Number of rows per fragment per column [column_id]
 * @param[in] stream CUDA stream to use
 */
void CalculatePageFragments(device_span<PageFragment> frag,
                            device_span<size_type const> column_frag_sizes,
                            rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for initializing fragment statistics groups with variable fragment sizes
 *
 * @param[out] groups Statistics groups [total_fragments]
 * @param[in] fragments Page fragments [total_fragments]
 * @param[in] stream CUDA stream to use
 */
void InitFragmentStatistics(device_span<statistics_group> groups,
                            device_span<PageFragment const> fragments,
                            rmm::cuda_stream_view stream);

/**
 * @brief Initialize per-chunk hash maps used for dictionary with sentinel values
 *
 * @param chunks Flat span of chunks to initialize hash maps for
 * @param stream CUDA stream to use
 */
void initialize_chunk_hash_maps(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream);

/**
 * @brief Insert chunk values into their respective hash maps
 *
 * @param frags Column fragments
 * @param stream CUDA stream to use
 */
void populate_chunk_hash_maps(cudf::detail::device_2dspan<PageFragment const> frags,
                              rmm::cuda_stream_view stream);

/**
 * @brief Compact dictionary hash map entries into chunk.dict_data
 *
 * @param chunks Flat span of chunks to compact hash maps for
 * @param stream CUDA stream to use
 */
void collect_map_entries(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream);

/**
 * @brief Get the Dictionary Indices for each row
 *
 * For each row of a chunk, gets the indices into chunk.dict_data which contains the value otherwise
 * stored in input column [row]. Stores these indices into chunk.dict_index.
 *
 * Since dict_data itself contains indices into the original cudf column, this means that
 * col[row] == col[dict_data[dict_index[row - chunk.start_row]]]
 *
 * @param frags Column fragments
 * @param stream CUDA stream to use
 */
void get_dictionary_indices(cudf::detail::device_2dspan<PageFragment const> frags,
                            rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for initializing encoder data pages
 *
 * @param[in,out] chunks Column chunks [rowgroup][column]
 * @param[out] pages Encode page array (null if just counting pages)
 * @param[out] page_sizes Per-page uncompressed data sizes
 * @param[out] comp_page_sizes Per-page compressed output-bound sizes
 * @param[in] col_desc Column description array [column_id]
 * @param[in] num_columns Number of columns
 * @param[in] max_page_size_bytes Maximum uncompressed size of a data page, in bytes
 * @param[in] max_page_size_rows Maximum number of rows in a data page
 * @param[in] page_align Required alignment for uncompressed pages
 * @param[in] write_v2_headers True if V2 page headers should be written
 * @param[in] page_grstats Setup for page-level stats
 * @param[in] chunk_grstats Setup for chunk-level stats
 * @param[in] stream CUDA stream to use
 */
void InitEncoderPages(cudf::detail::device_2dspan<EncColumnChunk> chunks,
                      device_span<EncPage> pages,
                      device_span<size_type> page_sizes,
                      device_span<size_type> comp_page_sizes,
                      device_span<parquet_column_device_view const> col_desc,
                      int32_t num_columns,
                      size_t max_page_size_bytes,
                      size_type max_page_size_rows,
                      uint32_t page_align,
                      bool write_v2_headers,
                      statistics_merge_group* page_grstats,
                      statistics_merge_group* chunk_grstats,
                      rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel for packing column data into parquet pages
 *
 * If compression is to be used, `comp_in`, `comp_out`, and `comp_res` will be initialized for
 * use in subsequent compression operations.
 *
 * @param[in,out] pages Device array of EncPages (unordered)
 * @param[in] write_v2_headers True if V2 page headers should be written
 * @param[out] comp_in Compressor input buffers
 * @param[out] comp_out Compressor output buffers
 * @param[out] comp_res Compressor results
 * @param[in] stream CUDA stream to use
 */
void EncodePages(device_span<EncPage> pages,
                 bool write_v2_headers,
                 device_span<device_span<uint8_t const>> comp_in,
                 device_span<device_span<uint8_t>> comp_out,
                 device_span<compression_result> comp_res,
                 rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel to make the compressed vs uncompressed chunk-level decision
 *
 * Also calculates the set of page encodings used for each chunk.
 *
 * @param[in,out] chunks Column chunks (updated with actual compressed/uncompressed sizes)
 * @param[in] stream CUDA stream to use
 */
void DecideCompression(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel to encode page headers
 *
 * @param[in,out] pages Device array of EncPages
 * @param[in] comp_res Compressor status
 * @param[in] page_stats Optional page-level statistics to be included in page header
 * @param[in] chunk_stats Optional chunk-level statistics to be encoded
 * @param[in] stream CUDA stream to use
 */
void EncodePageHeaders(device_span<EncPage> pages,
                       device_span<compression_result const> comp_res,
                       device_span<statistics_chunk const> page_stats,
                       statistics_chunk const* chunk_stats,
                       rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel to gather pages to a single contiguous block per chunk
 *
 * @param[in,out] chunks Column chunks
 * @param[in] pages Device array of EncPages
 * @param[in] stream CUDA stream to use
 */
void GatherPages(device_span<EncColumnChunk> chunks,
                 device_span<EncPage const> pages,
                 rmm::cuda_stream_view stream);

/**
 * @brief Launches kernel to calculate ColumnIndex information per chunk
 *
 * @param[in,out] chunks Column chunks
 * @param[in] column_stats Page-level statistics to be encoded
 * @param[in] column_index_truncate_length Max length of min/max values
 * @param[in] stream CUDA stream to use
 */
void EncodeColumnIndexes(device_span<EncColumnChunk> chunks,
                         device_span<statistics_chunk const> column_stats,
                         int32_t column_index_truncate_length,
                         rmm::cuda_stream_view stream);

}  // namespace cudf::io::parquet::detail
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/doxygen/DoxygenLayout.xml
<doxygenlayout version="1.0"> <!-- Generated by doxygen 1.8.20 --> <!-- Navigation index tabs for HTML output --> <navindex> <tab type="mainpage" visible="yes" title=""/> <tab type="pages" visible="yes" title="" intro=""/> <tab type="user" url="@ref DEVELOPER_GUIDE" title="Developer Guide"/> <tab type="modules" visible="yes" title="" intro=""/> <tab type="namespaces" visible="yes" title=""> <tab type="namespacelist" visible="yes" title="" intro=""/> <tab type="namespacemembers" visible="yes" title="" intro=""/> </tab> <tab type="interfaces" visible="yes" title=""> <tab type="interfacelist" visible="yes" title="" intro=""/> <tab type="interfaceindex" visible="$ALPHABETICAL_INDEX" title=""/> <tab type="interfacehierarchy" visible="yes" title="" intro=""/> </tab> <tab type="classes" visible="yes" title=""> <tab type="classlist" visible="yes" title="" intro=""/> <tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/> <tab type="hierarchy" visible="yes" title="" intro=""/> <tab type="classmembers" visible="yes" title="" intro=""/> </tab> <tab type="structs" visible="yes" title=""> <tab type="structlist" visible="yes" title="" intro=""/> <tab type="structindex" visible="$ALPHABETICAL_INDEX" title=""/> </tab> <tab type="exceptions" visible="yes" title=""> <tab type="exceptionlist" visible="yes" title="" intro=""/> <tab type="exceptionindex" visible="$ALPHABETICAL_INDEX" title=""/> <tab type="exceptionhierarchy" visible="yes" title="" intro=""/> </tab> <tab type="files" visible="yes" title=""> <tab type="filelist" visible="yes" title="" intro=""/> <tab type="globals" visible="yes" title="" intro=""/> </tab> <tab type="examples" visible="yes" title="" intro=""/> </navindex> <!-- Layout definition for a class page --> <class> <briefdescription visible="yes"/> <includes visible="$SHOW_INCLUDE_FILES"/> <inheritancegraph visible="$CLASS_GRAPH"/> <collaborationgraph visible="$COLLABORATION_GRAPH"/> <memberdecl> <nestedclasses visible="yes" title=""/> <publictypes 
title=""/> <services title=""/> <interfaces title=""/> <publicslots title=""/> <signals title=""/> <publicmethods title=""/> <publicstaticmethods title=""/> <publicattributes title=""/> <publicstaticattributes title=""/> <protectedtypes title=""/> <protectedslots title=""/> <protectedmethods title=""/> <protectedstaticmethods title=""/> <protectedattributes title=""/> <protectedstaticattributes title=""/> <packagetypes title=""/> <packagemethods title=""/> <packagestaticmethods title=""/> <packageattributes title=""/> <packagestaticattributes title=""/> <properties title=""/> <events title=""/> <privatetypes title=""/> <privateslots title=""/> <privatemethods title=""/> <privatestaticmethods title=""/> <privateattributes title=""/> <privatestaticattributes title=""/> <friends title=""/> <related title="" subtitle=""/> <membergroups visible="yes"/> </memberdecl> <detaileddescription title=""/> <memberdef> <inlineclasses title=""/> <typedefs title=""/> <enums title=""/> <services title=""/> <interfaces title=""/> <constructors title=""/> <functions title=""/> <related title=""/> <variables title=""/> <properties title=""/> <events title=""/> </memberdef> <allmemberslink visible="yes"/> <usedfiles visible="$SHOW_USED_FILES"/> <authorsection visible="yes"/> </class> <!-- Layout definition for a namespace page --> <namespace> <briefdescription visible="yes"/> <memberdecl> <nestednamespaces visible="yes" title=""/> <constantgroups visible="yes" title=""/> <interfaces visible="yes" title=""/> <classes visible="yes" title=""/> <structs visible="yes" title=""/> <exceptions visible="yes" title=""/> <typedefs title=""/> <sequences title=""/> <dictionaries title=""/> <enums title=""/> <functions title=""/> <variables title=""/> <membergroups visible="yes"/> </memberdecl> <detaileddescription title=""/> <memberdef> <inlineclasses title=""/> <typedefs title=""/> <sequences title=""/> <dictionaries title=""/> <enums title=""/> <functions title=""/> <variables title=""/> 
</memberdef> <authorsection visible="yes"/> </namespace> <!-- Layout definition for a file page --> <file> <briefdescription visible="yes"/> <includes visible="$SHOW_INCLUDE_FILES"/> <includegraph visible="$INCLUDE_GRAPH"/> <includedbygraph visible="$INCLUDED_BY_GRAPH"/> <sourcelink visible="yes"/> <memberdecl> <interfaces visible="yes" title=""/> <classes visible="yes" title=""/> <structs visible="yes" title=""/> <exceptions visible="yes" title=""/> <namespaces visible="yes" title=""/> <constantgroups visible="yes" title=""/> <defines title=""/> <typedefs title=""/> <sequences title=""/> <dictionaries title=""/> <enums title=""/> <functions title=""/> <variables title=""/> <membergroups visible="yes"/> </memberdecl> <detaileddescription title=""/> <memberdef> <inlineclasses title=""/> <defines title=""/> <typedefs title=""/> <sequences title=""/> <dictionaries title=""/> <enums title=""/> <functions title=""/> <variables title=""/> </memberdef> <authorsection/> </file> <!-- Layout definition for a group page --> <group> <briefdescription visible="yes"/> <groupgraph visible="$GROUP_GRAPHS"/> <memberdecl> <nestedgroups visible="yes" title=""/> <dirs visible="yes" title=""/> <files visible="yes" title=""/> <namespaces visible="yes" title=""/> <classes visible="yes" title=""/> <defines title=""/> <typedefs title=""/> <sequences title=""/> <dictionaries title=""/> <enums title=""/> <enumvalues title=""/> <functions title=""/> <variables title=""/> <signals title=""/> <publicslots title=""/> <protectedslots title=""/> <privateslots title=""/> <events title=""/> <properties title=""/> <friends title=""/> <membergroups visible="yes"/> </memberdecl> <detaileddescription title=""/> <memberdef> <pagedocs/> <inlineclasses title=""/> <defines title=""/> <typedefs title=""/> <sequences title=""/> <dictionaries title=""/> <enums title=""/> <enumvalues title=""/> <functions title=""/> <variables title=""/> <signals title=""/> <publicslots title=""/> <protectedslots title=""/> 
<privateslots title=""/> <events title=""/> <properties title=""/> <friends title=""/> </memberdef> <authorsection visible="yes"/> </group> <!-- Layout definition for a directory page --> <directory> <briefdescription visible="yes"/> <directorygraph visible="yes"/> <memberdecl> <dirs visible="yes"/> <files visible="yes"/> </memberdecl> <detaileddescription title=""/> </directory> </doxygenlayout>
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/doxygen/modify_fences.sh
#!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION.

# Rewrite the GitHub-flavored Markdown code fences in our MD files into the
# PHP-style fences that Doxygen understands, so code blocks render correctly
# both in the GitHub UI and in the published Doxygen documentation.
exec sed -e 's/```c++/```{.cpp}/g' "$@"
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/doxygen/unicode.md
# Unicode Limitations

The strings column currently supports only UTF-8 characters internally.
For functions that require character testing (e.g. cudf::strings::all_characters_of_type()) or
case conversion (e.g. cudf::strings::capitalize(), etc.) only the 16-bit
[Unicode 13.0](http://www.unicode.org/versions/Unicode13.0.0) character code-point values
(0-65535) are supported.
Case conversion and character testing on characters above code-point 65535 are not supported.

Case conversions that are context-sensitive are not supported.
Also, case conversions that result in multiple characters are not reversible. That is, adjacent
individual characters will not be case converted to a single character.
For example, converting character ß to upper case will result in the characters "SS". But
converting "SS" to lower case will produce "ss".

Strings case and type APIs:

- cudf::strings::all_characters_of_type()
- cudf::strings::to_upper()
- cudf::strings::to_lower()
- cudf::strings::capitalize()
- cudf::strings::title()
- cudf::strings::swapcase()

Also, using regex patterns that use the shorthand character classes `\d \D \w \W \s \S` will
include only appropriate characters with code-points in the range 0-65535.
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/doxygen/regex.md
# Regex Features This page specifies which regular expression (regex) features are currently supported by libcudf strings column APIs that accept regex patterns: - cudf::strings::contains_re() - cudf::strings::matches_re() - cudf::strings::count_re() - cudf::strings::extract() - cudf::strings::extract_all_record() - cudf::strings::findall() - cudf::strings::replace_re() - cudf::strings::replace_with_backrefs() - cudf::strings::split_re() - cudf::strings::split_record_re() The details are based on features documented at https://www.regular-expressions.info/reference.html **Note:** The alternation character is the pipe character `|` and not the character included in the tables on this page. There is an issue including the pipe character inside the table markdown that is rendered by doxygen. **Invalid regex patterns will result in undefined behavior**. This includes but is not limited to the following: - Unescaped special characters (listed in the third row of the Characters table below) when they are intended to match as literals. - Unmatched paired special characters like `()`, `[]`, and `{}`. - Empty groups, classes, or quantifiers. That is, `()` and `[]` without an enclosing expression and `{}` without a valid integer. - Incomplete ranges in character classes like `[-z]`, `[a-]`, and `[-]`. - Unqualified quantifiers. That is, a quantifier with no preceding item to match like `*a`, `a⎮?`, `(+)`, `{2}a`, etc. 
## Features Supported ### Characters | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Literal character | Any character except `[\^$.⎮?*+()` | All characters except the listed special characters match a single instance of themselves | `a` matches `a` | | Literal curly braces | `{` and `}` | `{` and `}` are literal characters, unless they are part of a valid regular expression token such as a quantifier `{3}` | `{` matches `{` | | Backslash escapes a metacharacter | `\` followed by any of `[\^$.⎮?*+(){}` | A backslash escapes special characters to suppress their special meaning | `\*` matches `*` | | Hexadecimal escape | `\xFF` where `FF` are 2 hexadecimal digits | Matches the character at the specified position in the ASCII table | `\x40` matches `@` | | Character escape | `\n`, `\r` and `\t` | Match an line-feed (LF) character, carriage return (CR) character and a tab character respectively | `\r\n` matches a Windows CRLF line break | | Character escape | `\a` | Match the "alert" or "bell" control character (ASCII 0x07) | | | Character escape | `\f` | Match the form-feed control character (ASCII 0x0C) | | | NULL escape | `\0` | Match the NULL character || | Octal escape | `\100` through `\177` <br/> `\200` through `\377` <br/> `\01` through `\07` <br/> `\010` through `\077` | Matches the character at the specified position in the ASCII table | `\100` matches `@` | ### Basic Features | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Dot | . (dot) | Matches any single character except line break characters. Optionally match line break characters. The behavior of the dot when encountering a `\n` character can be controlled by cudf::strings::regex_flags for some regex APIs. | . matches x or (almost) any other character | | Alternation | `⎮` (pipe) | Causes the regex engine to match either the part on the left side, or the part on the right side. 
Can be strung together into a series of alternations. | `abc⎮def⎮xyz` matches `abc`, `def` or `xyz` | ### Character Classes | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Character class | `[` | `[` begins a character class. | | | Literal character | Any character except `\^-]` | All characters except the listed special characters are literal characters that add themselves to the character class. | `[abc]` matches `a`, `b` or `c` | | Backslash escapes a metacharacter | `\` (backslash) followed by any of `\^-]` | A backslash escapes special characters to suppress their special meaning. | `[\^\]]` matches `^` or `]` | | Range | `-` (hyphen) between two tokens that each specify a single character. | Adds a range of characters to the character class. If '`-`' is the first or last character (e.g. `[a-]` or `[-z]`), it will match a literal '`-`' and not infer a range. | `[a-zA-Z0-9]` matches any ASCII letter or digit | | Negated character class | `^` (caret) immediately after the opening `[` | Negates the character class, causing it to match a single character not listed in the character class. | `[^a-d]` matches `x` (any character except `a`, `b`, `c` or `d`) | | Literal opening bracket | `[` | An opening square bracket is a literal character that adds an opening square bracket to the character class. | `[ab[cd]ef]` matches `aef]`, `bef]`, `[ef]`, `cef]`, and `def]` | | Character escape | `\n`, `\r` and `\t` | Add an LF character, a CR character, or a tab character to the character class, respectively. | `[\n\r\t]` matches a line feed, a carriage return, or a tab character | | Character escape | `\a` | Add the "alert" or "bell" control character (ASCII 0x07) to the character class. | `[\a\t]` matches a bell or a tab character | | Character escape | `\b` | Add the backspace control character (ASCII 0x08) to the character class. 
| `[\b\t]` matches a backspace or a tab character | | Character escape | `\f` | Add the form-feed control character (ASCII 0x0C) to the character class. | `[\f\t]` matches a form-feed or a tab character | ### Shorthand Character Classes | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Shorthand | `\d` | Adds all digits to the character class. Matches a single digit if used outside character classes. The behavior can be controlled by [cudf::strings::regex_flags::ASCII](@ref cudf::strings::regex_flags) to include only `[0-9]` | `\d` matches a character that is a digit | | Shorthand | `\w` | Adds all word characters to the character class. Matches a single word character if used outside character classes. The behavior can be controlled by [cudf::strings::regex_flags::ASCII](@ref cudf::strings::regex_flags) to include only `[0-9A-Za-z_]` | `\w` matches any single word character | | Shorthand | `\s` | Adds all whitespace to the character class. Matches a single whitespace character if used outside character classes. The behavior can be controlled by [cudf::strings::regex_flags::ASCII](@ref cudf::strings::regex_flags) to include only `[\t- ]` | `\s` matches any single whitespace character | | Shorthand | `\D` | Adds all non-digits to the character class. Matches a single character that is not a digit character if used outside character classes. The behavior can be controlled by [cudf::strings::regex_flags::ASCII](@ref cudf::strings::regex_flags) | `[\D]` matches a single character that is not a digit character | | Shorthand | `\W` | Adds all non-word characters to the character class. Matches a single character that is not a word character if used outside character classes. The behavior can be controlled by [cudf::strings::regex_flags::ASCII](@ref cudf::strings::regex_flags) | [`\W`] matches a single character that is not a word character | | Shorthand | `\S` | Adds all non-whitespace to the character class. 
Matches a single character that is not a whitespace character if used outside character classes. The behavior can be controlled by [cudf::strings::regex_flags::ASCII](@ref cudf::strings::regex_flags) | `[\S]` matches a single character that is not a whitespace character | ### Anchors | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | String anchor | `^` (caret) | Matches at the start of the string | `^.` matches `a` in `abcdef` | | Line anchor | `^` (caret) | When [cudf::strings::regex_flags::MULTILINE](@ref cudf::strings::regex_flags) is specified: Matches after each line break in addition to matching at the start of the string, thus matching at the start of each line in the string. | `^.` matches `a` and `d` in `abc\ndef` | | String anchor | `$` (dollar) | Matches at the end of the string as well as before the final line break in the string | `.$` matches `f` in `abcdef` and in `abcdef\n` | | Line anchor | `$` (dollar) | When [cudf::strings::regex_flags::MULTILINE](@ref cudf::strings::regex_flags) is specified: Matches before each line break in addition to matching at the end of the string, thus matching at the end of each line in the string. | `.$` matches `c` and `f` in `abc\ndef` and in `abc\ndef\n` | | String anchor | `\A` | Matches at the start of the string | `\A\w` matches only `a` in `abc` | | String anchor | `\Z` | Matches at the end of the string | `\w\Z` matches `f` in `abc\ndef` but fails to match `abc\ndef\n` | ### Word Boundaries | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Word boundary | `\b` | Matches at a position that is followed by a word character but not preceded by a word character, or that is preceded by a word character but not followed by a word character. 
| `\b.` matches `a`, the space, and `d` in `abc def` | | Word boundary | `\B` | Matches at a position that is preceded and followed by a word character, or that is not preceded and not followed by a word character. | `\B.` matches `b`, `c`, `e`, and `f` in `abc def` | ### Quantifiers | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Greedy quantifier | `?` (question mark) | Makes the preceding item optional. Greedy, so the optional item is included in the match if possible. | `abc?` matches `abc` or `ab` | | Greedy quantifier | `*` (star) | Repeats the previous item zero or more times. Greedy, so as many items as possible will be matched before trying permutations with fewer matches of the preceding item, up to the point where the preceding item is not matched at all. | `".*"` matches `"def"` and `"ghi"` in `abc "def" "ghi" jkl` | | Greedy quantifier | `+` (plus) | Repeats the previous item once or more. Greedy, so as many items as possible will be matched before trying permutations with fewer matches of the preceding item, up to the point where the preceding item is matched only once. | `".+"` matches `"def"` and `"ghi"` in `abc "def" "ghi" jkl` | | Lazy quantifier | `??` | Makes the preceding item optional. Lazy, so the optional item is excluded in the match if possible. | `abc??` matches `ab` or `abc` | | Lazy quantifier | `*?` | Repeats the previous item zero or more times. Lazy, so the engine first attempts to skip the previous item, before trying permutations with ever increasing matches of the preceding item. | `".*?"` matches `"def"` and `"ghi"` in `abc "def" "ghi" jkl` | | Lazy quantifier | `+?` | Repeats the previous item once or more. Lazy, so the engine first matches the previous item only once, before trying permutations with ever increasing matches of the preceding item. 
| `".+?"` matches `"def"` and `"ghi"` in `abc "def" "ghi" jkl` | | Fixed quantifier | `{n}` where `n` is an integer: `0 ≤ n ≤ 999` | Repeats the previous item exactly `n` times. | `a{5}` matches `aaaaa` | | Greedy quantifier | `{n,m}` where `n` and `m` are integers: `0 ≤ n ≤ m ≤ 999` | Repeats the previous item between `n` and `m` times. Greedy, so repeating `m` times is tried before reducing the repetition to `n` times. | `a{2,4}` matches `aaaa`, `aaa` or `aa` | | Greedy quantifier | `{n,}` where `n` is an integer: `0 ≤ n ≤ 999` | Repeats the previous item at least `n` times. Greedy, so as many items as possible will be matched before trying permutations with fewer matches of the preceding item, up to the point where the preceding item is matched only `n` times. | `a{2,}` matches `aaaaa` in `aaaaa` | | Lazy quantifier | `{n,m}?` where `n` and `m` are integers `0 ≤ n ≤ m ≤ 999` | Repeats the previous item between `n` and `m` times. Lazy, so repeating `n` times is tried before increasing the repetition to `m` times. | `a{2,4}?` matches `aa`, `aaa`, or `aaaa` | | Lazy quantifier | `{n,}?` where `n` is an integer: `0 ≤ n ≤ 999` | Repeats the previous item `n` or more times. Lazy, so the engine first matches the previous item `n` times, before trying permutations with ever increasing matches of the preceding item. | `a{2,}?` matches `aa` in `aaaaa` | ### Groups | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Capturing group | `(regex)` | Parentheses group the regex between them. They capture the text matched by the regex inside them into a numbered group. They allow you to apply regex operators to the entire grouped regex. | `(abc⎮def)ghi` matches `abcghi` or `defghi` | | Non-capturing group | `(?:regex)` | Non-capturing parentheses group the regex so you can apply regex operators, but do not capture anything. 
| `(?:abc⎮def)ghi` matches `abcghi` or `defghi` | ### Replacement Backreferences | Feature | Syntax | Description | Example | | ---------- | ------------- | ------------- | ------------- | | Backreference | `\1` through `\99` | Insert the text matched by capturing groups 1 through 99 | Replacing `(a)(b)(c)` with `\3\3\1` in `abc` yields `cca` | | Backreference | `${1}` through `${99}` | Insert the text matched by capturing groups 1 through 99 | Replacing `(a)(b)(c)` with `${2}.${2}:{$3}` in `abc` yields `b.b:c` | | Whole match | `${0}` | Insert the whole regex match | Replacing `(\d)(a)` with `[${0}]:-${2}_${1};` in `123abc` yields `12[3a]:-a_3;bc`
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/doxygen/main_page.md
# libcudf libcudf is a C++ GPU DataFrame library for loading, joining, aggregating, filtering, and otherwise manipulating data. A GPU DataFrame is a column-oriented tabular data structure, so libcudf provides two core data structures: cudf::column, and cudf::table.
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/doxygen/Doxyfile
# Doxyfile 1.9.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. # # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the configuration # file that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = libcudf # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = 24.02.00 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. 
PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, # Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all generated output in the proper direction. # Possible values are: None, LTR, RTL and Context. # The default value is: None. OUTPUT_TEXT_DIRECTION = None # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. 
If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = NO # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. 
Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line # such as # /*************** # as being the beginning of a Javadoc-style comment "banner". If set to NO, the # Javadoc-style will behave just like regular comments and it will not be # interpreted by doxygen. # The default value is: NO. JAVADOC_BANNER = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. 
MULTILINE_CPP_IS_BRIEF = NO # By default Python docstrings are displayed as preformatted text and doxygen's # special commands cannot be used. By setting PYTHON_DOCSTRING to NO the # doxygen's special commands can be used and the contents of the docstring # documentation blocks is shown as doxygen documentation. # The default value is: YES. PYTHON_DOCSTRING = YES # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new # page for each member. If set to NO, the documentation of a member will be part # of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines (in the resulting output). You can put ^^ in the value part of an # alias to insert a newline as if a physical newline was in the original file. # When you need a literal { or } or , in the value part of an alias you have to # escape them by means of a backslash (\), this can lead to conflicts with the # commands \{ and \} for these it is advised to use the version @{ and @} or use # a double escape (\\{ and \\}) ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. 
Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice # sources only. Doxygen will then generate output that is more tailored for that # language. For instance, namespaces will be presented as modules, types will be # separated into more groups, etc. # The default value is: NO. OPTIMIZE_OUTPUT_SLICE = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, JavaScript, # Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, # Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: # FortranFree, unknown formatted Fortran: Fortran. 
In the latter case the parser # tries to guess whether the code is fixed or free formatted code, this is the # default for Fortran type files). For instance to make doxygen treat .inc files # as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. When specifying no_extension you should add # * to the FILE_PATTERNS. # # Note see also the list of default file extension mappings. EXTENSION_MAPPING = cu=C++ \ cuh=C++ # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See https://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. # Minimum value: 0, maximum value: 99, default value: 5. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 5 # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.)
but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # If one adds a struct or class to a group and this option is enabled, then also # any nested class or struct is added to the same group. By default this option # is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO. GROUP_NESTED_COMPOUNDS = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. 
This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 # The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use # during processing. When set to 0 doxygen will base this on the number of # cores available in the system. You can set it explicitly to a value larger # than 0 to get more control over the balance between CPU load and processing # speed. At this moment only the input processing can be done using multiple # threads. Since this is still an experimental feature the default is set to 1, # which effectively disables parallel processing. Please report any issues you # encounter. Generating dot graphs in parallel is controlled by the # DOT_NUM_THREADS setting. # Minimum value: 0, maximum value: 32, default value: 1. NUM_PROC_THREADS = 1 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual # methods of a class will be included in the documentation. # The default value is: NO. EXTRACT_PRIV_VIRTUAL = NO # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined # locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. If set to YES, local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If this flag is set to YES, the name of an unnamed parameter in a declaration # will be determined by the corresponding definition. 
By default unnamed # parameters remain unnamed in the output. # The default value is: YES. RESOLVE_UNNAMED_PARAMS = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO, these classes will be included in the various overviews. This option # has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # declarations. If set to NO, these declarations will be included in the # documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO, these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # With the correct setting of option CASE_SENSE_NAMES doxygen will better be # able to match the capabilities of the underlying filesystem. In case the # filesystem is case sensitive (i.e. it supports files in the same directory # whose names only differ in casing), the option must be set to YES to properly # deal with such files in case they appear in the input. 
For filesystems that # are not case sensitive the option should be set to NO to properly deal with # output files written for symbols that only differ in casing, such as for two # classes, one named CLASS and the other named Class, and to also support # references to files without having to specify the exact matching casing. On # Windows (including Cygwin) and MacOS, users should typically set this option # to NO, whereas on Linux or other Unix flavors it should typically be set to # YES. # The default value is: system dependent. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will # append additional text to a page's title, such as Class Reference. If set to # YES the compound reference will be hidden. # The default value is: NO. HIDE_COMPOUND_REFERENCE= NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES.
INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. 
SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo # list. This list is created by putting \todo commands in the documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test # list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if <section_label> ... \endif and \cond <section_label> # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. 
The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES, the # list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. 
You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = DoxygenLayout.xml # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. 
WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO, doxygen will only warn about wrong or incomplete # parameter documentation, but not about the absence of documentation. If # EXTRACT_ALL is set to YES then this flag will automatically be disabled. # The default value is: NO. WARN_NO_PARAMDOC = YES # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when # a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS # then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but # at the end of the doxygen process doxygen will return with a non-zero status. # Possible values are: NO, YES and FAIL_ON_WARNINGS. # The default value is: NO. WARN_AS_ERROR = NO # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). 
WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. INPUT = main_page.md \ regex.md \ unicode.md \ developer_guide/BENCHMARKING.md \ developer_guide/DOCUMENTATION.md \ developer_guide/DEVELOPER_GUIDE.md \ developer_guide/TESTING.md \ ../include \ ../include/cudf_test/column_wrapper.hpp \ ../include/cudf_test/column_utilities.hpp \ ../include/cudf_test/iterator_utilities.hpp \ ../include/cudf_test/table_utilities.hpp \ ../include/cudf_test/type_lists.hpp \ ../include/cudf_test/type_list_utilities.hpp \ ../libcudf_kafka/include # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: # https://www.gnu.org/software/libiconv/) for the list of possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # # Note the list of default checked file patterns might differ from the list of # default file extension mappings. 
# # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, # *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), # *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, # *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.cpp \ *.hpp \ *.h \ *.c \ *.cu \ *.cuh # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = */nvtx/* \ */detail/* \ */cudf_test/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. 
Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = org::apache \ *_impl \ *Impl # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. 
# # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. FILTER_PATTERNS = *.md=./modify_fences.sh # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. 
USE_MDFILE_AS_MAINPAGE = main_page.md #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # entity all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. 
Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see https://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. 
# Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. # Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined # cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. 
the last # style sheet in the list overrules the setting of the previous ones in the # list). For an example see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 266 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 255 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. 
The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 52 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to YES can help to show when doxygen was last run and thus if the # documentation is up to date. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML # documentation will contain a main index with vertical navigation menus that # are dynamically created via JavaScript. If disabled, the navigation index will # consists of multiple levels of tabs that are statically embedded in every HTML # page. Disable this option to support browsers that do not have JavaScript, # like the Qt help browser. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_MENUS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 
0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: # https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To # create a documentation set, doxygen will generate a Makefile in the HTML # output directory. Running make will produce the docset in that directory and # running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy # genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. 
# The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: # https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler (hhc.exe). If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated # (YES) or that it should be included in the main .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated # (YES) or a normal table of contents (NO) in the .chm file. Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace # (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. 
# This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location (absolute path # including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to # run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. 
To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. 
Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. # This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg # tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see # https://inkscape.org) to generate formulas as SVG images instead of PNGs for # the HTML output. These images will generally look nicer at scaled resolutions. # Possible values are: png (the default) and svg (looks nicer but requires the # pdf2svg or inkscape tool). # The default value is: png. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_FORMULA_FORMAT = png
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files in
# the HTML output directory before the changes have effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side JavaScript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output.
# When enabled you may also need to install MathJax separately and configure the
# path to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e.
MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use <access key> + S # (what the <access key> is depends on the OS and browser, but it is typically # <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down # key> to jump into the search results window, the results can be navigated # using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel # the search. The filter options can be selected when the cursor is inside the # search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys> # to select a filter and <Enter> or <escape> to activate or cancel the filter # option. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using JavaScript. There # are two flavors of web server based searching depending on the EXTERNAL_SEARCH # setting. When disabled, doxygen will generate a PHP script for searching and # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing # and searching needs to be provided by external tools. See the section # "External Indexing and Searching" for details. # The default value is: NO. # This tag requires that the tag SEARCHENGINE is set to YES. SERVER_BASED_SEARCH = NO # When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP # script for searching. Instead the search results are written to an XML file # which needs to be processed by an external indexer. Doxygen will invoke an # external search engine pointed to by the SEARCHENGINE_URL option to obtain the # search results. 
# # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: # https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. # This tag requires that the tag SEARCHENGINE is set to YES. EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will return the search results when EXTERNAL_SEARCH is enabled. # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: # https://xapian.org/). See the section "External Indexing and Searching" for # details. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHENGINE_URL = # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed # search data is written to a file for indexing by an external tool. With the # SEARCHDATA_FILE tag the name of this file can be specified. # The default file is: searchdata.xml. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHDATA_FILE = searchdata.xml # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple # projects and redirect the results back to the right project. # This tag requires that the tag SEARCHENGINE is set to YES. EXTERNAL_SEARCH_ID = # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen # projects other than the one defined by this configuration file, but that are # all added to the same external search index. Each project needs to have a # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of # to a relative location where the documentation can be found. 
The format is: # EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ... # This tag requires that the tag SEARCHENGINE is set to YES. EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # Configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output. # The default value is: YES. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: latex. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. # # Note that when not enabling USE_PDFLATEX the default is latex when enabling # USE_PDFLATEX the default is pdflatex and when in the later case latex is # chosen this is overwritten by pdflatex. For specific output languages the # default can have been set differently, this depends on the implementation of # the output language. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate # index for LaTeX. # Note: This tag is used in the Makefile / make.bat. # See also: LATEX_MAKEINDEX_CMD for the part in the generated output file # (.tex). # The default file is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. MAKEINDEX_CMD_NAME = makeindex # The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to # generate index for LaTeX. In case there is no backslash (\) as first character # it will be automatically added in the LaTeX code. # Note: This tag is used in the generated output file (.tex). # See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat. 
# The default value is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_MAKEINDEX_CMD = makeindex # If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used by the # printer. # Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x # 14 inches) and executive (7.25 x 10.5 inches). # The default value is: a4. # This tag requires that the tag GENERATE_LATEX is set to YES. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names # that should be included in the LaTeX output. The package can be specified just # by its name or with the correct syntax as to be used with the LaTeX # \usepackage command. To get the times font for instance you can specify : # EXTRA_PACKAGES=times or EXTRA_PACKAGES={times} # To use the option intlimits with the amsmath package you can specify: # EXTRA_PACKAGES=[intlimits]{amsmath} # If left blank no extra packages will be included. # This tag requires that the tag GENERATE_LATEX is set to YES. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the # generated LaTeX document. The header should contain everything until the first # chapter. If it is left blank doxygen will generate a standard header. See # section "Doxygen usage" for information on how to let doxygen write the # default header to a separate file. # # Note: Only use a user-defined header if you know what you are doing! The # following commands have a special meaning inside the header: $title, # $datetime, $date, $doxygenversion, $projectname, $projectnumber, # $projectbrief, $projectlogo. 
Doxygen will replace $title with the empty # string, for the replacement values of the other commands the user is referred # to HTML_HEADER. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the # generated LaTeX document. The footer should contain everything after the last # chapter. If it is left blank doxygen will generate a standard footer. See # LATEX_HEADER for more information on how to generate a default footer and what # special commands can be used inside the footer. # # Note: Only use a user-defined footer if you know what you are doing! # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_FOOTER = # The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined # LaTeX style sheets that are included after the standard style sheets created # by doxygen. Using this option one can overrule certain style aspects. Doxygen # will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the # list). # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_EXTRA_STYLESHEET = # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the LATEX_OUTPUT output # directory. Note that the files will be copied as-is; there are no commands or # markers available. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_EXTRA_FILES = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is # prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will # contain links (just like the HTML output) instead of page references. This # makes the output suitable for online browsing using a PDF viewer. # The default value is: YES. 
# This tag requires that the tag GENERATE_LATEX is set to YES. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as # specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX # files. Set this option to YES, to get a higher quality PDF documentation. # # See also section LATEX_CMD_NAME for selecting the engine. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep running # if errors occur, instead of asking the user for help. This option is also used # when generating formulas in HTML. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_BATCHMODE = NO # If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the # index chapters (such as File Index, Compound Index, etc.) in the output. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HIDE_INDICES = NO # If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source # code with syntax highlighting in the LaTeX output. # # Note that which sources are shown also depends on other settings such as # SOURCE_BROWSER. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See # https://en.wikipedia.org/wiki/BibTeX and \cite for more info. # The default value is: plain. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_BIB_STYLE = plain # If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated # page will contain the date and time when the page was generated. 
Setting this # to NO can help when comparing the output of multiple runs. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_TIMESTAMP = NO # The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute) # path from which the emoji images will be read. If a relative path is entered, # it will be relative to the LATEX_OUTPUT directory. If left blank the # LATEX_OUTPUT directory will be used. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_EMOJI_DIRECTORY = #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The # RTF output is optimized for Word 97 and may not look too pretty with other RTF # readers/editors. # The default value is: NO. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: rtf. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. # This tag requires that the tag GENERATE_RTF is set to YES. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will # contain hyperlink fields. The RTF file will contain links (just like the HTML # output) instead of page references. This makes the output suitable for online # browsing using Word or some other Word compatible readers that support those # fields. # # Note: WordPad (write) and others do not support links. # The default value is: NO. # This tag requires that the tag GENERATE_RTF is set to YES. 
RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # configuration file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. # # See also section "Doxygen usage" for information on how to generate the # default style sheet that doxygen normally uses. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an RTF document. Syntax is # similar to doxygen's configuration file. A template extensions file can be # generated using doxygen -e rtf extensionFile. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_EXTENSIONS_FILE = # If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code # with syntax highlighting in the RTF output. # # Note that which sources are shown also depends on other settings such as # SOURCE_BROWSER. # The default value is: NO. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_SOURCE_CODE = NO #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for # classes and files. # The default value is: NO. GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. A directory man3 will be created inside the directory specified by # MAN_OUTPUT. # The default directory is: man. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to the generated # man pages. In case the manual section does not start with a number, the number # 3 is prepended. The dot (.) 
at the beginning of the MAN_EXTENSION tag is # optional. # The default value is: .3. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_EXTENSION = .3 # The MAN_SUBDIR tag determines the name of the directory created within # MAN_OUTPUT in which the man pages are placed. If defaults to man followed by # MAN_EXTENSION with the initial . removed. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_SUBDIR = # If the MAN_LINKS tag is set to YES and doxygen generates man output, then it # will generate one additional man file for each entity documented in the real # man page(s). These additional files only source the real man page, but without # them the man command would be unable to find the correct page. # The default value is: NO. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_LINKS = NO #--------------------------------------------------------------------------- # Configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that # captures the structure of the code including all documentation. # The default value is: NO. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: xml. # This tag requires that the tag GENERATE_XML is set to YES. XML_OUTPUT = xml # If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program # listings (including syntax highlighting and cross-referencing information) to # the XML output. Note that enabling this will significantly increase the size # of the XML output. # The default value is: YES. # This tag requires that the tag GENERATE_XML is set to YES. 
XML_PROGRAMLISTING = YES # If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include # namespace members in file scope as well, matching the HTML output. # The default value is: NO. # This tag requires that the tag GENERATE_XML is set to YES. XML_NS_MEMB_FILE_SCOPE = NO #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- # If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files # that can be used to generate PDF. # The default value is: NO. GENERATE_DOCBOOK = NO # The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be put in # front of it. # The default directory is: docbook. # This tag requires that the tag GENERATE_DOCBOOK is set to YES. DOCBOOK_OUTPUT = docbook # If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the # program listings (including syntax highlighting and cross-referencing # information) to the DOCBOOK output. Note that enabling this will significantly # increase the size of the DOCBOOK output. # The default value is: NO. # This tag requires that the tag GENERATE_DOCBOOK is set to YES. DOCBOOK_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an # AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures # the structure of the code including all documentation. Note that this feature # is still experimental and incomplete at the moment. # The default value is: NO. 
GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # Configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module # file that captures the structure of the code including all documentation. # # Note that this feature is still experimental and incomplete at the moment. # The default value is: NO. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI # output from the Perl module output. # The default value is: NO. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely # formatted so it can be parsed by a human reader. This is useful if you want to # understand what is going on. On the other hand, if this tag is set to NO, the # size of the Perl module output will be much smaller and Perl will parse it # just the same. # The default value is: YES. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file are # prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful # so different doxyrules.make files included by the same Makefile don't # overwrite each other's variables. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all # C-preprocessor directives found in the sources and include files. 
# The default value is: YES. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names # in the source code. If set to NO, only conditional compilation will be # performed. Macro expansion can be done in a controlled way by setting # EXPAND_ONLY_PREDEF to YES. # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then # the macro expansion is limited to the macros specified with the PREDEFINED and # EXPAND_AS_DEFINED tags. # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by the # preprocessor. # This tag requires that the tag SEARCH_INCLUDES is set to YES. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will be # used. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that are # defined before the preprocessor is started (similar to the -D option of e.g. # gcc). The argument of the tag is a list of macros of the form: name or # name=definition (no spaces). If the definition and the "=" are omitted, "=1" # is assumed. 
To prevent a macro definition from being undefined via #undef or # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. PREDEFINED = __device__= \ __host__= \ DOXYGEN_SHOULD_SKIP_THIS # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The # macro definition that is found in the sources will be used. Use the PREDEFINED # tag if you want to use a different macro definition that overrules the # definition found in the source code. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will # remove all references to function-like macros that are alone on a line, have # an all uppercase name, and do not end with a semicolon. Such function macros # are typically used for boiler-plate code, and will confuse the parser if not # removed. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration options related to external references #--------------------------------------------------------------------------- # The TAGFILES tag can be used to specify one or more tag files. For each tag # file the location of the external documentation should be added. The format of # a tag file without this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where loc1 and loc2 can be relative or absolute paths or URLs. See the # section "Linking to external documentation" for more information about the use # of tag files. # Note: Each tag file must have a unique name (where the name does NOT include # the path). 
If a tag file is not located in the directory in which doxygen is # run, you must also specify the path to the tagfile here. TAGFILES = rmm.tag=https://docs.rapids.ai/api/librmm/24.02 # When a file name is specified after GENERATE_TAGFILE, doxygen will create a # tag file that is based on the input files it reads. See section "Linking to # external documentation" for more information about the usage of tag files. GENERATE_TAGFILE = html/libcudf.tag # If the ALLEXTERNALS tag is set to YES, all external class will be listed in # the class index. If set to NO, only the inherited external classes will be # listed. # The default value is: NO. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed # in the modules index. If set to NO, only the current project's groups will be # listed. # The default value is: YES. EXTERNAL_GROUPS = YES # If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in # the related pages index. If set to NO, only the current project's pages will # be listed. # The default value is: YES. EXTERNAL_PAGES = YES #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram # (in HTML and LaTeX) for classes with base or super classes. Setting the tag to # NO turns the diagrams off. Note that this option also works with HAVE_DOT # disabled, but it is recommended to install and use dot, since it yields more # powerful graphs. # The default value is: YES. CLASS_DIAGRAMS = YES # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. 
# If left empty dia is assumed to be found in the default search path. DIA_PATH = # If set to YES the inheritance and collaboration graphs will hide inheritance # and usage relations if the target is undocumented or is not a class. # The default value is: YES. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz (see: # http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent # Bell Labs. The other options in this section have no effect if this option is # set to NO # The default value is: NO. HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed # to run in parallel. When set to 0 doxygen will base this on the number of # processors available in the system. You can set it explicitly to a value # larger than 0 to get control over the balance between CPU load and processing # speed. # Minimum value: 0, maximum value: 32, default value: 0. # This tag requires that the tag HAVE_DOT is set to YES. DOT_NUM_THREADS = 0 # When you want a differently looking font in the dot files that doxygen # generates you can specify the font name using DOT_FONTNAME. You need to make # sure dot is able to find the font, which can be done by putting it in a # standard location or by setting the DOTFONTPATH environment variable or by # setting DOT_FONTPATH to the directory containing the font. # The default value is: Helvetica. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size (in points) of the font of # dot graphs. # Minimum value: 4, maximum value: 24, default value: 10. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the default font as specified with # DOT_FONTNAME. 
If you specify a different font using DOT_FONTNAME you can set # the path where dot can find it using this tag. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTPATH = # If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for # each documented class showing the direct and indirect inheritance relations. # Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a # graph for each documented class showing the direct and indirect implementation # dependencies (inheritance, containment, and class references variables) of the # class with other documented classes. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for # groups, showing the direct groups dependencies. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES, doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside the # class node. If there are many fields or methods and many nodes the graph may # become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the # number of items for each type to make the size more manageable. Set this to 0 # for no limit. Note that the threshold may be exceeded by 50% before the limit # is enforced. So when you set the threshold to 10, up to 15 fields may appear, # but if the number exceeds 15, the total amount of fields shown is limited to # 10. 
# Minimum value: 0, maximum value: 100, default value: 10. # This tag requires that the tag UML_LOOK is set to YES. UML_LIMIT_NUM_FIELDS = 10 # If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and # methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS # tag is set to YES, doxygen will add type and arguments for attributes and # methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen # will not generate fields with class member information in the UML graphs. The # class diagrams will look similar to the default class diagrams but using UML # notation for the relationships. # Possible values are: NO, YES and NONE. # The default value is: NO. # This tag requires that the tag UML_LOOK is set to YES. DOT_UML_DETAILS = NO # The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters # to display on a single line. If the actual line length exceeds this threshold # significantly it will be wrapped across multiple lines. Some heuristics are applied # to avoid ugly line breaks. # Minimum value: 0, maximum value: 1000, default value: 17. # This tag requires that the tag HAVE_DOT is set to YES. DOT_WRAP_THRESHOLD = 17 # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. TEMPLATE_RELATIONS = NO # If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to # YES then doxygen will generate a graph for each documented file showing the # direct and indirect include dependencies of the file with other documented # files. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. 
INCLUDE_GRAPH = YES # If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are # set to YES then doxygen will generate a graph for each documented file showing # the direct and indirect include dependencies of the file with other documented # files. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH tag is set to YES then doxygen will generate a call # dependency graph for every global function or class method. # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. Disabling a call graph can be # accomplished by means of the command \hidecallgraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. CALL_GRAPH = NO # If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller # dependency graph for every global function or class method. # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable caller graphs for selected # functions only using the \callergraph command. Disabling a caller graph can be # accomplished by means of the command \hidecallergraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a graphical # hierarchy of all classes instead of a textual one. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the # dependencies a directory has on other directories in a graphical way. The # dependency relations are determined by the #include relations between the # files in the directories. # The default value is: YES. 
# This tag requires that the tag HAVE_DOT is set to YES. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. For an explanation of the image formats see the section # output formats in the documentation of the dot tool (Graphviz (see: # http://www.graphviz.org/)). # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order # to make the SVG files visible in IE 9+ (other browsers do not have this # requirement). # Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo, # png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and # png:gdiplus:gdiplus. # The default value is: png. # This tag requires that the tag HAVE_DOT is set to YES. DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # # Note that this requires a modern browser other than Internet Explorer. Tested # and working are Firefox, Chrome, Safari, and Opera. # Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make # the SVG files visible. Older versions of IE do not have SVG support. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. INTERACTIVE_SVG = NO # The DOT_PATH tag can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. # This tag requires that the tag HAVE_DOT is set to YES. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the \dotfile # command). # This tag requires that the tag HAVE_DOT is set to YES. DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the \mscfile # command). 
MSCFILE_DIRS = # The DIAFILE_DIRS tag can be used to specify one or more directories that # contain dia files that are included in the documentation (see the \diafile # command). DIAFILE_DIRS = # When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the # path where java can find the plantuml.jar file. If left blank, it is assumed # PlantUML is not used or called during a preprocessing step. Doxygen will # generate a warning when it encounters a \startuml command in this case and # will not generate output for the diagram. PLANTUML_JAR_PATH = # When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a # configuration file for plantuml. PLANTUML_CFG_FILE = # When using plantuml, the specified paths are searched for files specified by # the !include statement in a plantuml block. PLANTUML_INCLUDE_PATH = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes # that will be shown in the graph. If the number of nodes in a graph becomes # larger than this value, doxygen will truncate the graph, which is visualized # by representing a node as a red box. Note that doxygen if the number of direct # children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that # the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. # Minimum value: 0, maximum value: 10000, default value: 50. # This tag requires that the tag HAVE_DOT is set to YES. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs # generated by dot. A depth value of 3 means that only nodes reachable from the # root by following a path via at most 3 edges will be shown. Nodes that lay # further from the root node will be omitted. Note that setting this option to 1 # or 2 may greatly reduce the computation time needed for large code bases. Also # note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. 
Using a depth of 0 means no depth restriction. # Minimum value: 0, maximum value: 1000, default value: 0. # This tag requires that the tag HAVE_DOT is set to YES. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not seem # to support this out of the box. # # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) support # this, this feature is disabled by default. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page # explaining the meaning of the various boxes and arrows in the dot generated # graphs. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate # files that are used to generate the various graphs. # # Note: This setting is not only used for dot files but also for msc and # plantuml temporary files. # The default value is: YES. DOT_CLEANUP = YES
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/doxygen/header.html
<!-- HTML header for doxygen 1.8.20--> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen $doxygenversion"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME--> <!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME--> <link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="$relpath^jquery.js"></script> <script type="text/javascript" src="$relpath^dynsections.js"></script> $treeview $search $mathjax <link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" /> $extrastylesheet <!-- RAPIDS CUSTOM JS & CSS: START, Please add these two lines back after every version upgrade --> <script defer src="https://docs.rapids.ai/assets/js/custom.js"></script> <link rel="stylesheet" href="https://docs.rapids.ai/assets/css/custom.css"> <!-- RAPIDS CUSTOM JS & CSS: END --> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! 
--> <!--BEGIN TITLEAREA--> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <!--BEGIN PROJECT_LOGO--> <td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td> <!--END PROJECT_LOGO--> <!--BEGIN PROJECT_NAME--> <td id="projectalign" style="padding-left: 0.5em;"> <div id="projectname">$projectname <!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER--> </div> <!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF--> </td> <!--END PROJECT_NAME--> <!--BEGIN !PROJECT_NAME--> <!--BEGIN PROJECT_BRIEF--> <td style="padding-left: 0.5em;"> <div id="projectbrief">$projectbrief</div> </td> <!--END PROJECT_BRIEF--> <!--END !PROJECT_NAME--> <!--BEGIN DISABLE_INDEX--> <!--BEGIN SEARCHENGINE--> <td>$searchbox</td> <!--END SEARCHENGINE--> <!--END DISABLE_INDEX--> </tr> </tbody> </table> </div> <!--END TITLEAREA--> <!-- end header part -->
0
rapidsai_public_repos/cudf/cpp/doxygen
rapidsai_public_repos/cudf/cpp/doxygen/developer_guide/DOCUMENTATION.md
# libcudf C++ Documentation Guide These guidelines apply to documenting all libcudf C++ source files using doxygen style formatting although only public APIs and classes are actually [published](https://docs.rapids.ai/api/libcudf/stable/index.html). ## Copyright License The copyright comment is included here but may also be mentioned in a coding guideline document as well. The following is the license header comment that should appear at the beginning of every C++ source file. /* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ The comment should start with `/*` and not `/**` so it is not processed by doxygen. Also, here are the rules for the copyright year. - A new file should have the year in which it was created - A modified file should span the year it was created and the year it was modified (e.g. `2019-2021`) Changing the copyright year may not be necessary if no content has changed (e.g. reformatting only). ## Doxygen The [doxygen tool](https://www.doxygen.nl/manual/index.html) is used to generate HTML pages from the C++ comments in the source code. Doxygen recognizes and parses block comments and performs specialized output formatting when it encounters [doxygen commands](https://www.doxygen.nl/manual/commands.html). There are almost 200 commands (also called tags in this document) that doxygen recognizes in comment blocks. 
This document provides guidance on which commands/tags to use and how to use them in the libcudf C++ source code. The doxygen process can be customized using options in the [Doxyfile](../doxygen/Doxyfile). Here are some of the custom options in the Doxyfile for libcudf. | Option | Setting | Description | | ------ | ------- | ----------- | | PROJECT_NAME | libcudf | Title used on the main page | | PROJECT_NUMBER | 22.02.00 | Version number | | EXTENSION_MAPPING | cu=C++ cuh=C++ | Process `cu` and `cuh` as C++ | | INPUT | main_page.md regex.md unicode.md ../include | Embedded markdown files and source code directories to process | | FILE_PATTERNS | *.cpp *.hpp *.h *.c *.cu *.cuh | File extensions to process | ## Block Comments Use the following style for block comments describing functions, classes and other types, groups, and files. /** * description text and * doxygen tags go here */ Doxygen comment blocks start with `/**` and end with `*/` only, and with nothing else on those lines. Do not add dashes `-----` or extra asterisks `*****` to the first and last lines of a doxygen block. The block must be placed immediately before the source code line to which it refers. The block may be indented to line up vertically with the item it documents as appropriate. See the [Example](#the-example) section below. Each line in the comment block between the `/**` and `*/` lines should start with a space followed by an asterisk. Any text on these lines, including tag declarations, should start after a single space after the asterisk. ## Tag/Command names Use @ to prefix doxygen commands (e.g. \@brief, \@code, etc.) ## Markdown The doxygen tool supports a limited set of markdown format in the comment block including links, tables, lists, etc. In some cases a trade-off may be required for readability in the source text file versus the readability in the doxygen formatted web pages. 
For example, there are some limitations on readability with '%' character and pipe character '|' within a markdown table. Avoid using direct HTML tags. Although doxygen supports markdown and markdown supports HTML tags, the HTML support for doxygen's markdown is also limited. ## The Example The following example covers most of the doxygen block comment and tag styles for documenting C++ code in libcudf. /** * @file source_file.cpp * @brief Description of source file contents * * Longer description of the source file contents. */ /** * @brief One sentence description of the class. * * @ingroup optional_predefined_group_id * * Longer, more detailed description of the class. * * @tparam T Short description of each template parameter * @tparam U Short description of each template parameter */ template <typename T, typename U> class example_class { void get_my_int(); ///< Simple members can be documented like this void set_my_int( int value ); ///< Try to use descriptive member names /** * @brief Short, one sentence description of the member function. * * A more detailed description of what this function does and what * its logic does. * * @code * example_class<int> inst; * inst.set_my_int(5); * int output = inst.complicated_function(1,dptr,fptr); * @endcode * * @param[in] first This parameter is an input parameter to the function * @param[in,out] second This parameter is used both as an input and output * @param[out] third This parameter is an output of the function * * @return The result of the complex function */ T complicated_function(int first, double* second, float* third) { // Do not use doxygen-style block comments // for code logic documentation. } private: int my_int; ///< An example private member variable }; /** * @brief Short, one sentence description of this free function. * * @ingroup optional_predefined_group_id * * A detailed description must start after a blank line. 
* * @code * template<typename T> * struct myfunctor { * bool operator()(T input) { return input % 2 > 0; } * }; * free_function<myfunctor,int>(myfunctor{},12); * @endcode * * @throw cudf::logic_error if `input_argument` is negative or zero * * @tparam functor_type The type of the functor * @tparam input_type The datatype of the input argument * * @param[in] functor The functor to be called on the input argument * @param[in] input_argument The input argument passed into the functor * @return The result of calling the functor on the input argument */ template <class functor_type, typename input_type> bool free_function(functor_type functor, input_type input_argument) { CUDF_EXPECTS( input_argument > 0, "input_argument must be positive"); return functor(input_argument); } /** * @brief Short, one sentence description. * * @ingroup optional_predefined_group_id * * Optional, longer description. */ enum class example_enum { first_enum, ///< Description of the first enum second_enum, ///< Description of the second enum third_enum ///< Description of the third enum }; ## Descriptions The comment description should clearly detail how the output(s) are created from any inputs. Include any performance and any boundary considerations. Also include any limits on parameter values and if any default values are declared. Don't forget to specify how nulls are handled or produced. Also, try to include a short [example](#inline-examples) if possible. ### @brief The [\@brief](https://www.doxygen.nl/manual/commands.html#cmdbrief) text should be a short, one sentence description. Doxygen does not provide much space to show this text in the output pages. Always follow the \@brief line with a blank comment line. The longer description is the rest of the comment text that is not tagged with any doxygen command. /** * @brief Short description. * * Long description. * ### \@copydoc Documentation for declarations in headers should be clear and complete. 
You can use the [\@copydoc](https://www.doxygen.nl/manual/commands.html#cmdcopydoc) tag to avoid duplicating the comment block for a function definition. /** * @copydoc complicated_function(int,double*,float*) * * Any extra documentation. */ Also, \@copydoc is useful when documenting a `detail` function that differs only by the `stream` parameter. /** * @copydoc cudf::segmented_count_set_bits(bitmask_type const*,std::vector<size_type> const&) * * @param[in] stream Optional CUDA stream on which to execute kernels */ std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask, std::vector<size_type> const& indices, rmm::cuda_stream_view stream = cudf::get_default_stream()); Note, you must specify the whole signature of the function, including optional parameters, so that doxygen will be able to locate it. ### Function parameters The following tags should appear near the end of function comment block in the order specified here: | Command | Description | | ------- | ----------- | | [\@throw](#throw) | Specify the conditions in which the function may throw an exception | | [\@tparam](#tparam) | Description for each template parameter | | [\@param](#param) | Description for each function parameter | | [\@return](#return) | Short description of object or value returned | #### \@throw Add an [\@throw](https://www.doxygen.nl/manual/commands.html#cmdthrow) comment line in the doxygen block for each exception that the function may throw. You only need to include exceptions thrown by the function itself. If the function calls another function that may throw an exception, you do not need to document those exceptions here. Include the name of the exception without backtick marks so doxygen can add reference links correctly. * * @throw cudf::logic_error if `input_argument` is negative or zero * Using \@throws is also acceptable but VS Code and other tools only do syntax highlighting on \@throw. 
#### @tparam Add a [\@tparam](https://www.doxygen.nl/manual/commands.html#cmdtparam) comment line for each template parameter declared by this function. The name of the parameter specified after the doxygen tag must exactly match the template parameter name. * * @tparam functor_type The type of the functor * @tparam input_type The datatype of the input argument * The definition should detail the requirements of the parameter. For example, if the template is for a functor or predicate, then describe the expected input types and output. #### @param Add a [\@param](https://www.doxygen.nl/manual/commands.html#cmdparam) comment line for each function parameter passed to this function. The name of the parameter specified after the doxygen tag must match the function's parameter name. Also append `[in]`, `[out]` or `[in,out]` to the `@param` if it is not clear from the declaration and the parameter name itself. * * @param[in] first This parameter is an input parameter to the function * @param[in,out] second This parameter is used both as an input and output * @param[out] third This parameter is an output of the function * It is also recommended to vertically align the 3 columns of text if possible to make it easier to read in a source code editor. #### @return Add a single [\@return](https://www.doxygen.nl/manual/commands.html#cmdreturn) comment line at the end of the comment block if the function returns an object or value. Include a brief description of what is returned. /** * ... * * @return A new column of type INT32 and no nulls */ Do not include the type of the object returned with the `@return` comment. ### Inline Examples It is usually helpful to include a source code example inside your comment block when documenting a function or other declaration. Use the [\@code](https://www.doxygen.nl/manual/commands.html#cmdcode) and [\@endcode](https://www.doxygen.nl/manual/commands.html#cmdendcode) pair to include inline examples.
Doxygen supports syntax highlighting for C++ and several other programming languages (e.g. Python, Java). By default, the \@code tag uses syntax highlighting based on the source code in which it is found. * * @code * auto result = cudf::make_column( ); * @endcode * You can specify a different language by indicating the file extension in the tag: * * @code{.py} * import cudf * s = cudf.Series([1,2,3]) * @endcode * If you wish to use pseudocode in your example, use the following: * * Sometimes pseudocode is clearer. * @code{.pseudo} * s = int column of [ 1, 2, null, 4 ] * r = fill( s, [1, 2], 0 ) * r is now [ 1, 0, 0, 4 ] * @endcode * When writing example snippets, using fully qualified class names allows doxygen to add reference links to the example. * * @code * auto result1 = make_column( ); // reference link will not be created * auto result2 = cudf::make_column( ); // reference link will be created * @endcode * Although using 3 backtick marks \`\`\` for example blocks will work too, they do not stand out as well in VS Code and other source editors. Do not use the `@example` tag in the comments for a declaration, or doxygen will interpret the entire source file as example source code. The source file is then published under a separate _Examples_ page in the output. ### Deprecations Add a single [\@deprecated](https://www.doxygen.nl/manual/commands.html#cmddeprecated) comment line to comment blocks for APIs that will be removed in future releases. Mention alternative / replacement APIs in the deprecation comment. /** * ... * * @deprecated This function is deprecated. Use another new function instead. */ ## Namespaces Doxygen output includes a _Namespaces_ page that shows all the namespaces declared with comment blocks in the processed files. Here is an example of a doxygen description comment for a namespace declaration. /** * @brief cuDF interfaces * * This is the top-level namespace which contains all cuDF functions and types. 
*/ namespace cudf { A description comment should be included only once for each unique namespace declaration. Otherwise, if more than one description is found, doxygen aggregates the descriptions in an arbitrary order in the output pages. If you introduce a new namespace, provide a description block for only one declaration and not for every occurrence. ## Groups/Modules Grouping declarations into modules helps users to find APIs in the doxygen pages. Generally, common functions are already grouped logically into header files but doxygen does not automatically group them this way in its output. The doxygen output includes a _Modules_ page that organizes items into groups specified using the [Grouping doxygen commands](https://www.doxygen.nl/manual/grouping.html). These commands can group common functions across header files, source files, and even namespaces. Groups can also be nested by defining new groups within existing groups. For libcudf, all the group hierarchy is defined in the [doxygen_groups.h](../include/doxygen_groups.h) header file. The [doxygen_groups.h](../include/doxygen_groups.h) file does not need to be included in any other source file, because the definitions in this file are used only by the doxygen tool to generate groups in the _Modules_ page. Modify this file only to add or update groups. The existing groups have been carefully structured and named, so new groups should be added thoughtfully. When creating a new API, specify its group using the [\@ingroup](https://www.doxygen.nl/manual/commands.html#cmdingroup) tag and the group reference id from the [doxygen_groups.h](../include/doxygen_groups.h) file. namespace cudf { /** * @brief ... * * @ingroup transformation_fill * * @param ... * @return ... */ std::unique_ptr<column> fill(table_view const& input,...); } // namespace cudf You can also use the \@addtogroup with a `@{ ... @}` pair to automatically include doxygen comment blocks as part of a group. 
namespace cudf { /** * @addtogroup transformation_fill * @{ */ /** * @brief ... * * @param ... * @return ... */ std::unique_ptr<column> fill(table_view const& input,...); /** @} */ } // namespace cudf This just saves adding \@ingroup to individual doxygen comment blocks within a file. Make sure a blank line is included after the \@addtogroup command block so doxygen knows it does not apply to whatever follows in the source code. Note that doxygen will not assign groups to items if the \@addtogroup with `@{ ... @}` pair includes a namespace declaration. So include the `@addtogroup` and `@{ ... @}` between the namespace declaration braces as shown in the example above. Summary of groups tags | Tag/Command | Where to use | | ----------- | ------------ | | `@defgroup` | For use only in [doxygen_groups.h](../include/doxygen_groups.h) and should include the group's title. | | `@ingroup` | Use inside individual doxygen block comments for declaration statements in a header file. | | `@addtogroup` | Use instead of `@ingroup` for multiple declarations in the same file within a namespace declaration. Do not specify a group title. | | `@{ ... @}` | Use only with `@addtogroup`. | ## Build Doxygen Output We recommend installing Doxygen using conda (`conda install doxygen`) or a Linux package manager (`sudo apt install doxygen`). Alternatively you can [build and install doxygen from source](https://www.doxygen.nl/manual/install.html). To build the libcudf HTML documentation simply run the `doxygen` command from the `cpp/doxygen` directory containing the `Doxyfile`. The libcudf documentation can also be built using `cmake --build . --target docs_cudf` from the cmake build directory (e.g. `cpp/build`). Doxygen reads and processes all appropriate source files under the `cpp/include/` directory. The output is generated in the `cpp/doxygen/html/` directory. You can load the local `index.html` file generated there into any web browser to view the result. 
To view docs built on a remote server, you can run a simple HTTP server using Python: `cd html && python -m http.server`. Then open `<IP address>:8000` in your local web browser, inserting the IP address of the machine on which you ran the HTTP server. The doxygen output is intended for building documentation only for the public APIs and classes. For example, the output should not include documentation for `detail` or `/src` files, and these directories are excluded in the `Doxyfile` configuration. When published by the build/CI system, the doxygen output will appear on our external [RAPIDS web site](https://docs.rapids.ai/api/libcudf/stable/index.html).
0
rapidsai_public_repos/cudf/cpp/doxygen
rapidsai_public_repos/cudf/cpp/doxygen/developer_guide/BENCHMARKING.md
# Unit Benchmarking in libcudf Unit benchmarks in libcudf are written using [NVBench](https://github.com/NVIDIA/nvbench). While many existing benchmarks are written using [Google Benchmark](https://github.com/google/benchmark), new benchmarks should use NVBench. The NVBench library is similar to Google Benchmark, but has several quality of life improvements when doing GPU benchmarking such as displaying the fraction of peak memory bandwidth achieved and details about the GPU hardware. Both NVBench and Google Benchmark provide many options for specifying ranges of parameters to benchmark, as well as to control the time unit reported, among other options. Refer to existing benchmarks in `cpp/benchmarks` to understand the options. ## Directory and File Naming The naming of unit benchmark directories and source files should be consistent with the feature being benchmarked. For example, the benchmarks for APIs in `copying.hpp` should live in `cpp/benchmarks/copying`. Each feature (or set of related features) should have its own benchmark source file named `<feature>.cu/cpp`. For example, `cpp/src/copying/scatter.cu` has benchmarks in `cpp/benchmarks/copying/scatter.cu`. In the interest of improving compile time, whenever possible, benchmark source files should be `.cpp` files because `nvcc` is slower than `gcc` in compiling host code. Note that `thrust::device_vector` includes device code, and so must only be used in `.cu` files. `rmm::device_uvector`, `rmm::device_buffer` and the various `column_wrapper` types described in [Testing](TESTING.md) can be used in `.cpp` files, and are therefore preferred in benchmark code over `thrust::device_vector`. ## CUDA Asynchrony and benchmark accuracy CUDA computations and operations like copies are typically asynchronous with respect to host code, so it is important to carefully synchronize in order to ensure the benchmark timing is not stopped before the feature you are benchmarking has completed.
An RAII helper class `cuda_event_timer` is provided in `cpp/benchmarks/synchronization/synchronization.hpp` to help with this. This class can also optionally clear the GPU L2 cache in order to ensure cache hits do not artificially inflate performance in repeated iterations. ## Data generation For generating benchmark input data, helper functions are available at [cpp/benchmarks/common/generate_input.hpp](/cpp/benchmarks/common/generate_input.hpp). The input data generation happens on device, in contrast to any `column_wrapper` where data generation happens on the host. * `create_sequence_table` can generate sequence columns starting with value 0 in first row and increasing by 1 in subsequent rows. * `create_random_column` can generate a column filled with random data. The random data parameters are configurable. * `create_random_table` can generate a table of columns filled with random data. The random data parameters are configurable. ## What should we benchmark? In general, we should benchmark all features over a range of data sizes and types, so that we can catch regressions across libcudf changes. However, running many benchmarks is expensive, so ideally we should sample the parameter space in such a way to get good coverage without having to test exhaustively. A rule of thumb is that we should benchmark with enough data to reach the point where the algorithm reaches its saturation bottleneck, whether that bottleneck is bandwidth or computation. Using data sets larger than this point is generally not helpful, except in specific cases where doing so exercises different code and can therefore uncover regressions that smaller benchmarks will not (this should be rare).
0
rapidsai_public_repos/cudf/cpp/doxygen
rapidsai_public_repos/cudf/cpp/doxygen/developer_guide/TESTING.md
# Unit Testing in libcudf Unit tests in libcudf are written using [Google Test](https://github.com/google/googletest/blob/master/docs/primer.md). **Important:** Instead of including `gtest/gtest.h` directly, use `#include <cudf_test/cudf_gtest.hpp>`. Also, write test code in the global namespace. That is, do not write test code in the `cudf` or the `cudf::test` namespace or their sub-namespaces. Likewise, do not use `using namespace cudf;` or `using namespace cudf::test;` in the global namespace. ## Best Practices: What Should We Test? In general we should test to make sure all code paths are covered. This is not always easy or possible. But generally this means we test all supported combinations of algorithms and data types, and all operators supported by algorithms that support multiple operators (e.g. reductions, groupby). Here are some other guidelines. * In general empty input is not an error in libcudf. Typically empty input results in empty output. Tests should verify this. * Anything that involves manipulating bitmasks (especially hand-rolled kernels) should have tests that check varying number of rows, especially around boundaries like the warp size (32). So, test fewer than 32 rows, more than 32 rows, exactly 32 rows, and greater than 64 rows. * Most algorithms should have one or more tests exercising inputs with a large enough number of rows to require launching multiple thread blocks, especially when values are ultimately communicated between blocks (e.g. reductions). This is especially important for custom kernels but also applies to Thrust and CUB algorithm calls with lambdas / functors. * For anything involving strings or lists, test exhaustive combinations of empty strings/lists, null strings/lists and strings/lists with null elements. * Strings tests should include a mixture of non-ASCII UTF-8 characters like `é` in test data. * Test sliced columns as input (that is, columns that have a nonzero `offset`). This is an easy to forget case. 
* Tests that verify various forms of "degenerate" column inputs, for example: empty string columns that have no children (not many paths in cudf can generate these but it does happen); columns with zero size but that somehow have non-null data pointers; and struct columns with no children. * Decimal types are not included in the `cudf::test::NumericTypes` type list, but are included in `cudf::test::FixedWidthTypes`, so be careful that tests either include or exclude decimal types as appropriate. ## Directory and File Naming The naming of unit test directories and source files should be consistent with the feature being tested. For example, the tests for APIs in `copying.hpp` should live in `cudf/cpp/tests/copying`. Each feature (or set of related features) should have its own test source file named `<feature>_tests.cu/cpp`. For example, `cudf/cpp/src/copying/scatter.cu` has tests in `cudf/cpp/tests/copying/scatter_tests.cu`. In the interest of improving compile time, whenever possible, test source files should be `.cpp` files because `nvcc` is slower than `gcc` in compiling host code. Note that `thrust::device_vector` includes device code, and so must only be used in `.cu` files. `rmm::device_uvector`, `rmm::device_buffer` and the various `column_wrapper` types described later can be used in `.cpp` files, and are therefore preferred in test code over `thrust::device_vector`. ## Base Fixture All libcudf unit tests should make use of a GTest ["Test Fixture"](https://github.com/google/googletest/blob/master/docs/primer.md#test-fixtures-using-the-same-data-configuration-for-multiple-tests-same-data-multiple-tests). Even if the fixture is empty, it should inherit from the base fixture `cudf::test::BaseFixture` found in `include/cudf_test/base_fixture.hpp`. This ensures that RMM is properly initialized and finalized. `cudf::test::BaseFixture` already inherits from `testing::Test` and therefore it is not necessary for your test fixtures to inherit from it. 
Example: class MyTestFixture : public cudf::test::BaseFixture {...}; ## Typed Tests In general, libcudf features must work across all of the supported types (there are exceptions e.g. not all binary operations are supported for all types). In order to automate the process of running the same tests across multiple types, we use GTest's [Typed Tests](https://github.com/google/googletest/blob/master/docs/advanced.md#typed-tests). Typed tests allow you to write a test once and run it across a list of types. For example: ```c++ // Fixture must be a template template <typename T> class TypedTestFixture : public cudf::test::BaseFixture {...}; using TestTypes = cudf::test::Types<int,float,double>; // Notice custom cudf type list type TYPED_TEST_SUITE(TypedTestFixture, TestTypes); TYPED_TEST(TypedTestFixture, FirstTest){ // Access the current type using `TypeParam` using T = TypeParam; } ``` To specify the list of types to use, instead of GTest's `testing::Types<...>`, libcudf provides `cudf::test::Types<...>` which is a custom, drop-in replacement for `testing::Types`. In this example, all tests using the `TypedTestFixture` fixture will run once for each type in the list defined in `TestTypes` (`int, float, double`). ### Type Lists The list of types that are used in tests should be consistent across all tests. To ensure consistency, several sets of common type lists are provided in `include/cudf_test/type_lists.hpp`. For example, `cudf::test::NumericTypes` is a type list of all numeric types, `FixedWidthTypes` is a list of all fixed-width element types, and `cudf::test::AllTypes` is a list of every element type that libcudf supports. ```c++ #include <cudf_test/type_lists.hpp> // All tests using TypeTestFixture will be invoked once for each numeric type TYPED_TEST_SUITE(TypedTestFixture, cudf::test::NumericTypes); ``` Whenever possible, use one of the type lists provided in `include/cudf_test/type_lists.hpp` rather than creating new custom lists.
#### Advanced Type Lists Sometimes it is necessary to generate more advanced type lists than the simple lists of single types in the `TypeList` example above. libcudf provides a set of meta-programming utilities in `include/cudf_test/type_list_utilities.hpp` for generating and composing more advanced type lists. For example, it may be useful to generate a *nested* type list where each element in the list is two types. In a nested type list, each element in the list is itself another list. In order to access the `N`th type within the nested list, use `GetType<NestedList, N>`. Imagine testing all possible two-type combinations of `<int,float>`. This could be done manually: ```c++ template <typename TwoTypes> class TwoTypesFixture : cudf::test::BaseFixture{...}; using TwoTypesList = Types< Types<int, int>, Types<int, float>, Types<float, int>, Types<float, float> >; TYPED_TEST_SUITE(TwoTypesFixture, TwoTypesList); TYPED_TEST(TwoTypesFixture, FirstTest){ // TypeParam is a list of two types, i.e., a "nested" type list // Use `cudf::test::GetType` to retrieve the individual types using FirstType = GetType<TypeParam,0>; using SecondType = GetType<TypeParam,1>; } ``` The above example manually specifies all pairs composed of `int` and `float`. `CrossProduct` is a utility in `type_list_utilities.hpp` which materializes this cross product automatically. ```c++ using TwoTypesList = Types< Types<int, int>, Types<int, float>, Types<float, int>, Types<float, float> >; using CrossProductTypeList = CrossProduct< Types<int, float>, Types<int, float> >; // TwoTypesList and CrossProductTypeList are identical ``` `CrossProduct` can be used with an arbitrary number of type lists to generate nested type lists of two or more types. **However**, overuse of `CrossProduct` can dramatically inflate compile time. The cross product of two type lists of size `n` and `m` will result in a new list with `n*m` nested type lists.
This means `n*m` templates will be instantiated; `n` and `m` need not be large before compile time becomes unreasonable. There are a number of other utilities in `type_list_utilities.hpp`. For more details, see the documentation in that file and their associated tests in `cudf/cpp/tests/utilities_tests/type_list_tests.cpp`. ## Utilities libcudf provides a number of utilities in `include/cudf_test` to make common testing operations more convenient. Before creating your own test utilities, look to see if one already exists that does what you need. If not, consider adding a new utility to do what you need. However, make sure that the utility is generic enough to be useful for other tests and is not overly tailored to your specific testing need. ### Column Wrappers In order to make generating input columns easier, libcudf provides the `*_column_wrapper` classes in `include/cudf_test/column_wrapper.hpp`. These classes wrap a `cudf::column` and provide constructors for initializing a `cudf::column` object usable with libcudf APIs. Any `*_column_wrapper` class is implicitly convertible to a `column_view` or `mutable_column_view` and therefore may be transparently passed to any API expecting a `column_view` or `mutable_column_view` argument. #### fixed_width_column_wrapper The `cudf::test::fixed_width_column_wrapper` class should be used for constructing and initializing columns of any fixed-width element type, e.g., numeric types, timestamp types, Boolean, etc. `cudf::test::fixed_width_column_wrapper` provides constructors that accept an iterator range to generate each element in the column. For nullable columns, an additional iterator can be provided to indicate the validity of each element. There are also constructors that accept a `std::initializer_list<T>` for the column elements and optionally for the validity of each element. 
Example: ```c++ // Creates a non-nullable column of INT32 elements with 5 elements: {0, 1, 2, 3, 4} auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i;}); cudf::test::fixed_width_column_wrapper<int32_t> w(elements, elements + 5); // Creates a nullable column of INT32 elements with 5 elements: {null, 1, null, 3, null} auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i;}); auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}) cudf::test::fixed_width_column_wrapper<int32_t> w(elements, elements + 5, validity); // Creates a non-nullable INT32 column with 4 elements: {1, 2, 3, 4} cudf::test::fixed_width_column_wrapper<int32_t> w{{1, 2, 3, 4}}; // Creates a nullable INT32 column with 4 elements: {1, NULL, 3, NULL} cudf::test::fixed_width_column_wrapper<int32_t> w{ {1,2,3,4}, {1, 0, 1, 0}}; ``` #### fixed_point_column_wrapper The `cudf::test::fixed_point_column_wrapper` class should be used for constructing and initializing columns of any fixed-point element type (DECIMAL32 or DECIMAL64). `cudf::test::fixed_point_column_wrapper` provides constructors that accept an iterator range to generate each element in the column. For nullable columns, an additional iterator can be provided to indicate the validity of each element. Constructors also take the scale of the fixed-point values to create. 
Example: ```c++ // Creates a non-nullable column of 4 DECIMAL32 elements of scale 3: {0, 1000, 2000, 3000} auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i){ return i; }); cudf::test::fixed_point_column_wrapper<int32_t> w(elements, elements + 4, 3); // Creates a nullable column of 5 DECIMAL32 elements of scale 2: {null, 100, null, 300, null} auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i){ return i; }); auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){ return i % 2; }); cudf::test::fixed_point_column_wrapper<int32_t> w(elements, elements + 5, validity, 2); ``` #### dictionary_column_wrapper The `cudf::test::dictionary_column_wrapper` class should be used to create dictionary columns. `cudf::test::dictionary_column_wrapper` provides constructors that accept an iterator range to generate each element in the column. For nullable columns, an additional iterator can be provided to indicate the validity of each element. There are also constructors that accept a `std::initializer_list<T>` for the column elements and optionally for the validity of each element. Example: ```c++ // Creates a non-nullable dictionary column of INT32 elements with 5 elements // keys = {0, 2, 6}, indices = {0, 1, 1, 2, 2} std::vector<int32_t> elements{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> w(elements.begin(), elements.end()); // Creates a nullable dictionary column with 5 elements and a validity iterator. std::vector<int32_t> elements{0, 2, 0, 6, 0}; // Validity iterator here sets even rows to null. auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}); // keys = {2, 6}, indices = {NULL, 0, NULL, 1, NULL} cudf::test::dictionary_column_wrapper<int32_t> w(elements.begin(), elements.end(), validity); // Creates a non-nullable dictionary column with 4 elements. 
// keys = {1, 2, 3}, indices = {0, 1, 2, 0} cudf::test::dictionary_column_wrapper<int32_t> w{{1, 2, 3, 1}}; // Creates a nullable dictionary column with 4 elements and validity initializer. // keys = {1, 3}, indices = {0, NULL, 1, NULL} cudf::test::dictionary_column_wrapper<int32_t> w{ {1, 0, 3, 0}, {1, 0, 1, 0}}; // Creates a nullable column of dictionary elements with 5 elements and validity initializer. std::vector<int32_t> elements{0, 2, 2, 6, 6}; // keys = {2, 6}, indices = {NULL, 0, NULL, 1, NULL} cudf::test::dictionary_column_wrapper<int32_t> w(elements.begin(), elements.end(), {0, 1, 0, 1, 0}); // Creates a non-nullable dictionary column with 7 string elements std::vector<std::string> strings{"", "aaa", "bbb", "aaa", "bbb", "ccc", "bbb"}; // keys = {"","aaa","bbb","ccc"}, indices = {0, 1, 2, 1, 2, 3, 2} cudf::test::dictionary_column_wrapper<std::string> d(strings.begin(), strings.end()); // Creates a nullable dictionary column with 7 string elements and a validity iterator. // Validity iterator here sets even rows to null. // keys = {"a", "bb"}, indices = {NULL, 1, NULL, 1, NULL, 0, NULL} auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}); cudf::test::dictionary_column_wrapper<std::string> d({"", "bb", "", "bb", "", "a", ""}, validity); ``` #### strings_column_wrapper The `cudf::test::strings_column_wrapper` class should be used to create columns of strings. It provides constructors that accept an iterator range to generate each string in the column. For nullable columns, an additional iterator can be provided to indicate the validity of each string. There are also constructors that accept a `std::initializer_list<std::string>` for the column's strings and optionally for the validity of each element. 
Example: ```c++ // Creates a non-nullable STRING column with 7 string elements: // {"", "this", "is", "a", "column", "of", "strings"} std::vector<std::string> strings{"", "this", "is", "a", "column", "of", "strings"}; cudf::test::strings_column_wrapper s(strings.begin(), strings.end()); // Creates a nullable STRING column with 7 string elements: // {NULL, "this", NULL, "a", NULL, "of", NULL} std::vector<std::string> strings{"", "this", "is", "a", "column", "of", "strings"}; auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}); cudf::test::strings_column_wrapper s(strings.begin(), strings.end(), validity); // Creates a non-nullable STRING column with 7 string elements: // {"", "this", "is", "a", "column", "of", "strings"} cudf::test::strings_column_wrapper s({"", "this", "is", "a", "column", "of", "strings"}); // Creates a nullable STRING column with 7 string elements: // {NULL, "this", NULL, "a", NULL, "of", NULL} auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}); cudf::test::strings_column_wrapper s({"", "this", "is", "a", "column", "of", "strings"}, validity); ``` #### lists_column_wrapper The `cudf::test::lists_column_wrapper` class should be used to create columns of lists. It provides constructors that accept an iterator range to generate each list in the column. For nullable columns, an additional iterator can be provided to indicate the validity of each list. There are also constructors that accept a `std::initializer_list<T>` for the column's lists and optionally for the validity of each element. A number of other constructors are available. 
Example: ```c++ // Creates an empty LIST column // [] cudf::test::lists_column_wrapper l{}; // Creates a LIST column with 1 list composed of 2 total integers // [{0, 1}] cudf::test::lists_column_wrapper l{0, 1}; // Creates a LIST column with 3 lists // [{0, 1}, {2, 3}, {4, 5}] cudf::test::lists_column_wrapper l{ {0, 1}, {2, 3}, {4, 5} }; // Creates a LIST of LIST columns with 2 lists on the top level and // 4 below // [ {{0, 1}, {2, 3}}, {{4, 5}, {6, 7}} ] cudf::test::lists_column_wrapper l{ {{0, 1}, {2, 3}}, {{4, 5}, {6, 7}} }; // Creates a LIST column with 1 list composed of 5 total integers // [{0, 2, 4, 6, 8}] auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i*2;}); cudf::test::lists_column_wrapper l(elements, elements+5); // Creates a LIST column with 1 list composed of 2 total integers // [{NULL, 1}] auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}); cudf::test::lists_column_wrapper l{{0, 1}, validity}; // Creates a LIST column with 1 list composed of 5 total integers // [{NULL, 2, NULL, 6, NULL}] auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i*2;}); auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}); cudf::test::lists_column_wrapper l(elements, elements+5, validity); // Creates a LIST column with 1 list composed of 2 total strings // [{"abc", "def"}] cudf::test::lists_column_wrapper l{"abc", "def"}; // Creates a LIST of LIST columns with 2 lists on the top level and 4 below // [ {NULL, {2, 3}}, {NULL, {6, 7}} ] auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i){return i % 2;}); cudf::test::lists_column_wrapper l{ {{{0, 1}, {2, 3}}, validity}, {{{4, 5}, {6, 7}}, validity} }; ``` #### structs_column_wrapper The `cudf::test::structs_column_wrapper` class should be used to create columns of structs. 
It provides constructors that accept a vector or initializer list of pre-constructed columns or column wrappers for child columns. For nullable columns, an additional iterator can be provided to indicate the validity of each struct. Examples: ```c++ // The following constructs a column for struct< int, string >. auto child_int_col = cudf::test::fixed_width_column_wrapper<int32_t>{ 1, 2, 3, 4, 5 }.release(); auto child_string_col = cudf::test::strings_column_wrapper {"All", "the", "leaves", "are", "brown"}.release(); std::vector<std::unique_ptr<cudf::column>> child_columns; child_columns.push_back(std::move(child_int_col)); child_columns.push_back(std::move(child_string_col)); cudf::test::structs_column_wrapper wrapper{ std::move(child_columns), {1,0,1,0,1} // Validity }; auto struct_col {wrapper.release()}; // The following constructs a column for struct< int, string >. cudf::test::fixed_width_column_wrapper<int32_t> child_int_col_wrapper{ 1, 2, 3, 4, 5 }; cudf::test::strings_column_wrapper child_string_col_wrapper {"All", "the", "leaves", "are", "brown"}; cudf::test::structs_column_wrapper wrapper{ {child_int_col_wrapper, child_string_col_wrapper}, {1,0,1,0,1} // Validity }; auto struct_col {wrapper.release()}; // The following constructs a column for struct< int, string >. cudf::test::fixed_width_column_wrapper<int32_t> child_int_col_wrapper{ 1, 2, 3, 4, 5 }; cudf::test::strings_column_wrapper child_string_col_wrapper {"All", "the", "leaves", "are", "brown"}; cudf::test::structs_column_wrapper wrapper{ {child_int_col_wrapper, child_string_col_wrapper}, cudf::detail::make_counting_transform_iterator(0, [](auto i){ return i % 2; }) // Validity }; auto struct_col {wrapper.release()}; ``` ### Column Comparison Utilities A common operation in testing is verifying that two columns are equal, or equivalent, or that they have the same metadata. #### CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUAL Verifies that two columns have the same type, size, and nullability. 
For nested types, recursively verifies the equality of type, size and nullability of all nested children. #### CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUIVALENT Verifies that two columns have equivalent type and equal size, ignoring nullability. For nested types, recursively verifies the equivalence of type, and equality of size of all nested children, ignoring nullability. Note "equivalent type". Most types are equivalent if and only if they are equal. `fixed_point` types are one exception. They are equivalent if the representation type is equal, even if they have different scales. Nested type columns can be equivalent in the case where they both have zero size, but one has children (also empty) and the other does not. For columns with nonzero size, both equals and equivalent expect equal number of children. #### CUDF_TEST_EXPECT_COLUMNS_EQUAL Verifies that two columns have equal properties and verifies elementwise equality of the column data. Null elements are treated as equal. #### CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT Verifies that two columns have equivalent properties and verifies elementwise equivalence of the column data. Null elements are treated as equivalent. #### CUDF_TEST_EXPECT_EQUAL_BUFFERS Verifies the bitwise equality of two device memory buffers. #### Caveats Column comparison functions in the `cudf::test::detail` namespace should **NOT** be used directly. ### Printing and accessing column data `include/cudf_test/column_utilities.hpp` defines various functions and overloads for printing columns (`print`), converting column data to string (`to_string`, `to_strings`), and copying data to the host (`to_host`). ## Validating Stream Usage ### Background libcudf employs a custom-built [preload library](https://man7.org/linux/man-pages/man8/ld.so.8.html) to validate its internal stream usage (the code may be found [`here`](https://github.com/rapidsai/cudf/blob/main/cpp/tests/utilities/identify_stream_usage.cpp)). 
This library wraps every asynchronous CUDA runtime API call that accepts a stream with a check to ensure that the passed CUDA stream is a valid one, immediately throwing an exception if an invalid stream is detected. Running tests with this library loaded immediately triggers errors if any test accidentally runs code on an invalid stream. Stream validity is determined by overloading the definition of libcudf's default stream. Normally, in libcudf `cudf::get_default_stream` returns one of `rmm`'s default stream values (depending on whether or not libcudf is compiled with per thread default stream enabled). In the preload library, this function is redefined to instead return a new user-created stream managed using a function-local static `rmm::cuda_stream`. An invalid stream in this situation is defined as any of CUDA's default stream values (cudaStreamLegacy, cudaStreamDefault, or cudaStreamPerThread), since any kernel that properly uses `cudf::get_default_stream` will now instead be using the custom stream created by the preload library. The preload library supports two different modes, `cudf` mode and `testing` mode. The previous paragraph describes the behavior of `cudf` mode, where `cudf::get_default_stream` is overloaded. In `cudf` mode, the preload library ensures that all CUDA runtime APIs are being provided cudf's default stream. This will detect oversights where, for example, a Thrust call has no stream specified, or when one of CUDA's default stream values is explicitly specified to a kernel. However, it will not detect cases where a stream is not correctly forwarded down the call stack, for instance if some `detail` function that accepts a stream parameter fails to forward it along and instead erroneously calls `cudf::get_default_stream` instead. In `testing` mode, the library instead overloads `cudf::test::get_default_stream`. This function defined in the `cudf::test` namespace enables a more stringent mode of testing. 
In `testing` mode, the preload library instead verifies that all CUDA runtime APIs are called using the test namespace's default stream. This distinction is important because cudf internals never use `cudf::test::get_default_stream`, so this stream value can only appear internally if it was provided to a public API and forwarded properly all the way down the call stack. While `testing` mode is more strict than `cudf` mode, it is also more intrusive. `cudf` mode can operate with no changes to the library or the tests because the preload library overwrites the relevant APIs in place. `testing` mode, however, can only be used to validate tests that are correctly passing `cudf::test::get_default_stream` to public libcudf APIs. In addition to the preload library, the test suite also implements a [custom memory resource](https://github.com/rapidsai/cudf/blob/main/cpp/include/cudf_test/stream_checking_resource_adaptor.hpp) that performs analogous stream verification when its `do_allocate` method is called. During testing, rmm's default memory resource is set to use this adaptor for additional stream validation. ### Usage When writing tests for a libcudf API, a special set of additional tests should be added to validate the API's stream usage. These tests should be placed in the `cpp/tests/streams` directory in a file corresponding to the header containing the tested APIs, e.g. `cpp/tests/streams/copying_test.cpp` for all APIs declared in `cpp/include/cudf/copying.hpp`. These tests should contain a minimal invocation of the tested API with no additional assertions since they are solely designed to check stream usage. When adding these tests to `cpp/tests/CMakeLists.txt`, the `ConfigureTest` CMake function should be provided the arguments `STREAM_MODE testing`. This change is sufficient for CTest to set up the test to automatically load the preload library compiled in `testing` mode when running the test. 
The rest of the test suite is configured to run with the preload library in `cudf` mode. As a result, all test runs with `ctest` will always include stream validation. Since this configuration is managed via CMake and CTest, direct execution of the test executables will not use the preload library at all. Tests will still run and pass normally in this situation, however (with the exception of the test of the preload library itself).
0
rapidsai_public_repos/cudf/cpp/doxygen
rapidsai_public_repos/cudf/cpp/doxygen/developer_guide/DEVELOPER_GUIDE.md
# libcudf C++ Developer Guide {#DEVELOPER_GUIDE} This document serves as a guide for contributors to libcudf C++ code. Developers should also refer to these additional files for further documentation of libcudf best practices. * [Documentation Guide](DOCUMENTATION.md) for guidelines on documenting libcudf code. * [Testing Guide](TESTING.md) for guidelines on writing unit tests. * [Benchmarking Guide](BENCHMARKING.md) for guidelines on writing unit benchmarks. # Overview libcudf is a C++ library that provides GPU-accelerated data-parallel algorithms for processing column-oriented tabular data. libcudf provides algorithms including slicing, filtering, sorting, various types of aggregations, and database-type operations such as grouping and joins. libcudf serves a number of clients via multiple language interfaces, including Python and Java. Users may also use libcudf directly from C++ code. ## Lexicon This section defines terminology used within libcudf. ### Column A column is an array of data of a single type. Along with Tables, columns are the fundamental data structures used in libcudf. Most libcudf algorithms operate on columns. Columns may have a validity mask representing whether each element is valid or null (invalid). Columns of nested types are supported, meaning that a column may have child columns. A column is the C++ equivalent to a cuDF Python [Series](https://docs.rapids.ai/api/cudf/stable/api_docs/series.html). ### Element An individual data item within a column. Also known as a row. ### Scalar A type representing a single element of a data type. ### Table A table is a collection of columns with equal number of elements. A table is the C++ equivalent to a cuDF Python [DataFrame](https://docs.rapids.ai/api/cudf/stable/api_docs/dataframe.html). ### View A view is a non-owning object that provides zero-copy access (possibly with slicing or offsets) to data owned by another object. Examples are column views and table views. 
# Directory Structure and File Naming External/public libcudf APIs are grouped based on functionality into an appropriately titled header file in `cudf/cpp/include/cudf/`. For example, `cudf/cpp/include/cudf/copying.hpp` contains the APIs for functions related to copying from one column to another. Note the `.hpp` file extension used to indicate a C++ header file. Header files should use the `#pragma once` include guard. The naming of external API headers should be consistent with the name of the folder that contains the source files that implement the API. For example, the implementation of the APIs found in `cudf/cpp/include/cudf/copying.hpp` are located in `cudf/src/copying`. Likewise, the unit tests for the APIs reside in `cudf/tests/copying/`. Internal API headers containing `detail` namespace definitions that are used across translation units inside libcudf should be placed in `include/cudf/detail`. ## File extensions - `.hpp` : C++ header files - `.cpp` : C++ source files - `.cu` : CUDA C++ source files - `.cuh` : Headers containing CUDA device code Only use `.cu` and `.cuh` if necessary. A good indicator is the inclusion of `__device__` and other symbols that are only recognized by `nvcc`. Another indicator is Thrust algorithm APIs with a device execution policy (always `rmm::exec_policy` in libcudf). ## Code and Documentation Style and Formatting libcudf code uses [snake_case](https://en.wikipedia.org/wiki/Snake_case) for all names except in a few cases: template parameters, unit tests and test case names may use Pascal case, aka [UpperCamelCase](https://en.wikipedia.org/wiki/Camel_case). We do not use [Hungarian notation](https://en.wikipedia.org/wiki/Hungarian_notation), except sometimes when naming device data variables and their corresponding host copies. Private member variables are typically prefixed with an underscore. 
```c++ template <typename IteratorType> void algorithm_function(int x, rmm::cuda_stream_view s, rmm::device_memory_resource* mr) { ... } class utility_class { ... private: int _rating{}; std::unique_ptr<cudf::column> _column{}; } TYPED_TEST_SUITE(RepeatTypedTestFixture, cudf::test::FixedWidthTypes); TYPED_TEST(RepeatTypedTestFixture, RepeatScalarCount) { ... } ``` C++ formatting is enforced using `clang-format`. You should configure `clang-format` on your machine to use the `cudf/cpp/.clang-format` configuration file, and run `clang-format` on all changed code before committing it. The easiest way to do this is to configure your editor to "format on save." Aspects of code style not discussed in this document and not automatically enforceable are typically caught during code review, or not enforced. ### C++ Guidelines In general, we recommend following [C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines). We also recommend watching Sean Parent's [C++ Seasoning talk](https://www.youtube.com/watch?v=W2tWOdzgXHA), and we try to follow his rules: "No raw loops. No raw pointers. No raw synchronization primitives." * Prefer algorithms from STL and Thrust to raw loops. * Prefer libcudf and RMM [owning data structures and views](#libcudf-data-structures) to raw pointers and raw memory allocation. * libcudf doesn't have a lot of CPU-thread concurrency, but there is some. And currently libcudf does use raw synchronization primitives. So we should revisit Parent's third rule and improve here. Additional style guidelines for libcudf code include: * [NL.11: Make Literals Readable](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#nl11-make-literals-readable): Decimal values should use integer separators every thousands place, like `1'234'567`. Hexadecimal values should use separators every 4 characters, like `0x0123'ABCD`. Documentation is discussed in the [Documentation Guide](DOCUMENTATION.md). 
### Includes The following guidelines apply to organizing `#include` lines. * Group includes by library (e.g. cuDF, RMM, Thrust, STL). `clang-format` will respect the groupings and sort the individual includes within a group lexicographically. * Separate groups by a blank line. * Order the groups from "nearest" to "farthest". In other words, local includes, then includes from other RAPIDS libraries, then includes from related libraries, like `<thrust/...>`, then includes from dependencies installed with cuDF, and then standard headers (for example `<string>`, `<iostream>`). * Use `<>` instead of `""` unless the header is in the same directory as the source file. * Tools like `clangd` often auto-insert includes when they can, but they usually get the grouping and brackets wrong. * Always check that includes are only necessary for the file in which they are included. Try to avoid excessive including especially in header files. Double check this when you remove code. * Use quotes `"` to include local headers from the same relative source directory. This should only occur in source files and non-public header files. Otherwise use angle brackets `<>` around included header filenames. * Avoid relative paths with `..` when possible. Paths with `..` are necessary when including (internal) headers from source paths not in the same directory as the including file, because source paths are not passed with `-I`. * Avoid including library internal headers from non-internal files. For example, try not to include headers from libcudf `src` directories in tests or in libcudf public headers. If you find yourself doing this, start a discussion about moving (parts of) the included internal header to a public header. # libcudf Data Structures Application data in libcudf is contained in Columns and Tables, but there are a variety of other data structures you will use when developing libcudf code. ## Views and Ownership Resource ownership is an essential concept in libcudf. 
In short, an "owning" object owns a resource (such as device memory). It acquires that resource during construction and releases the resource in destruction ([RAII](https://en.cppreference.com/w/cpp/language/raii)). A "non-owning" object does not own resources. Any class in libcudf with the `*_view` suffix is non-owning. For more detail see the [`libcudf` presentation.](https://docs.google.com/presentation/d/1zKzAtc1AWFKfMhiUlV5yRZxSiPLwsObxMlWRWz_f5hA/edit?usp=sharing) libcudf functions typically take views as input (`column_view` or `table_view`) and produce `unique_ptr`s to owning objects as output. For example, ```c++ std::unique_ptr<table> sort(table_view const& input); ``` ## rmm::device_memory_resource libcudf allocates all device memory via RMM memory resources (MR). See the [RMM documentation](https://github.com/rapidsai/rmm/blob/main/README.md) for details. ### Current Device Memory Resource RMM provides a "default" memory resource for each device that can be accessed and updated via the `rmm::mr::get_current_device_resource()` and `rmm::mr::set_current_device_resource(...)` functions, respectively. All memory resource parameters should be defaulted to use the return value of `rmm::mr::get_current_device_resource()`. ## cudf::column `cudf::column` is a core owning data structure in libcudf. Most libcudf public APIs produce either a `cudf::column` or a `cudf::table` as output. A `column` contains `device_buffer`s which own the device memory for the elements of a column and an optional null indicator bitmask. Implicitly convertible to `column_view` and `mutable_column_view`. Movable and copyable. A copy performs a deep copy of the column's contents, whereas a move moves the contents from one column to another. 
Example: ```c++ cudf::column col{...}; cudf::column copy{col}; // Copies the contents of `col` cudf::column const moved_to{std::move(col)}; // Moves contents from `col` column_view v = moved_to; // Implicit conversion to non-owning column_view // mutable_column_view m = moved_to; // Cannot create mutable view to const column ``` A `column` may have nested (child) columns, depending on the data type of the column. For example, `LIST`, `STRUCT`, and `STRING` type columns. ### cudf::column_view `cudf::column_view` is a core non-owning data structure in libcudf. It is an immutable, non-owning view of device memory as a column. Most libcudf public APIs take views as inputs. A `column_view` may be a view of a "slice" of a column. For example, it might view rows 75-150 of a column with 1000 rows. The `size()` of this `column_view` would be `75`, and accessing index `0` of the view would return the element at index `75` of the owning `column`. Internally, this is implemented by storing in the view a pointer, an offset, and a size. `column_view::data<T>()` returns a pointer iterator to `column_view::head<T>() + offset`. ### cudf::mutable_column_view A *mutable*, non-owning view of device memory as a column. Used for detail APIs and (rare) public APIs that modify columns in place. ### cudf::column_device_view An immutable, non-owning view of device data as a column of elements that is trivially copyable and usable in CUDA device code. Used to pass `column_view` data as input to CUDA kernels and device functions (including Thrust algorithms) ### cudf::mutable_column_device_view A mutable, non-owning view of device data as a column of elements that is trivially copyable and usable in CUDA device code. Used to pass `column_view` data to be modified on the device by CUDA kernels and device functions (including Thrust algorithms). ## cudf::table Owning class for a set of `cudf::column`s all with equal number of elements. This is the C++ equivalent to a data frame. 
Implicitly convertible to `cudf::table_view` and `cudf::mutable_table_view`. Movable and copyable. A copy performs a deep copy of all columns, whereas a move moves all columns from one table to another. ### cudf::table_view An *immutable*, non-owning view of a table. ### cudf::mutable_table_view A *mutable*, non-owning view of a table. ## cudf::size_type The `cudf::size_type` is the type used for the number of elements in a column, offsets to elements within a column, indices to address specific elements, segments for subsets of column elements, etc. It is equivalent to a signed, 32-bit integer type and therefore has a maximum value of 2147483647. Some APIs also accept negative index values and those functions support a minimum value of -2147483648. This fundamental type also influences output values not just for column size limits but for counting elements as well. ## Spans libcudf provides `span` classes that mimic C++20 `std::span`, which is a lightweight view of a contiguous sequence of objects. libcudf provides two classes, `host_span` and `device_span`, which can be constructed from multiple container types, or from a pointer (host or device, respectively) and size, or from iterators. `span` types are useful for defining generic (internal) interfaces which work with multiple input container types. `device_span` can be constructed from `thrust::device_vector`, `rmm::device_vector`, or `rmm::device_uvector`. `host_span` can be constructed from `thrust::host_vector`, `std::vector`, or `std::basic_string`. If you are defining internal (detail) functions that operate on vectors, use spans for the input vector parameters rather than a specific vector type, to make your functions more widely applicable. When a `span` refers to immutable elements, use `span<T const>`, not `span<T> const`. Since a span is a lightweight view, it does not propagate `const`-ness. Therefore, `const` should be applied to the template type parameter, not to the `span` itself. 
Also, `span` should be passed by value because it is a lightweight view. APIs in libcudf that take spans as input will look like the following function that copies device data to a host `std::vector`. ```c++ template <typename T> std::vector<T> make_std_vector_async(device_span<T const> v, rmm::cuda_stream_view stream) ``` ## cudf::scalar A `cudf::scalar` is an object that can represent a singular, nullable value of any of the types currently supported by cudf. Each type of value is represented by a separate type of scalar class which are all derived from `cudf::scalar`. e.g. A `numeric_scalar` holds a single numerical value, a `string_scalar` holds a single string. The data for the stored value resides in device memory. A `list_scalar` holds the underlying data of a single list. This means the underlying data can be any type that cudf supports. For example, a `list_scalar` representing a list of integers stores a `cudf::column` of type `INT32`. A `list_scalar` representing a list of lists of integers stores a `cudf::column` of type `LIST`, which in turn stores a column of type `INT32`. |Value type|Scalar class|Notes| |-|-|-| |fixed-width|`fixed_width_scalar<T>`| `T` can be any fixed-width type| |numeric|`numeric_scalar<T>` | `T` can be `int8_t`, `int16_t`, `int32_t`, `int64_t`, `float` or `double`| |fixed-point|`fixed_point_scalar<T>` | `T` can be `numeric::decimal32` or `numeric::decimal64`| |timestamp|`timestamp_scalar<T>` | `T` can be `timestamp_D`, `timestamp_s`, etc.| |duration|`duration_scalar<T>` | `T` can be `duration_D`, `duration_s`, etc.| |string|`string_scalar`| This class object is immutable| |list|`list_scalar`| Underlying data can be any type supported by cudf | ### Construction `scalar`s can be created using either their respective constructors or using factory functions like `make_numeric_scalar()`, `make_timestamp_scalar()` or `make_string_scalar()`. 
### Casting All the factory methods return a `unique_ptr<scalar>` which needs to be statically downcasted to its respective scalar class type before accessing its value. Their validity (nullness) can be accessed without casting. Generally, the value needs to be accessed from a function that is aware of the value type e.g. a functor that is dispatched from `type_dispatcher`. To cast to the requisite scalar class type given the value type, use the mapping utility `scalar_type_t` provided in `type_dispatcher.hpp` : ```c++ //unique_ptr<scalar> s = make_numeric_scalar(...); using ScalarType = cudf::scalar_type_t<T>; // ScalarType is now numeric_scalar<T> auto s1 = static_cast<ScalarType *>(s.get()); ``` ### Passing to device Each scalar type, except `list_scalar`, has a corresponding non-owning device view class which allows access to the value and its validity from the device. This can be obtained using the function `get_scalar_device_view(ScalarType s)`. Note that a device view is not provided for a base scalar object, only for the derived typed scalar class objects. The underlying data for `list_scalar` can be accessed via `view()` method. For non-nested data, the device view can be obtained via function `column_device_view::create(column_view)`. For nested data, a specialized device view for list columns can be constructed via `lists_column_device_view(column_device_view)`. # libcudf Policies and Design Principles `libcudf` is designed to provide thread-safe, single-GPU accelerated algorithm primitives for solving a wide variety of problems that arise in data science. APIs are written to execute on the default GPU, which can be controlled by the caller through standard CUDA device APIs or environment variables like `CUDA_VISIBLE_DEVICES`. Our goal is to enable diverse use cases like Spark or Pandas to benefit from the performance of GPUs, and libcudf relies on these higher-level layers like Spark or Dask to orchestrate multi-GPU tasks. 
To best satisfy these use-cases, libcudf prioritizes performance and flexibility, which sometimes may come at the cost of convenience. While we welcome users to use libcudf directly, we design with the expectation that most users will be consuming libcudf through higher-level layers like Spark or cuDF Python that handle some of details that direct users of libcudf must handle on their own. We document these policies and the reasons behind them here. ## libcudf does not introspect data libcudf APIs generally do not perform deep introspection and validation of input data. There are numerous reasons for this: 1. It violates the single responsibility principle: validation is separate from execution. 2. Since libcudf data structures store data on the GPU, any validation incurs _at minimum_ the overhead of a kernel launch, and may in general be prohibitively expensive. 3. API promises around data introspection often significantly complicate implementation. Users are therefore responsible for passing valid data into such APIs. _Note that this policy does not mean that libcudf performs no validation whatsoever_. libcudf APIs should still perform any validation that does not require introspection. To give some idea of what should or should not be validated, here are (non-exhaustive) lists of examples. **Things that libcudf should validate**: - Input column/table sizes or data types **Things that libcudf should not validate**: - Integer overflow - Ensuring that outputs will not exceed the [2GB size](#cudfsize_type) limit for a given set of inputs ## libcudf expects nested types to have sanitized null masks Various libcudf APIs accepting columns of nested data types (such as `LIST` or `STRUCT`) may assume that these columns have been sanitized. In this context, sanitization refers to ensuring that the null elements in a column with a nested dtype are compatible with the elements of nested columns. Specifically: - Null elements of list columns should also be empty. 
The starting offset of a null element should be equal to the ending offset. - Null elements of struct columns should also be null elements in the underlying structs. - For compound columns, nulls should only be present at the level of the parent column. Child columns should not contain nulls. - Slice operations on nested columns do not propagate offsets to child columns. libcudf APIs _should_ promise to never return "dirty" columns, i.e. columns containing unsanitized data. Therefore, the only problem is if users construct input columns that are not correctly sanitized and then pass those into libcudf APIs. ## Treat libcudf APIs as if they were asynchronous libcudf APIs called on the host do not guarantee that the stream is synchronized before returning. Work in libcudf occurs on `cudf::get_default_stream().value()`, which defaults to the CUDA default stream (stream 0). Note that the stream 0 behavior differs if [per-thread default stream is enabled](https://docs.nvidia.com/cuda/cuda-runtime-api/stream-sync-behavior.html) via `CUDF_USE_PER_THREAD_DEFAULT_STREAM`. Any data provided to or returned by libcudf that uses a separate non-blocking stream requires synchronization with the default libcudf stream to ensure stream safety. 
Calling code should not rely on the contents of libcudf error messages to determine the nature of the error. For information on the types of exceptions that libcudf throws under different circumstances, see the [section on error handling](#errors). # libcudf API and Implementation ## Streams libcudf is in the process of adding support for asynchronous execution using CUDA streams. In order to facilitate the usage of streams, all new libcudf APIs that allocate device memory or execute a kernel should accept an `rmm::cuda_stream_view` parameter at the end with a default value of `cudf::get_default_stream()`. There is one exception to this rule: if the API also accepts a memory resource parameter, the stream parameter should be placed just *before* the memory resource. This API should then forward the call to a corresponding `detail` API with an identical signature, except that the `detail` API should not have a default parameter for the stream ([detail APIs should always avoid default parameters](#default-parameters)). The implementation should be wholly contained in the `detail` API definition and use only asynchronous versions of CUDA APIs with the stream parameter. In order to make the `detail` API callable from other libcudf functions, it should be exposed in a header placed in the `cudf/cpp/include/detail/` directory. For example: ```c++ // cpp/include/cudf/header.hpp void external_function(...); // cpp/include/cudf/detail/header.hpp namespace detail{ void external_function(..., rmm::cuda_stream_view stream) } // namespace detail // cudf/src/implementation.cpp namespace detail{ // Use the stream parameter in the detail implementation. void external_function(..., rmm::cuda_stream_view stream){ // Implementation uses the stream with async APIs. 
rmm::device_buffer buff(...,stream); CUDF_CUDA_TRY(cudaMemcpyAsync(...,stream.value())); kernel<<<..., stream>>>(...); thrust::algorithm(rmm::exec_policy(stream), ...); } } // namespace detail void external_function(...){ CUDF_FUNC_RANGE(); // Generates an NVTX range for the lifetime of this function. detail::external_function(..., cudf::get_default_stream()); } ``` **Note:** It is important to synchronize the stream if *and only if* it is necessary. For example, when a non-pointer value is returned from the API that is the result of an asynchronous device-to-host copy, the stream used for the copy should be synchronized before returning. However, when a column is returned, the stream should not be synchronized because doing so will break asynchrony. **Note:** `cudaDeviceSynchronize()` should *never* be used. This limits the ability to do any multi-stream/multi-threaded work with libcudf APIs. ### Stream Creation There may be times in implementing libcudf features where it would be advantageous to use streams *internally*, i.e., to accomplish overlap in implementing an algorithm. However, dynamically creating a stream can be expensive. RMM has a stream pool class to help avoid dynamic stream creation. However, this is not yet exposed in libcudf, so for the time being, libcudf features should avoid creating streams (even if it is slightly less efficient). It is a good idea to leave a `// TODO:` note indicating where using a stream would be beneficial. ## Memory Allocation Device [memory resources](#rmmdevice_memory_resource) are used in libcudf to abstract and control how device memory is allocated. ### Output Memory Any libcudf API that allocates memory that is *returned* to a user must accept a pointer to a `device_memory_resource` as the last parameter. Inside the API, this memory resource must be used to allocate any memory for returned objects. It should therefore be passed into functions whose outputs will be returned. 
Example: ```c++ // Returned `column` contains newly allocated memory, // therefore the API must accept a memory resource pointer std::unique_ptr<column> returns_output_memory( ..., rmm::device_memory_resource * mr = rmm::mr::get_current_device_resource()); // This API does not allocate any new *output* memory, therefore // a memory resource is unnecessary void does_not_allocate_output_memory(...); ``` This rule automatically applies to all detail APIs that allocate memory. Any detail API may be called by any public API, and therefore could be allocating memory that is returned to the user. To support such use cases, all detail APIs allocating memory resources should accept an `mr` parameter. Callers are responsible for either passing through a provided `mr` or `rmm::mr::get_current_device_resource()` as needed. ### Temporary Memory Not all memory allocated within a libcudf API is returned to the caller. Often algorithms must allocate temporary, scratch memory for intermediate results. Always use the default resource obtained from `rmm::mr::get_current_device_resource()` for temporary memory allocations. Example: ```c++ rmm::device_buffer some_function( ..., rmm::mr::device_memory_resource * mr = rmm::mr::get_current_device_resource()) { rmm::device_buffer returned_buffer(..., mr); // Returned buffer uses the passed in MR ... rmm::device_buffer temporary_buffer(...); // Temporary buffer uses default MR ... return returned_buffer; } ``` ### Memory Management libcudf code generally eschews raw pointers and direct memory allocation. Use RMM classes built to use `device_memory_resource`s for device memory allocation with automated lifetime management. #### rmm::device_buffer Allocates a specified number of bytes of untyped, uninitialized device memory using a `device_memory_resource`. If no resource is explicitly provided, uses `rmm::mr::get_current_device_resource()`. `rmm::device_buffer` is movable and copyable on a stream. 
A copy performs a deep copy of the `device_buffer`'s device memory on the specified stream, whereas a move moves ownership of the device memory from one `device_buffer` to another. ```c++ // Allocates at least 100 bytes of uninitialized device memory // using the specified resource and stream rmm::device_buffer buff(100, stream, mr); void * raw_data = buff.data(); // Raw pointer to underlying device memory // Deep copies `buff` into `copy` on `stream` rmm::device_buffer copy(buff, stream); // Moves contents of `buff` into `moved_to` rmm::device_buffer moved_to(std::move(buff)); custom_memory_resource *mr...; // Allocates 100 bytes from the custom_memory_resource rmm::device_buffer custom_buff(100, stream, mr); ``` #### rmm::device_scalar<T> Allocates a single element of the specified type initialized to the specified value. Use this for scalar input/outputs into device kernels, e.g., reduction results, null count, etc. This is effectively a convenience wrapper around a `rmm::device_vector<T>` of length 1. ```c++ // Allocates device memory for a single int using the specified resource and stream // and initializes the value to 42 rmm::device_scalar<int> int_scalar{42, stream, mr}; // scalar.data() returns pointer to value in device memory kernel<<<...>>>(int_scalar.data(),...); // scalar.value() synchronizes the scalar's stream and copies the // value from device to host and returns the value int host_value = int_scalar.value(); ``` #### rmm::device_vector<T> Allocates a specified number of elements of the specified type. If no initialization value is provided, all elements are default initialized (this incurs a kernel launch). **Note**: We have removed all usage of `rmm::device_vector` and `thrust::device_vector` from libcudf, and you should not use it in new code in libcudf without careful consideration. Instead, use `rmm::device_uvector` along with the utility factories in `device_factories.hpp`. 
These utilities enable creation of `uvector`s from host-side vectors, or creating zero-initialized `uvector`s, so that they are as convenient to use as `device_vector`. Avoiding `device_vector` has a number of benefits, as described in the following section on `rmm::device_uvector`. #### rmm::device_uvector<T> Similar to a `device_vector`, allocates a contiguous set of elements in device memory but with key differences: - As an optimization, elements are uninitialized and no synchronization occurs at construction. This limits the types `T` to trivially copyable types. - All operations are stream ordered (i.e., they accept a `cuda_stream_view` specifying the stream on which the operation is performed). This improves safety when using non-default streams. - `device_uvector.hpp` does not include any `__device__` code, unlike `thrust/device_vector.hpp`, which means `device_uvector`s can be used in `.cpp` files, rather than just in `.cu` files. ```c++ cuda_stream s; // Allocates uninitialized storage for 100 `int32_t` elements on stream `s` using the // default resource rmm::device_uvector<int32_t> v(100, s); // Initializes the elements to 0 thrust::uninitialized_fill(thrust::cuda::par.on(s.value()), v.begin(), v.end(), int32_t{0}); rmm::mr::device_memory_resource * mr = new my_custom_resource{...}; // Allocates uninitialized storage for 100 `int32_t` elements on stream `s` using the resource `mr` rmm::device_uvector<int32_t> v2{100, s, mr}; ``` ## Default Parameters While public libcudf APIs are free to include default function parameters, detail functions should not. Default memory resource parameters make it easy for developers to accidentally allocate memory using the incorrect resource. Avoiding default memory resources forces developers to consider each memory allocation carefully. While streams are not currently exposed in libcudf's API, we plan to do so eventually. As a result, the same reasons for memory resources also apply to streams. 
Public APIs default to using `cudf::get_default_stream()`. However, including the same default in detail APIs opens the door for developers to forget to pass in a user-provided stream if one is passed to a public API. Forcing every detail API call to explicitly pass a stream is intended to prevent such mistakes. The memory resources (and eventually, the stream) are the final parameters for essentially all public APIs. For API consistency, the same is true throughout libcudf's internals. Therefore, a consequence of not allowing default streams or MRs is that no parameters in detail APIs may have defaults. ## NVTX Ranges In order to aid in performance optimization and debugging, all compute intensive libcudf functions should have a corresponding NVTX range. libcudf has a convenience macro `CUDF_FUNC_RANGE()` that automatically annotates the lifetime of the enclosing function and uses the function's name as the name of the NVTX range. For more information about NVTX, see [here](https://github.com/NVIDIA/NVTX/tree/dev/c). ## Input/Output Style The preferred style for how inputs are passed in and outputs are returned is the following: - Inputs - Columns: - `column_view const&` - Tables: - `table_view const&` - Scalar: - `scalar const&` - Everything else: - Trivial or inexpensively copied types - Pass by value - Non-trivial or expensive to copy types - Pass by `const&` - In/Outs - Columns: - `mutable_column_view&` - Tables: - `mutable_table_view&` - Everything else: - Pass via raw pointer - Outputs - Outputs should be *returned*, i.e., no output parameters - Columns: - `std::unique_ptr<column>` - Tables: - `std::unique_ptr<table>` - Scalars: - `std::unique_ptr<scalar>` ### Multiple Return Values Sometimes it is necessary for functions to have multiple outputs. There are a few ways this can be done in C++ (including creating a `struct` for the output). One convenient way to do this is using `std::tie` and `std::pair`. 
Note that objects passed to `std::pair` will invoke either the copy constructor or the move constructor of the object, and it may be preferable to move non-trivially copyable objects (and required for types with deleted copy constructors, like `std::unique_ptr`). ```c++ std::pair<table, table> return_two_tables(void){ cudf::table out0; cudf::table out1; ... // Do stuff with out0, out1 // Return a std::pair of the two outputs return std::pair(std::move(out0), std::move(out1)); } cudf::table out0; cudf::table out1; std::tie(out0, out1) = cudf::return_two_outputs(); ``` Note: `std::tuple` _could_ be used if not for the fact that Cython does not support `std::tuple`. Therefore, libcudf APIs must use `std::pair`, and are therefore limited to return only two objects of different types. Multiple objects of the same type may be returned via a `std::vector<T>`. Alternatively, with C++17 (supported from cudf v0.20), [structured binding](https://en.cppreference.com/w/cpp/language/structured_binding) may be used to disaggregate multiple return values: ```c++ auto [out0, out1] = cudf::return_two_outputs(); ``` Note that the compiler might not support capturing aliases defined in a structured binding in a lambda. One may work around this by using a capture with an initializer instead: ```c++ auto [out0, out1] = cudf::return_two_outputs(); // Direct capture of alias from structured binding might fail with: // "error: structured binding cannot be captured" // auto foo = [out0]() {...}; // Use an initializing capture: auto foo = [&out0 = out0] { // Use out0 to compute something. // ... }; ``` ## Iterator-based interfaces Increasingly, libcudf is moving toward internal (`detail`) APIs with iterator parameters rather than explicit `column`/`table`/`scalar` parameters. As with STL, iterators enable generic algorithms to be applied to arbitrary containers. A good example of this is `cudf::copy_if_else`. This function takes two inputs, and a Boolean mask. 
It copies the corresponding element from the first or second input depending on whether the mask at that index is `true` or `false`. Implementing `copy_if_else` for all combinations of `column` and `scalar` parameters is simplified by using iterators in the `detail` API. ```c++ template <typename FilterFn, typename LeftIter, typename RightIter> std::unique_ptr<column> copy_if_else( bool nullable, LeftIter lhs_begin, LeftIter lhs_end, RightIter rhs, FilterFn filter, ...); ``` `LeftIter` and `RightIter` need only implement the necessary interface for an iterator. libcudf provides a number of iterator types and utilities that are useful with iterator-based APIs from libcudf as well as Thrust algorithms. Most are defined in `include/detail/iterator.cuh`. ### Pair iterator The pair iterator is used to access elements of nullable columns as a pair containing an element's value and validity. `cudf::detail::make_pair_iterator` can be used to create a pair iterator from a `column_device_view` or a `cudf::scalar`. `make_pair_iterator` is not available for `mutable_column_device_view`. ### Null-replacement iterator This iterator replaces the null/validity value for each element with a specified constant (`true` or `false`). Created using `cudf::detail::make_null_replacement_iterator`. ### Validity iterator This iterator returns the validity of the underlying element (`true` or `false`). Created using `cudf::detail::make_validity_iterator`. ### Index-normalizing iterators The proliferation of data types supported by libcudf can result in long compile times. One area where compile time was a problem is in types used to store indices, which can be any integer type. The "Indexalator", or index-normalizing iterator (`include/cudf/detail/indexalator.cuh`), can be used for index types (integers) without requiring a type-specific instance. 
It can be used for any iterator interface for reading an array of integer values of type `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, or `uint64`. Reading specific elements always returns a [`cudf::size_type`](#cudfsize_type) integer. Use the `indexalator_factory` to create an appropriate input iterator from a column_view. Example input iterator usage: ```c++ auto begin = indexalator_factory::create_input_iterator(gather_map); auto end = begin + gather_map.size(); auto result = detail::gather( source, begin, end, IGNORE, stream, mr ); ``` Example output iterator usage: ```c++ auto result_itr = indexalator_factory::create_output_iterator(indices->mutable_view()); thrust::lower_bound(rmm::exec_policy(stream), input->begin<Element>(), input->end<Element>(), values->begin<Element>(), values->end<Element>(), result_itr, thrust::less<Element>()); ``` ## Namespaces ### External All public libcudf APIs should be placed in the `cudf` namespace. Example: ```c++ namespace cudf{ void public_function(...); } // namespace cudf ``` The top-level `cudf` namespace is sufficient for most of the public API. However, to logically group a broad set of functions, further namespaces may be used. For example, there are numerous functions that are specific to columns of Strings. These functions reside in the `cudf::strings::` namespace. Similarly, functionality used exclusively for unit testing is in the `cudf::test::` namespace. ### Internal Many functions are not meant for public use, so place them in either the `detail` or an *anonymous* namespace, depending on the situation. #### detail namespace Functions or objects that will be used across *multiple* translation units (i.e., source files), should be exposed in an internal header file and placed in the `detail` namespace. 
Example: ```c++ // some_utilities.hpp namespace cudf{ namespace detail{ void reusable_helper_function(...); } // namespace detail } // namespace cudf ``` #### Anonymous namespace Functions or objects that will only be used in a *single* translation unit should be defined in an *anonymous* namespace in the source file where it is used. Example: ```c++ // some_file.cpp namespace{ void isolated_helper_function(...); } // anonymous namespace ``` [**Anonymous namespaces should *never* be used in a header file.**](https://wiki.sei.cmu.edu/confluence/display/cplusplus/DCL59-CPP.+Do+not+define+an+unnamed+namespace+in+a+header+file) # Deprecating and Removing Code libcudf is constantly evolving to improve performance and better meet our users' needs. As a result, we occasionally need to break or entirely remove APIs to respond to new and improved understanding of the functionality we provide. Remaining free to do this is essential to making libcudf an agile library that can rapidly accommodate our users needs. As a result, we do not always provide a warning or any lead time prior to releasing breaking changes. On a best effort basis, the libcudf team will notify users of changes that we expect to have significant or widespread effects. Where possible, indicate pending API removals using the [deprecated](https://en.cppreference.com/w/cpp/language/attributes/deprecated) attribute and document them using Doxygen's [deprecated](https://www.doxygen.nl/manual/commands.html#cmddeprecated) command prior to removal. When a replacement API is available for a deprecated API, mention the replacement in both the deprecation message and the deprecation documentation. Pull requests that introduce deprecations should be labeled "deprecation" to facilitate discovery and removal in the subsequent release. Advertise breaking changes by labeling any pull request that breaks or removes an existing API with the "breaking" tag. 
This ensures that the "Breaking" section of the release notes includes a description of what has broken from the past release. Label pull requests that contain deprecations with the "non-breaking" tag. # Error Handling {#errors} libcudf follows conventions (and provides utilities) enforcing compile-time and run-time conditions and detecting and handling CUDA errors. Communication of errors is always via C++ exceptions. ## Runtime Conditions Use the `CUDF_EXPECTS` macro to enforce runtime conditions necessary for correct execution. Example usage: ```c++ CUDF_EXPECTS(lhs.type() == rhs.type(), "Column type mismatch"); ``` The first argument is the conditional expression expected to resolve to `true` under normal conditions. If the conditional evaluates to `false`, then an error has occurred and an instance of `cudf::logic_error` is thrown. The second argument to `CUDF_EXPECTS` is a short description of the error that has occurred and is used for the exception's `what()` message. There are times where a particular code path, if reached, should indicate an error no matter what. For example, often the `default` case of a `switch` statement represents an invalid alternative. Use the `CUDF_FAIL` macro for such errors. This is effectively the same as calling `CUDF_EXPECTS(false, reason)`. Example: ```c++ CUDF_FAIL("This code path should not be reached."); ``` ### CUDA Error Checking Use the `CUDF_CUDA_TRY` macro to check for the successful completion of CUDA runtime API functions. This macro throws a `cudf::cuda_error` exception if the CUDA API return value is not `cudaSuccess`. The thrown exception includes a description of the CUDA error code in its `what()` message. Example: ```c++ CUDF_CUDA_TRY( cudaMemcpy(&dst, &src, num_bytes, cudaMemcpyDefault) ); ``` ## Compile-Time Conditions Use `static_assert` to enforce compile-time conditions. 
For example, ```c++ template <typename T> void trivial_types_only(T t){ static_assert(std::is_trivial<T>::value, "This function requires a trivial type."); ... } ``` # Logging libcudf includes logging utilities (built on top of [spdlog](https://github.com/gabime/spdlog) library), which should be used to log important events (e.g. user warnings). This utility can also be used to log debug information, as long as the correct logging level is used. There are six macros that should be used for logging at different levels: * `CUDF_LOG_TRACE` - verbose debug messages (targeted at developers) * `CUDF_LOG_DEBUG` - debug messages (targeted at developers) * `CUDF_LOG_INFO` - information about rare events (e.g. once per run) that occur during normal execution * `CUDF_LOG_WARN` - user warnings about potentially unexpected behavior or deprecations * `CUDF_LOG_ERROR` - recoverable errors * `CUDF_LOG_CRITICAL` - unrecoverable errors (e.g. memory corruption) By default, `TRACE`, `DEBUG` and `INFO` messages are excluded from the log. In addition, in public builds, the code that logs at `TRACE` and `DEBUG` levels is compiled out. This prevents logging of potentially sensitive data that might be done for debug purposes. Also, this allows developers to include expensive computation in the trace/debug logs, as the overhead will not be present in the public builds. The minimum enabled logging level is `WARN`, and it can be modified in multiple ways: * CMake configuration variable `LIBCUDF_LOGGING_LEVEL` - sets the minimum level of logging that will be compiled in the build. Available levels are `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `CRITICAL`, and `OFF`. * Environment variable `LIBCUDF_LOGGING_LEVEL` - sets the minimum logging level during initialization. If this setting is higher than the compile-time CMake variable, any logging levels in between the two settings will be excluded from the written log. The available levels are the same as for the CMake variable. 
* Global logger object exposed via `cudf::logger()` - sets the minimum logging level at runtime. For example, calling `cudf::logger().set_level(spdlog::level::err)`, will exclude any messages that are not errors or critical errors. This API should not be used within libcudf to manipulate logging, its purpose is to allow upstream users to configure libcudf logging to fit their application. By default, logging messages are output to stderr. Setting the environment variable `LIBCUDF_DEBUG_LOG_FILE` redirects the log to a file with the specified path (can be relative to the current directory). Upstream users can also manipulate `cudf::logger().sinks()` to add sinks or divert the log to standard output or even a custom spdlog sink. # Data Types Columns may contain data of a number of types (see `enum class type_id` in `include/cudf/types.hpp`) * Numeric data: signed and unsigned integers (8-, 16-, 32-, or 64-bit), floats (32- or 64-bit), and Booleans (8-bit). * Timestamp data with resolution of days, seconds, milliseconds, microseconds, or nanoseconds. * Duration data with resolution of days, seconds, milliseconds, microseconds, or nanoseconds. * Decimal fixed-point data (32- or 64-bit). * Strings * Dictionaries * Lists of any type * Structs of columns of any type Most algorithms must support columns of any data type. This leads to complexity in the code, and is one of the primary challenges a libcudf developer faces. Sometimes we develop new algorithms with gradual support for more data types to make this easier. Typically we start with fixed-width data types such as numeric types and timestamps/durations, adding support for nested types later. Enabling an algorithm differently for different types uses either template specialization or SFINAE, as discussed in [Specializing Type-Dispatched Code Paths](#specializing-type-dispatched-code-paths). # Type Dispatcher libcudf stores data (for columns and scalars) "type erased" in `void*` device memory. 
This *type-erasure* enables interoperability with other languages and type systems, such as Python and Java. In order to determine the type, libcudf algorithms must use the run-time information stored in the column `type()` to reconstruct the data type `T` by casting the `void*` to the appropriate `T*`. This so-called *type dispatch* is pervasive throughout libcudf. The `type_dispatcher` is a central utility that automates the process of mapping the runtime type information in `data_type` to a concrete C++ type. At a high level, you call the `type_dispatcher` with a `data_type` and a function object (also known as a *functor*) with an `operator()` template. Based on the value of `data_type::id()`, the type dispatcher invokes the corresponding instantiation of the `operator()` template. This simplified example shows how the value of `data_type::id()` determines which instantiation of the `F::operator()` template is invoked. ```c++ template <typename F> void type_dispatcher(data_type t, F f){ switch(t.id()) case type_id::INT32: f.template operator()<int32_t>() case type_id::INT64: f.template operator()<int64_t>() case type_id::FLOAT: f.template operator()<float>() ... } ``` The following example shows a function object called `size_of_functor` that returns the size of the dispatched type. ```c++ struct size_of_functor{ template <typename T> int operator()(){ return sizeof(T); } }; cudf::type_dispatcher(data_type{type_id::INT8}, size_of_functor{}); // returns 1 cudf::type_dispatcher(data_type{type_id::INT32}, size_of_functor{}); // returns 4 cudf::type_dispatcher(data_type{type_id::FLOAT64}, size_of_functor{}); // returns 8 ``` By default, `type_dispatcher` uses `cudf::type_to_id<t>` to provide the mapping of `cudf::type_id` to dispatched C++ types. However, this mapping may be customized by explicitly specifying a user-defined trait for the `IdTypeMap`. 
For example, to always dispatch `int32_t` for all values of `cudf::type_id`: ```c++ template<cudf::type_id t> struct always_int{ using type = int32_t; } // This will always invoke `operator()<int32_t>` cudf::type_dispatcher<always_int>(data_type, f); ``` ## Avoid Multiple Type Dispatch Avoid multiple type-dispatch if possible. The compiler creates a code path for every type dispatched, so a second-level type dispatch results in quadratic growth in compilation time and object code size. As a large library with many types and functions, we are constantly working to reduce compilation time and code size. ## Specializing Type-Dispatched Code Paths It is often necessary to customize the dispatched `operator()` for different types. This can be done in several ways. The first method is to use explicit, full template specialization. This is useful for specializing behavior for single types. The following example function object prints `"int32_t"` or `"double"` when invoked with either of those types, or `"unhandled type"` otherwise. ```c++ struct type_printer { template <typename ColumnType> void operator()() { std::cout << "unhandled type\n"; } }; // Due to a bug in g++, explicit member function specializations need to be // defined outside of the class definition template <> void type_printer::operator()<int32_t>() { std::cout << "int32_t\n"; } template <> void type_printer::operator()<double>() { std::cout << "double\n"; } ``` The second method is to use [SFINAE](https://en.cppreference.com/w/cpp/language/sfinae) with `std::enable_if_t`. This is useful to partially specialize for a set of types with a common trait. The following example functor prints `integral` or `floating point` for integral or floating point types, respectively. 
```c++ struct integral_or_floating_point { template <typename ColumnType, std::enable_if_t<not std::is_integral<ColumnType>::value and not std::is_floating_point<ColumnType>::value>* = nullptr> void operator()() { std::cout << "neither integral nor floating point\n"; } template <typename ColumnType, std::enable_if_t<std::is_integral<ColumnType>::value>* = nullptr> void operator()() { std::cout << "integral\n"; } template < typename ColumnType, std::enable_if_t<std::is_floating_point<ColumnType>::value>* = nullptr> void operator()() { std::cout << "floating point\n"; } }; ``` For more info on SFINAE with `std::enable_if`, [see this post](https://eli.thegreenplace.net/2014/sfinae-and-enable_if). There are a number of traits defined in `include/cudf/utilities/traits.hpp` that are useful for partial specialization of dispatched function objects. For example `is_numeric<T>()` can be used to specialize for any numeric type. # Variable-Size and Nested Data Types libcudf supports a number of variable-size and nested data types, including strings, lists, and structs. * `string`: Simply a character string, but a column of strings may have a different-length string in each row. * `list`: A list of elements of any type, so a column of lists of integers has rows with a list of integers, possibly of a different length, in each row. * `struct`: In a column of structs, each row is a structure comprising one or more fields. These fields are stored in structure-of-arrays format, so that the column of structs has a nested column for each field of the structure. As the heading implies, list and struct columns may be nested arbitrarily. One may create a column of lists of structs, where the fields of the struct may be of any type, including strings, lists and structs. Thinking about deeply nested data types can be confusing for column-based data, even with experience. Therefore it is important to carefully write algorithms, and to test and document them well. 
## List columns In order to represent variable-width elements, libcudf columns contain a vector of child columns. For list columns, the parent column's type is `LIST` and contains no data, but its size represents the number of lists in the column, and its null mask represents the validity of each list element. The parent has two children. 1. A non-nullable column of [`size_type`](#cudfsize_type) elements that indicates the offset to the beginning of each list in a dense column of elements. 2. A column containing the actual data and optional null mask for all elements of all the lists packed together. With this representation, `data[offsets[i]]` is the first element of list `i`, and the size of list `i` is given by `offsets[i+1] - offsets[i]`. Note that the data may be of any type, and therefore the data column may itself be a nested column of any type. Note also that not only is each list nullable (using the null mask of the parent), but each list element may be nullable. So you may have a lists column with null row 3, and also null element 2 of row 4. The underlying data for a lists column is always bundled into a single leaf column at the very bottom of the hierarchy (ignoring structs, which conceptually "reset" the root of the hierarchy), regardless of the level of nesting. So a `List<List<List<List<int>>>>` column has a single `int` column at the very bottom. The following is a visual representation of this. ``` lists_column = { {{{1, 2}, {3, 4}}, NULL}, {{{10, 20}, {30, 40}}, {{50, 60, 70}, {0}}} } List<List<List<int>>> (2 rows): Length : 2 Offsets : 0, 2, 4 Children : List<List<int>>: Length : 4 Offsets : 0, 2, 2, 4, 6 Null count: 1 1101 Children : List<int>: Length : 6 Offsets : 0, 2, 4, 6, 8, 11, 12 Children : Column of ints 1, 2, 3, 4, 10, 20, 30, 40, 50, 60, 70, 0 ``` This is related to [Arrow's "Variable-Size List" memory layout](https://arrow.apache.org/docs/format/Columnar.html?highlight=nested%20types#physical-memory-layout). 
## Strings columns Strings are represented in much the same way as lists, except that the data child column is always a non-nullable column of `INT8` data. The parent column's type is `STRING` and contains no data, but its size represents the number of strings in the column, and its null mask represents the validity of each string. To summarize, the strings column children are: 1. A non-nullable column of [`size_type`](#cudfsize_type) elements that indicates the offset to the beginning of each string in a dense column of all characters. 2. A non-nullable column of `INT8` elements of all the characters across all the strings packed together. With this representation, `characters[offsets[i]]` is the first character of string `i`, and the size of string `i` is given by `offsets[i+1] - offsets[i]`. The following image shows an example of this compound column representation of strings. ![strings](strings.png) ## Structs columns A struct is a nested data type with a set of child columns each representing an individual field of a logical struct. Field names are not represented. A structs column with `N` fields has `N` children. Each child is a column storing all the data of a single field packed column-wise, with an optional null mask. The parent column's type is `STRUCT` and contains no data, its size represents the number of struct rows in the column, and its null mask represents the validity of each struct element. With this representation, `child[0][10]` is row 10 of the first field of the struct, `child[1][42]` is row 42 of the second field of the struct. Notice that in addition to the struct column's null mask, each struct field column has its own optional null mask. A struct field's validity can vary independently from the corresponding struct row. For instance, a non-null struct row might have a null field. However, the fields of a null struct row are deemed to be null as well. For example, consider a struct column of type `STRUCT<FLOAT32, INT32>`. 
If the contents are `[ {1.0, 2}, {4.0, 5}, null, {8.0, null} ]`, the struct column's layout is as follows. (Note that null masks should be read from right to left.) ``` { type = STRUCT null_mask = [1, 1, 0, 1] null_count = 1 children = { { type = FLOAT32 data = [1.0, 4.0, X, 8.0] null_mask = [ 1, 1, 0, 1] null_count = 1 }, { type = INT32 data = [2, 5, X, X] null_mask = [1, 1, 0, 0] null_count = 2 } } } ``` The last struct row (index 3) is not null, but has a null value in the INT32 field. Also, row 2 of the struct column is null, making its corresponding fields also null. Therefore, bit 2 is unset in the null masks of both struct fields. ## Dictionary columns Dictionaries provide an efficient way to represent low-cardinality data by storing a single copy of each value. A dictionary comprises a column of sorted keys and a column containing an index into the keys column for each row of the parent column. The keys column may have any libcudf data type, such as a numerical type or strings. The indices represent the corresponding positions of each element's value in the keys. The indices child column can have any unsigned integer type (`UINT8`, `UINT16`, `UINT32`, or `UINT64`). ## Nested column challenges The first challenge with nested columns is that it is effectively impossible to do any operation that modifies the length of any string or list in place. For example, consider trying to append the character `'a'` to the end of each string. This requires dynamically resizing the characters column to allow inserting `'a'` at the end of each string, and then modifying the offsets column to indicate the new size of each element. As a result, every operation that can modify the strings or lists in a column must be done out-of-place. The second challenge is that in an out-of-place operation on a strings column, unlike with fixed- width elements, the size of the output cannot be known *a priori*. 
For example, consider scattering into a column of strings: destination: {"this", "is", "a", "column", "of", "strings"} scatter_map: {1, 3, 5} scatter_values: {"red", "green", "blue"} result: {"this", "red", "a", "green", "of", "blue"} In this example, the strings "red", "green", and "blue" will respectively be scattered into positions `1`, `3`, and `5` of `destination`. Recall from above that this operation cannot be done in place, therefore `result` will be generated by selectively copying strings from `destination` and `scatter_values`. Notice that `result`'s child column of characters requires storage for `19` characters. However, there is no way to know ahead of time that `result` will require `19` characters. Therefore, most operations that produce a new output column of strings use a two-phase approach: 1. Determine the number and size of each string in the result. This amounts to materializing the output offsets column. 2. Allocate sufficient storage for all of the output characters and materialize each output string. In scatter, the first phase consists of using the `scatter_map` to determine whether string `i` in the output will come from `destination` or from `scatter_values` and use the corresponding size(s) to materialize the offsets column and determine the size of the output. Then, in the second phase, sufficient storage is allocated for the output's characters, and then the characters are filled with the corresponding strings from either `destination` or `scatter_values`. ## Nested Type Views libcudf provides view types for nested column types as well as for the data elements within them. ### cudf::strings_column_view and cudf::string_view `cudf::strings_column_view` is a view of a strings column, like `cudf::column_view` is a view of any `cudf::column`. 
`cudf::string_view` is a view of a single string, and therefore `cudf::string_view` is the data type of a `cudf::column` of type `STRING` just like `int32_t` is the data type for a `cudf::column` of type [`size_type`](#cudfsize_type). As its name implies, this is a read-only object instance that points to device memory inside the strings column. Its lifespan is the same (or less) as the column it views.

Use the `column_device_view::element` method to access an individual row element. Like any other column, do not call `element()` on a row that is null.

```c++
   cudf::column_device_view d_strings;
   ...
   if( d_strings.is_valid(row_index) ) {
      string_view d_str = d_strings.element<string_view>(row_index);
      ...
   }
```

A null string is not the same as an empty string. Use the `string_scalar` class if you need an instance of a class object to represent a null string.

The `string_view` contains comparison operators `<,>,==,<=,>=` that can be used in many cudf functions like `sort` without string-specific code. The data for a `string_view` instance is required to be [UTF-8](#utf-8) and all operators and methods expect this encoding. Unless documented otherwise, position and length parameters are specified in characters and not bytes. The class also includes a `string_view::const_iterator` which can be used to navigate through individual characters within the string.

`cudf::type_dispatcher` dispatches to the `string_view` data type when invoked on a `STRING` column.

#### UTF-8

The libcudf strings column only supports UTF-8 encoding for strings data. [UTF-8](https://en.wikipedia.org/wiki/UTF-8) is a variable-length character encoding wherein each character can be 1-4 bytes. This means the length of a string is not the same as its size in bytes. For this reason, it is recommended to use the `string_view` class to access these characters for most operations.
The `string_view.cuh` header also includes some utility methods for reading and writing (`to_char_utf8/from_char_utf8`) individual UTF-8 characters to/from byte arrays. ### cudf::lists_column_view and cudf::lists_view `cudf::lists_column_view` is a view of a lists column. `cudf::list_view` is a view of a single list, and therefore `cudf::list_view` is the data type of a `cudf::column` of type `LIST`. `cudf::type_dispatcher` dispatches to the `list_view` data type when invoked on a `LIST` column. ### cudf::structs_column_view and cudf::struct_view `cudf::structs_column_view` is a view of a structs column. `cudf::struct_view` is a view of a single struct, and therefore `cudf::struct_view` is the data type of a `cudf::column` of type `STRUCT`. `cudf::type_dispatcher` dispatches to the `struct_view` data type when invoked on a `STRUCT` column. # cuIO: file reading and writing cuIO is a component of libcudf that provides GPU-accelerated reading and writing of data file formats commonly used in data analytics, including CSV, Parquet, ORC, Avro, and JSON_Lines. // TODO: add more detail and move to a separate file.
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/examples/README.md
# Libcudf Examples This folder contains examples to demonstrate libcudf use cases. Running `build.sh` builds all libcudf examples. Current examples: - Basic: demonstrates a basic use case with libcudf and building a custom application with libcudf - Strings: demonstrates using libcudf for accessing and creating strings columns and for building custom kernels for strings - Nested Types: demonstrates using libcudf for some operations on nested types
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/examples/build.sh
#!/bin/bash
# Copyright (c) 2021-2023, NVIDIA CORPORATION.

# libcudf examples build script

# Fail fast: stop on the first failing command (so a failed configure is not
# followed by a build attempt), treat unset variables as errors, and propagate
# failures through pipelines.
set -euo pipefail

# Parallelism control (defaults to 4 build jobs; override via environment)
PARALLEL_LEVEL=${PARALLEL_LEVEL:-4}

# Root of examples (directory containing this script)
EXAMPLES_DIR=$(dirname "$(realpath "$0")")
# Location of an existing libcudf build tree; defaults to <repo>/cpp/build
LIB_BUILD_DIR=${LIB_BUILD_DIR:-$(readlink -f "${EXAMPLES_DIR}/../build")}

################################################################################
# Add individual libcudf examples build scripts down below

# Configure and build a single example project.
# $1 - example subdirectory name (relative to this script)
build_example() {
  # 'local' keeps these from leaking into the global scope between calls
  local example_dir="${EXAMPLES_DIR}/${1}"
  local build_dir="${example_dir}/build"

  # Configure (paths quoted so directories containing spaces work)
  cmake -S "${example_dir}" -B "${build_dir}" -Dcudf_ROOT="${LIB_BUILD_DIR}"
  # Build
  cmake --build "${build_dir}" -j"${PARALLEL_LEVEL}"
}

build_example basic
build_example strings
build_example nested_types
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/examples/fetch_dependencies.cmake
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================

# Pin the CPM.cmake (CMake Package Manager) release used to bootstrap dependency
# fetching, so example builds are reproducible.
set(CPM_DOWNLOAD_VERSION v0.35.3)

# Download the CPM bootstrap script into this example's build tree and load it.
# The download is skipped by file(DOWNLOAD) if the destination already exists
# with the same content.
file(
  DOWNLOAD
  https://github.com/cpm-cmake/CPM.cmake/releases/download/${CPM_DOWNLOAD_VERSION}/get_cpm.cmake
  ${CMAKE_BINARY_DIR}/cmake/get_cpm.cmake
)
include(${CMAKE_BINARY_DIR}/cmake/get_cpm.cmake)

# The cudf branch to build when no pre-built package is found. Keep this in sync
# with the release branch of the enclosing repository.
set(CUDF_TAG branch-24.02)

# CPMFindPackage first tries find_package(cudf) (e.g. the build tree passed via
# -Dcudf_ROOT) and only clones and builds libcudf from source on a miss.
# SOURCE_SUBDIR points CPM at the cpp/ directory, which holds libcudf's CMake
# project; GIT_SHALLOW avoids cloning the full history.
CPMFindPackage(
  NAME cudf
  GIT_REPOSITORY https://github.com/rapidsai/cudf
  GIT_TAG ${CUDF_TAG}
  GIT_SHALLOW TRUE
  SOURCE_SUBDIR cpp
)
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/libcudf_apis.cpp
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common.hpp"

#include <cudf/copying.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/find.hpp>
#include <cudf/strings/slice.hpp>
#include <cudf/strings/split/split.hpp>
#include <cudf/strings/strings_column_view.hpp>

#include <cuda_runtime.h>
#include <nvtx3/nvToolsExt.h>

/**
 * @brief Redacts each name per the corresponding visibility entry
 *
 * This implementation uses libcudf APIs to create the output result.
 *
 * Rows whose visibility contains "public" are transformed from "First Last"
 * into "L First" (last-name initial followed by the first name); all other
 * rows are replaced with the placeholder "X X".
 *
 * @param names Column of names
 * @param visibilities Column of visibilities
 * @return Redacted column of names
 */
std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names,
                                             cudf::column_view const& visibilities)
{
  // Scalars reused below: the visibility token that permits output, and the
  // replacement text substituted for every non-public row
  auto const visible   = cudf::string_scalar(std::string("public"));
  auto const redaction = cudf::string_scalar(std::string("X X"));

  // NVTX range marks this transformation in profiler timelines
  nvtxRangePushA("redact_strings");

  // BOOL8 column: true where the visibility entry contains "public"
  auto const allowed      = cudf::strings::contains(visibilities, visible);
  // Keep the name where allowed is true, otherwise use the "X X" placeholder
  auto const redacted     = cudf::copy_if_else(names, redaction, allowed->view());
  // Split each name on whitespace (the default delimiter) into two columns
  auto const first_last   = cudf::strings::split(redacted->view());
  auto const first        = first_last->view().column(0);
  auto const last         = first_last->view().column(1);
  // First character of the last name
  auto const last_initial = cudf::strings::slice_strings(last, 0, 1);

  // Reorder columns to produce "<last-initial> <first>"
  auto const last_initial_first = cudf::table_view({last_initial->view(), first});

  // Join the two columns row-wise with a single-space separator
  auto result = cudf::strings::concatenate(last_initial_first, std::string(" "));

  // Wait for the device work on the default stream to finish before closing
  // the NVTX range, so the range reflects the full GPU execution time
  cudaStreamSynchronize(0);

  nvtxRangePop();
  return result;
}
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/custom_prealloc.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common.hpp"

#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/scalar/scalar.hpp>

#include <rmm/device_uvector.hpp>

#include <cuda_runtime.h>
#include <nvtx3/nvToolsExt.h>

/**
 * @brief Builds the output for each row
 *
 * This thread is called once per row in d_names.
 *
 * Each visible row's output is written into the pre-allocated `working_memory`
 * buffer at the byte offset of the corresponding input row; redacted rows
 * simply point at the shared `redaction` string.
 *
 * @param d_names Column of names
 * @param d_visibilities Column of visibilities
 * @param redaction Redacted string replacement
 * @param working_memory Output memory for all rows
 * @param d_offsets Byte offset in `d_chars` for each row
 * @param d_output Output array of string_view objects
 */
__global__ void redact_kernel(cudf::column_device_view const d_names,
                              cudf::column_device_view const d_visibilities,
                              cudf::string_view redaction,
                              char* working_memory,
                              cudf::size_type const* d_offsets,
                              cudf::string_view* d_output)
{
  // The row index is resolved from the CUDA thread/block objects
  auto index = threadIdx.x + blockIdx.x * blockDim.x;
  // There may be more threads than actual rows
  if (index >= d_names.size()) return;

  auto const visible = cudf::string_view("public", 6);

  auto const name = d_names.element<cudf::string_view>(index);
  auto const vis  = d_visibilities.element<cudf::string_view>(index);
  if (vis == visible) {
    // Split "First Last" at the first space
    auto const space_idx    = name.find(' ');
    auto const first        = name.substr(0, space_idx);
    auto const last_initial = name.substr(space_idx + 1, 1);
    // Output is "<last-initial> <first>": two parts plus one space character
    auto const output_size = first.size_bytes() + last_initial.size_bytes() + 1;

    // Reuse the input row's byte offset: the output for a row is never longer
    // than its input name, so rows cannot overlap in working_memory
    char* output_ptr = working_memory + d_offsets[index];
    d_output[index]  = cudf::string_view{output_ptr, output_size};

    // build output string
    memcpy(output_ptr, last_initial.data(), last_initial.size_bytes());
    output_ptr += last_initial.size_bytes();
    *output_ptr++ = ' ';
    memcpy(output_ptr, first.data(), first.size_bytes());
  } else {
    // Non-public row: point at the shared redaction string (no copy made here)
    d_output[index] = cudf::string_view{redaction.data(), redaction.size_bytes()};
  }
}

/**
 * @brief Redacts each name per the corresponding visibility entry
 *
 * This implementation builds the individual strings into a fixed memory buffer
 * and then calls a factory function to gather them into a strings column.
 *
 * @param names Column of names
 * @param visibilities Column of visibilities
 * @return Redacted column of names
 */
std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names,
                                             cudf::column_view const& visibilities)
{
  // all device memory operations and kernel functions will run on this stream
  auto stream = rmm::cuda_stream_default;

  // Device-accessible views of the input columns and the redaction scalar
  auto const d_names        = cudf::column_device_view::create(names, stream);
  auto const d_visibilities = cudf::column_device_view::create(visibilities, stream);
  auto const d_redaction    = cudf::string_scalar(std::string("X X"), true, stream);

  constexpr int block_size = 128;  // this arbitrary size should be a power of 2
  auto const blocks        = (names.size() + block_size - 1) / block_size;

  nvtxRangePushA("redact_strings");

  auto const scv     = cudf::strings_column_view(names);
  auto const offsets = scv.offsets_begin();

  // create working memory to hold the output of each string;
  // the input chars size is a safe upper bound because each output string is
  // no longer than the input name it was built from
  auto working_memory = rmm::device_uvector<char>(scv.chars_size(), stream);
  // create a vector for the output strings' pointers
  auto str_ptrs = rmm::device_uvector<cudf::string_view>(names.size(), stream);

  // build the output strings
  redact_kernel<<<blocks, block_size, 0, stream.value()>>>(*d_names,
                                                           *d_visibilities,
                                                           d_redaction.value(),
                                                           working_memory.data(),
                                                           offsets,
                                                           str_ptrs.data());

  // create strings column from the string_pairs;
  // this copies all the individual strings into a single output column
  // (the second argument is the string_view value to be interpreted as null)
  auto result = cudf::make_strings_column(str_ptrs, cudf::string_view{nullptr, 0}, stream);
  // temporary memory cleanup cost here for str_ptrs and working_memory

  // wait for all of the above to finish
  stream.synchronize();

  nvtxRangePop();
  return result;
}
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/CMakeLists.txt
# Copyright (c) 2022-2023, NVIDIA CORPORATION.

cmake_minimum_required(VERSION 3.26.4)

project(
  strings_examples
  VERSION 0.0.1
  LANGUAGES CXX CUDA
)

# Locates (or fetches and builds) libcudf via CPM
include(../fetch_dependencies.cmake)

# NVCC flags required to compile device code that uses cudf headers
list(APPEND CUDF_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)

# One executable per example variant; each links against cudf and NVTX
add_executable(libcudf_apis libcudf_apis.cpp)
target_compile_features(libcudf_apis PRIVATE cxx_std_17)
target_link_libraries(libcudf_apis PRIVATE cudf::cudf nvToolsExt)

add_executable(custom_with_malloc custom_with_malloc.cu)
target_compile_features(custom_with_malloc PRIVATE cxx_std_17)
target_compile_options(custom_with_malloc PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_FLAGS}>")
target_link_libraries(custom_with_malloc PRIVATE cudf::cudf nvToolsExt)

add_executable(custom_prealloc custom_prealloc.cu)
target_compile_features(custom_prealloc PRIVATE cxx_std_17)
target_compile_options(custom_prealloc PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_FLAGS}>")
target_link_libraries(custom_prealloc PRIVATE cudf::cudf nvToolsExt)

add_executable(custom_optimized custom_optimized.cu)
target_compile_features(custom_optimized PRIVATE cxx_std_17)
target_compile_options(custom_optimized PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_FLAGS}>")
target_link_libraries(custom_optimized PRIVATE cudf::cudf nvToolsExt)
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/names.csv
John Doe,public Jane Doe,private Billy Joe,private James James,public Michael Frederick,public Christopher Cheryl,public Jessica Autumn,public Matthew Tyrone,public Ashley Martha,public Jennifer Omar,public Joshua Lydia,public Amanda Jerome,public Daniel Theodore,public David Abby,public James Neil,public Robert Shawna,private John Sierra,private Joseph Nina,private Andrew Tammy,private Ryan Nikki,public
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/README.md
# libcudf C++ examples using strings columns This C++ example demonstrates using libcudf APIs to access and create strings columns. The example source code loads a csv file and produces a redacted strings column from the names column using the values from the visibilities column. Four examples are included: 1. Using libcudf APIs to build the output 2. Using a simple custom kernel with dynamic memory 3. Using a custom kernel with pre-allocated device memory 4. Using a two-pass approach to improve performance These examples are described in more detail in https://developer.nvidia.com/blog/mastering-string-transformations-in-rapids-libcudf/ ## Compile and execute ```bash # Configure project cmake -S . -B build/ # Build cmake --build build/ --parallel $PARALLEL_LEVEL # Execute build/libcudf_apis names.csv --OR-- build/custom_with_malloc names.csv --OR-- build/custom_prealloc names.csv --OR-- build/custom_optimized names.csv ``` If your machine does not come with a pre-built libcudf binary, expect the first build to take some time, as it would build libcudf on the host machine. It may be sped up by configuring the proper `PARALLEL_LEVEL` number.
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/custom_with_malloc.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common.hpp"

#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/scalar/scalar.hpp>

#include <rmm/device_uvector.hpp>

#include <cuda_runtime.h>
#include <nvtx3/nvToolsExt.h>

/**
 * @brief Reserve CUDA malloc heap size
 *
 * Call this function to change the CUDA malloc heap size limit.
 * This value depends on the total size of all the malloc()
 * calls needed for redact_kernel.
 *
 * The limit is only raised, never lowered: if the current limit is
 * already at least @p heap_size, the call is a no-op.
 *
 * @param heap_size Number of bytes to reserve
 *                  Default is 1GB
 */
void set_malloc_heap_size(size_t heap_size = 1073741824)  // 1GB
{
  size_t max_malloc_heap_size = 0;
  cudaDeviceGetLimit(&max_malloc_heap_size, cudaLimitMallocHeapSize);
  if (max_malloc_heap_size < heap_size) {
    max_malloc_heap_size = heap_size;
    if (cudaDeviceSetLimit(cudaLimitMallocHeapSize, max_malloc_heap_size) != cudaSuccess) {
      // %zu is the correct, portable conversion for a size_t value
      fprintf(stderr, "could not set malloc heap size to %zuMB\n", (heap_size / (1024 * 1024)));
      throw std::runtime_error("");
    }
  }
}

/**
 * @brief Builds the output for each row
 *
 * This thread is called once per row in d_names.
 *
 * Note: This uses malloc() in a device kernel which works great
 * but is not very efficient. This can be useful for prototyping
 * on functions where performance is not yet important.
 * All calls to malloc() must have a corresponding free() call.
 * The separate free_kernel is launched for this purpose.
 *
 * @param d_names Column of names
 * @param d_visibilities Column of visibilities
 * @param redaction Redacted string replacement
 * @param d_output Output array of string_view objects
 */
__global__ void redact_kernel(cudf::column_device_view const d_names,
                              cudf::column_device_view const d_visibilities,
                              cudf::string_view redaction,
                              cudf::string_view* d_output)
{
  // The row index is resolved from the CUDA thread/block objects
  auto index = threadIdx.x + blockIdx.x * blockDim.x;
  // There may be more threads than actual rows
  if (index >= d_names.size()) return;

  auto const visible = cudf::string_view("public", 6);

  auto const name = d_names.element<cudf::string_view>(index);
  auto const vis  = d_visibilities.element<cudf::string_view>(index);
  if (vis == visible) {
    auto const space_idx    = name.find(' ');
    auto const first        = name.substr(0, space_idx);
    auto const last_initial = name.substr(space_idx + 1, 1);
    auto const output_size  = first.size_bytes() + last_initial.size_bytes() + 1;

    char* output_ptr = static_cast<char*>(malloc(output_size));
    if (output_ptr == nullptr) {
      // Device malloc heap exhausted; fall back to the redaction string so the
      // row is still valid output and free_kernel knows not to free it.
      d_output[index] = cudf::string_view{redaction.data(), redaction.size_bytes()};
      return;
    }
    d_output[index] = cudf::string_view{output_ptr, output_size};

    // build output string
    memcpy(output_ptr, last_initial.data(), last_initial.size_bytes());
    output_ptr += last_initial.size_bytes();
    *output_ptr++ = ' ';
    memcpy(output_ptr, first.data(), first.size_bytes());
  } else {
    d_output[index] = cudf::string_view{redaction.data(), redaction.size_bytes()};
  }
}

/**
 * @brief Frees the temporary individual string objects created in the
 * redact_kernel
 *
 * Like malloc(), free() is not very efficient but must be called for
 * each malloc() to return the memory to the CUDA malloc heap.
 *
 * Rows that point at the shared redaction string were never allocated
 * and are skipped.
 *
 * @param redaction Redacted string replacement (not to be freed)
 * @param d_output Output array of string_view objects to free
 * @param count Number of entries in d_output
 */
__global__ void free_kernel(cudf::string_view redaction, cudf::string_view* d_output, int count)
{
  auto index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index >= count) return;

  auto ptr = const_cast<char*>(d_output[index].data());
  if (ptr != redaction.data()) { free(ptr); }
}

/**
 * @brief Redacts each name per the corresponding visibility entry
 *
 * Builds each output string with device-side malloc() and then gathers
 * them into a single strings column.
 *
 * @param names Column of names
 * @param visibilities Column of visibilities
 * @return Redacted column of names
 */
std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names,
                                             cudf::column_view const& visibilities)
{
  // all device memory operations and kernel functions will run on this stream
  auto stream = rmm::cuda_stream_default;

  set_malloc_heap_size();  // to illustrate adjusting the malloc heap

  auto const d_names        = cudf::column_device_view::create(names, stream);
  auto const d_visibilities = cudf::column_device_view::create(visibilities, stream);
  auto const d_redaction    = cudf::string_scalar(std::string("X X"), true, stream);

  constexpr int block_size = 128;  // this arbitrary size should be a power of 2
  auto const blocks        = (names.size() + block_size - 1) / block_size;

  nvtxRangePushA("redact_strings");

  // create a vector for the output strings' pointers;
  // owned as a stack object (RAII) so it is released even if
  // make_strings_column throws -- the original raw new/delete leaked on error
  rmm::device_uvector<cudf::string_view> str_ptrs(names.size(), stream);

  auto result = [&] {
    // build the output strings
    redact_kernel<<<blocks, block_size, 0, stream.value()>>>(
      *d_names, *d_visibilities, d_redaction.value(), str_ptrs.data());
    // create strings column from the string_view vector
    // this copies all the individual strings into a single output column
    return cudf::make_strings_column(str_ptrs, cudf::string_view{nullptr, 0}, stream);
  }();

  // free the individual temporary memory pointers
  free_kernel<<<blocks, block_size, 0, stream.value()>>>(
    d_redaction.value(), str_ptrs.data(), names.size());

  // wait for all of the above to finish
  stream.synchronize();

  nvtxRangePop();
  return result;
}
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/custom_optimized.cu
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "common.hpp"

#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <thrust/scan.h>

#include <cuda_runtime.h>
#include <nvtx3/nvToolsExt.h>

/**
 * @brief Computes the size of each output row
 *
 * This thread is called once per row in d_names.
 *
 * Pass 1 of the two-pass (sizes, then data) strings-building pattern:
 * the per-row byte counts written here are later scanned into offsets.
 *
 * @param d_names Column of names
 * @param d_visibilities Column of visibilities
 * @param d_sizes Output sizes for each row
 */
__global__ void sizes_kernel(cudf::column_device_view const d_names,
                             cudf::column_device_view const d_visibilities,
                             cudf::size_type* d_sizes)
{
  // The row index is resolved from the CUDA thread/block objects
  auto index = threadIdx.x + blockIdx.x * blockDim.x;
  // There may be more threads than actual rows
  if (index >= d_names.size()) return;

  auto const visible   = cudf::string_view("public", 6);
  auto const redaction = cudf::string_view("X X", 3);

  auto const name = d_names.element<cudf::string_view>(index);
  auto const vis  = d_visibilities.element<cudf::string_view>(index);

  cudf::size_type result = redaction.size_bytes();  // init to redaction size
  if (vis == visible) {
    auto const space_idx    = name.find(' ');
    auto const first        = name.substr(0, space_idx);
    auto const last_initial = name.substr(space_idx + 1, 1);
    // "<last-initial> <first>" : initial + separating space + first name
    result                  = first.size_bytes() + last_initial.size_bytes() + 1;
  }
  d_sizes[index] = result;
}

/**
 * @brief Builds the output for each row
 *
 * This thread is called once per row in d_names.
 *
 * Pass 2: writes each row's bytes at the position the offsets scan
 * reserved for it. The size logic must match sizes_kernel exactly or
 * adjacent rows would overlap/underrun.
 *
 * @param d_names Column of names
 * @param d_visibilities Column of visibilities
 * @param d_offsets Byte offset in `d_chars` for each row
 * @param d_chars Output memory for all rows
 */
__global__ void redact_kernel(cudf::column_device_view const d_names,
                              cudf::column_device_view const d_visibilities,
                              cudf::size_type const* d_offsets,
                              char* d_chars)
{
  // The row index is resolved from the CUDA thread/block objects
  auto index = threadIdx.x + blockIdx.x * blockDim.x;
  // There may be more threads than actual rows
  if (index >= d_names.size()) return;

  auto const visible   = cudf::string_view("public", 6);
  auto const redaction = cudf::string_view("X X", 3);

  // resolve output_ptr using the offsets vector
  char* output_ptr = d_chars + d_offsets[index];

  auto const name = d_names.element<cudf::string_view>(index);
  auto const vis  = d_visibilities.element<cudf::string_view>(index);

  if (vis == visible) {
    auto const space_idx    = name.find(' ');
    auto const first        = name.substr(0, space_idx);
    auto const last_initial = name.substr(space_idx + 1, 1);
    auto const output_size  = first.size_bytes() + last_initial.size_bytes() + 1;

    // build output string
    memcpy(output_ptr, last_initial.data(), last_initial.size_bytes());
    output_ptr += last_initial.size_bytes();
    *output_ptr++ = ' ';
    memcpy(output_ptr, first.data(), first.size_bytes());
  } else {
    memcpy(output_ptr, redaction.data(), redaction.size_bytes());
  }
}

/**
 * @brief Redacts each name per the corresponding visibility entry
 *
 * This implementation builds the strings column children (offsets and chars)
 * directly into device memory for libcudf.
 *
 * @param names Column of names
 * @param visibilities Column of visibilities
 * @return Redacted column of names
 */
std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names,
                                             cudf::column_view const& visibilities)
{
  // all device memory operations and kernel functions will run on this stream
  auto stream = rmm::cuda_stream_default;

  auto const d_names        = cudf::column_device_view::create(names, stream);
  auto const d_visibilities = cudf::column_device_view::create(visibilities, stream);

  constexpr int block_size = 128;  // this arbitrary size should be a power of 2
  int const blocks         = (names.size() + block_size - 1) / block_size;

  nvtxRangePushA("redact_strings");

  // create offsets vector
  // size()+1 entries: the extra slot lets the exclusive scan place the
  // total output size at the end
  auto offsets = rmm::device_uvector<cudf::size_type>(names.size() + 1, stream);

  // compute output sizes
  sizes_kernel<<<blocks, block_size, 0, stream.value()>>>(
    *d_names, *d_visibilities, offsets.data());

  // convert sizes to offsets (in place)
  thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());

  // last element is the total output size
  // (device-to-host copy of 1 integer -- includes syncing the stream)
  cudf::size_type output_size = offsets.back_element(stream);

  // create chars vector
  auto chars = rmm::device_uvector<char>(output_size, stream);

  // build chars output
  redact_kernel<<<blocks, block_size, 0, stream.value()>>>(
    *d_names, *d_visibilities, offsets.data(), chars.data());

  // create column from offsets and chars vectors (no copy is performed)
  auto result = cudf::make_strings_column(names.size(), std::move(offsets), std::move(chars), {}, 0);

  // wait for all of the above to finish
  stream.synchronize();

  nvtxRangePop();
  return result;
}
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/strings/common.hpp
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>

#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

#include <chrono>
#include <iostream>
#include <memory>
#include <string>

// NOTE(review): this header defines main() and several non-inline functions,
// so it must only be included by exactly one translation unit per example
// binary -- a second inclusion would cause ODR/link errors. Verify that the
// build keeps it that way.

/**
 * @brief Main example function returns redacted strings column.
 *
 * This function returns a redacted version of the input `names` column
 * using the `visibilities` column as in the following example
 * ```
 * names        visibility --> redacted
 * John Doe     public         D John
 * Bobby Joe    private        X X
 * ```
 *
 * Each example (.cu file) provides its own implementation of this function.
 *
 * @param names First and last names separated with a single space
 * @param visibilities String values `public` or `private` only
 * @return Redacted strings column
 */
std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names,
                                             cudf::column_view const& visibilities);

/**
 * @brief Create CUDA memory resource
 */
auto make_cuda_mr() { return std::make_shared<rmm::mr::cuda_memory_resource>(); }

/**
 * @brief Create a pool device memory resource
 *
 * The pool sub-allocates from an upstream CUDA memory resource, avoiding
 * a cudaMalloc/cudaFree per allocation.
 */
auto make_pool_mr()
{
  return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda_mr());
}

/**
 * @brief Create memory resource for libcudf functions
 *
 * @param name "pool" selects the pooled resource; any other value selects
 *             the plain CUDA resource
 */
std::shared_ptr<rmm::mr::device_memory_resource> create_memory_resource(std::string const& name)
{
  if (name == "pool") { return make_pool_mr(); }
  return make_cuda_mr();
}

/**
 * @brief Main for strings examples
 *
 * Command line parameters:
 * 1. CSV file name/path
 * 2. Memory resource (optional): 'pool' or 'cuda'
 *
 * The stdout includes the number of rows in the input and the output size in bytes.
 */
int main(int argc, char const** argv)
{
  if (argc < 2) {
    std::cout << "required parameter: csv-file-path\n";
    return 1;
  }

  // second CLI argument selects the memory resource; defaults to "cuda"
  auto const mr_name = std::string{argc > 2 ? std::string(argv[2]) : std::string("cuda")};
  auto resource      = create_memory_resource(mr_name);
  // install as the default resource used by all libcudf device allocations;
  // `resource` must outlive every libcudf call below
  rmm::mr::set_current_device_resource(resource.get());

  auto const csv_file   = std::string{argv[1]};
  // read a headerless two-column CSV (names, visibilities)
  auto const csv_result = [csv_file] {
    cudf::io::csv_reader_options in_opts =
      cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_file}).header(-1);
    return cudf::io::read_csv(in_opts).tbl;
  }();
  auto const csv_table = csv_result->view();

  std::cout << "table: " << csv_table.num_rows() << " rows " << csv_table.num_columns()
            << " columns\n";

  // time only the redact_strings call, not the CSV I/O
  auto st     = std::chrono::steady_clock::now();
  auto result = redact_strings(csv_table.column(0), csv_table.column(1));

  std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - st;
  std::cout << "Wall time: " << elapsed.count() << " seconds\n";
  // child(1) is the chars child of the strings column -- its size is the
  // total output bytes
  std::cout << "Output size " << result->view().child(1).size() << " bytes\n";

  return 0;
}
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/nested_types/CMakeLists.txt
# Copyright (c) 2023, NVIDIA CORPORATION. cmake_minimum_required(VERSION 3.26.4) project( nested_types VERSION 0.0.1 LANGUAGES CXX CUDA ) include(../fetch_dependencies.cmake) # Configure your project here add_executable(deduplication deduplication.cpp) target_link_libraries(deduplication PRIVATE cudf::cudf) target_compile_features(deduplication PRIVATE cxx_std_17)
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/nested_types/example.json
{"features": {"key": "a1", "values": [{"info": "message_1", "type": "device_a", "dt": 1688750001}]}, "source": "network_a", "quality": 0.7} {"features": {"key": "a2", "values": [{"info": "message_2", "type": "device_a", "dt": 1688750002}]}, "source": "network_a", "quality": 0.7} {"features": {"key": "a3", "values": [{"info": "message_3", "type": "device_a", "dt": 1688750003}]}, "source": "network_b", "quality": 0.8} {"features": {"key": "a1", "values": [{"info": "message_1", "type": "device_a", "dt": 1688750001}]}, "source": "network_b", "quality": 0.9} {"features": {"key": "a4", "values": [{"info": "message_4", "type": "device_a", "dt": 1688750004}]}, "source": "network_b", "quality": 0.9}
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/nested_types/deduplication.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/groupby.hpp>
#include <cudf/io/json.hpp>
#include <cudf/io/types.hpp>
#include <cudf/join.hpp>
#include <cudf/sorting.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/table_view.hpp>

#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

#include <chrono>
#include <iostream>
#include <string>

/**
 * @file deduplication.cpp
 * @brief Demonstrates usage of the libcudf APIs to perform operations on nested-type tables.
 *
 * The algorithms chosen to be demonstrated are to showcase nested-type row operators of three
 * kinds:
 * 1. hashing: Used by functions `count_aggregate` and `join_count` to hash inputs of any type
 * 2. equality: Used by functions `count_aggregate` and `join_count` in conjunction with hashing
 * to determine equality for nested types
 * 3. lexicographic: Used by function `sort_keys` to create a lexicographical order for nested-types
 * so as to enable sorting
 *
 */

/**
 * @brief Create memory resource for libcudf functions
 *
 * @param pool Whether to use a pool memory resource.
 * @return Memory resource instance
 */
std::shared_ptr<rmm::mr::device_memory_resource> create_memory_resource(bool pool)
{
  auto cuda_mr = std::make_shared<rmm::mr::cuda_memory_resource>();
  if (pool) { return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(cuda_mr); }
  return cuda_mr;
}

/**
 * @brief Read JSON input from file
 *
 * Reads in JSON-lines format (one record per line).
 *
 * @param filepath path to input JSON file
 * @return cudf::io::table_with_metadata
 */
cudf::io::table_with_metadata read_json(std::string filepath)
{
  auto source_info = cudf::io::source_info(filepath);
  auto builder     = cudf::io::json_reader_options::builder(source_info).lines(true);
  auto options     = builder.build();
  return cudf::io::read_json(options);
}

/**
 * @brief Write JSON output to file
 *
 * @param input table to write
 * @param metadata metadata of input table read by JSON reader
 * @param filepath path to output JSON file
 */
void write_json(cudf::table_view input, cudf::io::table_metadata metadata, std::string filepath)
{
  // write the data for inspection
  auto sink_info = cudf::io::sink_info(filepath);
  auto builder   = cudf::io::json_writer_options::builder(sink_info, input).lines(true);
  builder.metadata(metadata);
  auto options = builder.build();
  cudf::io::write_json(options);
}

/**
 * @brief Aggregate count of duplicate rows in nested-type column
 *
 * Groups on the first (nested-type) column and appends a COUNT column.
 *
 * @param input table to aggregate
 * @return std::unique_ptr<cudf::table> unique keys followed by their counts
 */
std::unique_ptr<cudf::table> count_aggregate(cudf::table_view input)
{
  // Get count for each key
  auto keys = cudf::table_view{{input.column(0)}};
  // COUNT needs a values column even though the values themselves are not
  // read; an uninitialized INT32 column of the right length serves as the
  // placeholder
  auto val  = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT32}, keys.num_rows());

  cudf::groupby::groupby grpby_obj(keys);
  std::vector<cudf::groupby::aggregation_request> requests;
  requests.emplace_back(cudf::groupby::aggregation_request());
  auto agg = cudf::make_count_aggregation<cudf::groupby_aggregation>();
  requests[0].aggregations.push_back(std::move(agg));
  requests[0].values = *val;
  auto agg_results   = grpby_obj.aggregate(requests);
  auto result_key    = std::move(agg_results.first);
  auto result_val    = std::move(agg_results.second[0].results[0]);

  // assemble a single table of [key, count]
  auto left_cols = result_key->release();
  left_cols.push_back(std::move(result_val));

  return std::make_unique<cudf::table>(std::move(left_cols));
}

/**
 * @brief Join each row with its duplicate counts
 *
 * Inner-joins on the first column of each table and appends the right
 * table's count column to the gathered left rows.
 *
 * @param left left table
 * @param right right table (keys + counts, as produced by count_aggregate)
 * @return std::unique_ptr<cudf::table>
 */
std::unique_ptr<cudf::table> join_count(cudf::table_view left, cudf::table_view right)
{
  auto [left_indices, right_indices] =
    cudf::inner_join(cudf::table_view{{left.column(0)}}, cudf::table_view{{right.column(0)}});
  auto new_left  = cudf::gather(left, cudf::device_span<cudf::size_type const>{*left_indices});
  auto new_right = cudf::gather(right, cudf::device_span<cudf::size_type const>{*right_indices});

  auto left_cols  = new_left->release();
  auto right_cols = new_right->release();
  // right_cols[1] is the count column; the join key itself is discarded
  left_cols.push_back(std::move(right_cols[1]));

  return std::make_unique<cudf::table>(std::move(left_cols));
}

/**
 * @brief Sort nested-type column
 *
 * Orders the whole table by the lexicographic order of its first column.
 *
 * @param input table to sort
 * @return std::unique_ptr<cudf::table>
 *
 * @note if stability is desired, use `cudf::stable_sorted_order`
 */
std::unique_ptr<cudf::table> sort_keys(cudf::table_view input)
{
  auto sort_order = cudf::sorted_order(cudf::table_view{{input.column(0)}});
  return cudf::gather(input, *sort_order);
}

/**
 * @brief Main for nested_types examples
 *
 * Command line parameters:
 * 1. JSON input file name/path (default: "example.json")
 * 2. JSON output file name/path (default: "output.json")
 * 3. Memory resource (optional): "pool" or "cuda" (default: "pool")
 *
 * Example invocation from directory `cudf/cpp/examples/nested_types`:
 * ./build/deduplication example.json output.json pool
 *
 */
int main(int argc, char const** argv)
{
  std::string input_filepath;
  std::string output_filepath;
  std::string mr_name;

  // accept either all three arguments or none (use defaults)
  if (argc != 4 && argc != 1) {
    std::cout << "Either provide all command-line arguments, or none to use defaults" << std::endl;
    return 1;
  }
  if (argc == 1) {
    input_filepath  = "example.json";
    output_filepath = "output.json";
    mr_name         = "pool";
  } else {
    input_filepath  = argv[1];
    output_filepath = argv[2];
    mr_name         = argv[3];
  }

  auto pool     = mr_name == "pool";
  auto resource = create_memory_resource(pool);
  // `resource` must stay alive while it is the current device resource
  rmm::mr::set_current_device_resource(resource.get());

  std::cout << "Reading " << input_filepath << "..." << std::endl;
  // read input file
  auto [input, metadata] = read_json(input_filepath);

  // pipeline: count duplicates -> attach counts to each row -> sort by key
  auto count = count_aggregate(input->view());

  auto combined = join_count(input->view(), count->view());

  auto sorted = sort_keys(combined->view());

  // name the appended count column for the JSON writer
  metadata.schema_info.emplace_back("count");

  std::cout << "Writing " << output_filepath << "..." << std::endl;
  write_json(sorted->view(), metadata, output_filepath);

  return 0;
}
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/basic/CMakeLists.txt
# Copyright (c) 2020-2023, NVIDIA CORPORATION. cmake_minimum_required(VERSION 3.26.4) project( basic_example VERSION 0.0.1 LANGUAGES CXX CUDA ) include(../fetch_dependencies.cmake) # Configure your project here add_executable(basic_example src/process_csv.cpp) target_link_libraries(basic_example PRIVATE cudf::cudf) target_compile_features(basic_example PRIVATE cxx_std_17)
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/basic/README.md
# Basic Standalone libcudf C++ application

This C++ example demonstrates a basic libcudf use case and provides a
minimal example of building your own application based on libcudf using
CMake.

The example source code loads a csv file that contains stock prices from
4 companies spanning across 5 days, computes the average of the closing
price for each company and writes the result in csv format.

## Compile and execute

```bash
# Configure project
cmake -S . -B build/

# Build
cmake --build build/ --parallel $PARALLEL_LEVEL

# Execute
build/basic_example
```

If your machine does not come with a pre-built libcudf binary, expect the
first build to take some time, as libcudf will be built from source on the
host machine. The build can be sped up by setting the `PARALLEL_LEVEL`
environment variable to an appropriate number of parallel build jobs.
0
rapidsai_public_repos/cudf/cpp/examples
rapidsai_public_repos/cudf/cpp/examples/basic/4stock_5day.csv
Company,Date,Open,High,Low,Close,Volume MSFT,2021-03-03,232.16000366210938,233.5800018310547,227.25999450683594,227.55999755859375,33950400.0 MSFT,2021-03-04,226.74000549316406,232.49000549316406,224.25999450683594,226.72999572753906,44584200.0 MSFT,2021-03-05,229.52000427246094,233.27000427246094,226.4600067138672,231.60000610351562,41842100.0 MSFT,2021-03-08,231.3699951171875,233.3699951171875,227.1300048828125,227.38999938964844,35245900.0 MSFT,2021-03-09,232.8800048828125,235.3800048828125,231.6699981689453,233.77999877929688,33034000.0 GOOG,2021-03-03,2067.2099609375,2088.51806640625,2010.0,2026.7099609375,1483100.0 GOOG,2021-03-04,2023.3699951171875,2089.239990234375,2020.27001953125,2049.090087890625,2116100.0 GOOG,2021-03-05,2073.1201171875,2118.110107421875,2046.4150390625,2108.5400390625,2193800.0 GOOG,2021-03-08,2101.1298828125,2128.81005859375,2021.6099853515625,2024.1700439453125,1646000.0 GOOG,2021-03-09,2070.0,2078.0400390625,2047.8299560546875,2052.699951171875,1696400.0 AMZN,2021-03-03,3081.179931640625,3107.780029296875,2995.0,3005.0,3967200.0 AMZN,2021-03-04,3012.0,3058.1298828125,2945.429931640625,2977.570068359375,5458700.0 AMZN,2021-03-05,3005.0,3009.0,2881.0,3000.4599609375,5383400.0 AMZN,2021-03-08,3015.0,3064.590087890625,2951.31005859375,2951.949951171875,4178500.0 AMZN,2021-03-09,3017.989990234375,3090.9599609375,3005.14990234375,3062.85009765625,4023500.0 AAPL,2021-03-03,124.80999755859375,125.70999908447266,121.83999633789062,122.05999755859375,112430400.0 AAPL,2021-03-04,121.75,123.5999984741211,118.62000274658203,120.12999725341797,177275300.0 AAPL,2021-03-05,120.9800033569336,121.94000244140625,117.56999969482422,121.41999816894531,153590400.0 AAPL,2021-03-08,120.93000030517578,121.0,116.20999908447266,116.36000061035156,153918600.0 AAPL,2021-03-09,119.02999877929688,122.05999755859375,118.79000091552734,121.08999633789062,129159600.0
0
rapidsai_public_repos/cudf/cpp/examples/basic
rapidsai_public_repos/cudf/cpp/examples/basic/src/process_csv.cpp
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/aggregation.hpp>
#include <cudf/groupby.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/table/table.hpp>

#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

#include <memory>
#include <string>
#include <utility>
#include <vector>

/**
 * @brief Read a CSV file into a libcudf table (header row inferred by default)
 *
 * @param file_path path to the input CSV file
 * @return table plus column-name metadata
 */
cudf::io::table_with_metadata read_csv(std::string const& file_path)
{
  auto source_info = cudf::io::source_info(file_path);
  auto builder     = cudf::io::csv_reader_options::builder(source_info);
  auto options     = builder.build();
  return cudf::io::read_csv(options);
}

/**
 * @brief Write a table out as CSV
 *
 * @param tbl_view table to write
 * @param file_path path of the output CSV file
 */
void write_csv(cudf::table_view const& tbl_view, std::string const& file_path)
{
  auto sink_info = cudf::io::sink_info(file_path);
  auto builder   = cudf::io::csv_writer_options::builder(sink_info, tbl_view);
  auto options   = builder.build();
  cudf::io::write_csv(options);
}

/**
 * @brief Build a groupby request list containing a single aggregation
 *
 * @param agg the aggregation to perform (ownership is taken)
 * @param value the column the aggregation reads
 * @return request vector suitable for cudf::groupby::groupby::aggregate
 */
std::vector<cudf::groupby::aggregation_request> make_single_aggregation_request(
  std::unique_ptr<cudf::groupby_aggregation>&& agg, cudf::column_view value)
{
  std::vector<cudf::groupby::aggregation_request> requests;
  requests.emplace_back(cudf::groupby::aggregation_request());
  requests[0].aggregations.push_back(std::move(agg));
  requests[0].values = value;
  return requests;
}

/**
 * @brief Compute the mean closing price per company
 *
 * @param stock_info_table input stock table (see schema comment below)
 * @return two-column table: company key and mean closing price
 */
std::unique_ptr<cudf::table> average_closing_price(cudf::table_view stock_info_table)
{
  // Schema: | Company | Date | Open | High | Low | Close | Volume |
  auto keys = cudf::table_view{{stock_info_table.column(0)}};  // Company
  auto val  = stock_info_table.column(5);                      // Close

  // Compute the average of each company's closing price with entire column
  cudf::groupby::groupby grpby_obj(keys);
  auto requests =
    make_single_aggregation_request(cudf::make_mean_aggregation<cudf::groupby_aggregation>(), val);

  auto agg_results = grpby_obj.aggregate(requests);

  // Assemble the result
  auto result_key = std::move(agg_results.first);
  auto result_val = std::move(agg_results.second[0].results[0]);
  // deep-copy the key column view plus the result column into a new table
  std::vector<cudf::column_view> columns{result_key->get_column(0), *result_val};
  return std::make_unique<cudf::table>(cudf::table_view(columns));
}

/**
 * @brief Entry point: read 4stock_5day.csv, average closing prices, write result
 */
int main(int argc, char** argv)
{
  // Construct a CUDA memory resource using RAPIDS Memory Manager (RMM)
  // This is the default memory resource for libcudf for allocating device memory.
  rmm::mr::cuda_memory_resource cuda_mr{};
  // Construct a memory pool using the CUDA memory resource
  // Using a memory pool for device memory allocations is important for good performance in libcudf.
  // The pool defaults to allocating half of the available GPU memory.
  rmm::mr::pool_memory_resource mr{&cuda_mr};

  // Set the pool resource to be used by default for all device memory allocations
  // Note: It is the user's responsibility to ensure the `mr` object stays alive for the duration of
  // it being set as the default
  // Also, call this before the first libcudf API call to ensure all data is allocated by the same
  // memory resource.
  rmm::mr::set_current_device_resource(&mr);

  // Read data
  auto stock_table_with_metadata = read_csv("4stock_5day.csv");

  // Process
  auto result = average_closing_price(*stock_table_with_metadata.tbl);

  // Write out result
  write_csv(*result, "4stock_5day_avg_close.csv");

  return 0;
}
0
rapidsai_public_repos/cudf
rapidsai_public_repos/cudf/notebooks/cupy-interop.ipynb
import timeit import cupy as cp from packaging import version import cudf if version.parse(cp.__version__) >= version.parse("10.0.0"): cupy_from_dlpack = cp.from_dlpack else: cupy_from_dlpack = cp.fromDlpacknelem = 10000 df = cudf.DataFrame( { "a": range(nelem), "b": range(500, nelem + 500), "c": range(1000, nelem + 1000), } ) %timeit arr_cupy = cupy_from_dlpack(df.to_dlpack()) %timeit arr_cupy = df.values %timeit arr_cupy = df.to_cupy()arr_cupy = cupy_from_dlpack(df.to_dlpack()) arr_cupycol = "a" %timeit cola_cupy = cp.asarray(df[col]) %timeit cola_cupy = cupy_from_dlpack(df[col].to_dlpack()) %timeit cola_cupy = df[col].valuescola_cupy = cp.asarray(df[col]) cola_cupyreshaped_arr = cola_cupy.reshape(50, 200) reshaped_arrreshaped_arr.diagonal()cp.linalg.norm(reshaped_arr)%timeit reshaped_df = cudf.DataFrame(reshaped_arr)reshaped_df = cudf.DataFrame(reshaped_arr) reshaped_df.head()cp.isfortran(reshaped_arr)%%timeit fortran_arr = cp.asfortranarray(reshaped_arr) reshaped_df = cudf.DataFrame(fortran_arr)%%timeit fortran_arr = cp.asfortranarray(reshaped_arr) reshaped_df = cudf.from_dlpack(fortran_arr.toDlpack())fortran_arr = cp.asfortranarray(reshaped_arr) reshaped_df = cudf.DataFrame(fortran_arr) reshaped_df.head()cudf.Series(reshaped_arr.diagonal()).head()reshaped_df.head()new_arr = cupy_from_dlpack(reshaped_df.to_dlpack()) new_arr.sum(axis=1)def cudf_to_cupy_sparse_matrix(data, sparseformat="column"): """Converts a cuDF object to a CuPy Sparse Column matrix.""" if sparseformat not in ( "row", "column", ): raise ValueError("Let's focus on column and row formats for now.") _sparse_constructor = cp.sparse.csc_matrix if sparseformat == "row": _sparse_constructor = cp.sparse.csr_matrix return _sparse_constructor(cupy_from_dlpack(data.to_dlpack()))df = cudf.DataFrame() nelem = 10000 nonzero = 1000 for i in range(20): arr = cp.random.normal(5, 5, nelem) arr[cp.random.choice(arr.shape[0], nelem - nonzero, replace=False)] = 0 df["a" + str(i)] = arrdf.head()sparse_data = 
cudf_to_cupy_sparse_matrix(df) print(sparse_data)
0
rapidsai_public_repos/cudf
rapidsai_public_repos/cudf/notebooks/README.md
# cuDF Notebooks ## Intro These notebooks provide examples of how to use cuDF. These notebooks are designed to be self-contained with the `runtime` version of the [RAPIDS Docker Container](https://hub.docker.com/r/rapidsai/rapidsai/) and [RAPIDS Nightly Docker Containers](https://hub.docker.com/r/rapidsai/rapidsai-nightly) and can run on air-gapped systems. You can quickly get this container using the install guide from the [RAPIDS.ai Getting Started page](https://rapids.ai/start.html#get-rapids) ## RAPIDS notebooks Visit the main RAPIDS [notebooks](https://github.com/rapidsai/notebooks) repo for a listing of all notebooks across all RAPIDS libraries.
0
rapidsai_public_repos/cudf
rapidsai_public_repos/cudf/notebooks/10min.ipynb
import os import cupy as cp import pandas as pd import cudf import dask_cudf cp.random.seed(12) #### Portions of this were borrowed and adapted from the #### cuDF cheatsheet, existing cuDF documentation, #### and 10 Minutes to Pandas.s = cudf.Series([1, 2, 3, None, 4]) sds = dask_cudf.from_cudf(s, npartitions=2) # Note the call to head here to show the first few entries, unlike # cuDF objects, dask-cuDF objects do not have a printing # representation that shows values since they may not be in local # memory. ds.head(n=3)df = cudf.DataFrame( { "a": list(range(20)), "b": list(reversed(range(20))), "c": list(range(20)), } ) dfddf = dask_cudf.from_cudf(df, npartitions=2) ddf.head()pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]}) gdf = cudf.DataFrame.from_pandas(pdf) gdfdask_gdf = dask_cudf.from_cudf(gdf, npartitions=2) dask_gdf.head(n=2)df.head(2)ddf.head(2)df.sort_values(by="b")ddf.sort_values(by="b").head()df["a"]ddf["a"].head()df.loc[2:5, ["a", "b"]]ddf.loc[2:5, ["a", "b"]].head()df.iloc[0]df.iloc[0:3, 0:2]df[3:5]s[3:5]df[df.b > 15]ddf[ddf.b > 15].head(n=3)df.query("b == 3")ddf.query("b == 3").compute()cudf_comparator = 3 df.query("b == @cudf_comparator")dask_cudf_comparator = 3 ddf.query("b == @val", local_dict={"val": dask_cudf_comparator}).compute()df[df.a.isin([0, 5])]arrays = [["a", "a", "b", "b"], [1, 2, 3, 4]] tuples = list(zip(*arrays)) idx = cudf.MultiIndex.from_tuples(tuples) idxgdf1 = cudf.DataFrame( {"first": cp.random.rand(4), "second": cp.random.rand(4)} ) gdf1.index = idx gdf1gdf2 = cudf.DataFrame( {"first": cp.random.rand(4), "second": cp.random.rand(4)} ).T gdf2.columns = idx gdf2gdf1.loc[("b", 3)]gdf1.iloc[0:2]s.fillna(999)ds.fillna(999).head(n=3)s.mean(), s.var()ds.mean().compute(), ds.var().compute()def add_ten(num): return num + 10 df["a"].apply(add_ten)ddf["a"].map_partitions(add_ten).head()df.a.value_counts()ddf.a.value_counts().head()s = cudf.Series(["A", "B", "C", "Aaba", "Baca", None, "CABA", "dog", "cat"]) 
s.str.lower()ds = dask_cudf.from_cudf(s, npartitions=2) ds.str.lower().head(n=4)s.str.match("^[aAc].+")ds.str.match("^[aAc].+").head()s = cudf.Series([1, 2, 3, None, 5]) cudf.concat([s, s])ds2 = dask_cudf.from_cudf(s, npartitions=2) dask_cudf.concat([ds2, ds2]).head(n=3)df_a = cudf.DataFrame() df_a["key"] = ["a", "b", "c", "d", "e"] df_a["vals_a"] = [float(i + 10) for i in range(5)] df_b = cudf.DataFrame() df_b["key"] = ["a", "c", "e"] df_b["vals_b"] = [float(i + 100) for i in range(3)] merged = df_a.merge(df_b, on=["key"], how="left") mergedddf_a = dask_cudf.from_cudf(df_a, npartitions=2) ddf_b = dask_cudf.from_cudf(df_b, npartitions=2) merged = ddf_a.merge(ddf_b, on=["key"], how="left").head(n=4) mergeddf["agg_col1"] = [1 if x % 2 == 0 else 0 for x in range(len(df))] df["agg_col2"] = [1 if x % 3 == 0 else 0 for x in range(len(df))] ddf = dask_cudf.from_cudf(df, npartitions=2)df.groupby("agg_col1").sum()ddf.groupby("agg_col1").sum().compute()df.groupby(["agg_col1", "agg_col2"]).sum()ddf.groupby(["agg_col1", "agg_col2"]).sum().compute()df.groupby("agg_col1").agg({"a": "max", "b": "mean", "c": "sum"})ddf.groupby("agg_col1").agg({"a": "max", "b": "mean", "c": "sum"}).compute()sample = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) samplesample.transpose()import datetime as dt date_df = cudf.DataFrame() date_df["date"] = pd.date_range("11/20/2018", periods=72, freq="D") date_df["value"] = cp.random.sample(len(date_df)) search_date = dt.datetime.strptime("2018-11-23", "%Y-%m-%d") date_df.query("date <= @search_date")date_ddf = dask_cudf.from_cudf(date_df, npartitions=2) date_ddf.query( "date <= @search_date", local_dict={"search_date": search_date} ).compute()gdf = cudf.DataFrame( {"id": [1, 2, 3, 4, 5, 6], "grade": ["a", "b", "b", "a", "a", "e"]} ) gdf["grade"] = gdf["grade"].astype("category") gdfdgdf = dask_cudf.from_cudf(gdf, npartitions=2) 
dgdf.head(n=3)gdf.grade.cat.categoriesgdf.grade.cat.codesdgdf.grade.cat.codes.compute()df.head().to_pandas()ddf.head().to_pandas()ddf.compute().to_pandas().head()df.to_numpy()ddf.compute().to_numpy()df["a"].to_numpy()ddf["a"].compute().to_numpy()df.to_arrow()ddf.head().to_arrow()if not os.path.exists("example_output"): os.mkdir("example_output") df.to_csv("example_output/foo.csv", index=False)ddf.compute().to_csv("example_output/foo_dask.csv", index=False)df = cudf.read_csv("example_output/foo.csv") dfddf = dask_cudf.read_csv("example_output/foo_dask.csv") ddf.head()ddf = dask_cudf.read_csv("example_output/*.csv") ddf.head()df.to_parquet("example_output/temp_parquet")df = cudf.read_parquet("example_output/temp_parquet") dfddf.to_parquet("example_output/ddf_parquet_files")df.to_orc("example_output/temp_orc")df2 = cudf.read_orc("example_output/temp_orc") df2import time from dask.distributed import Client, wait from dask_cuda import LocalCUDACluster cluster = LocalCUDACluster() client = Client(cluster)nrows = 10000000 df2 = cudf.DataFrame({"a": cp.arange(nrows), "b": cp.arange(nrows)}) ddf2 = dask_cudf.from_cudf(df2, npartitions=16) ddf2["c"] = ddf2["a"] + 5 ddf2ddf2 = ddf2.persist() ddf2# Sleep to ensure the persist finishes and shows in the memory usage !sleep 5; nvidia-smiimport random nrows = 10000000 df1 = cudf.DataFrame({"a": cp.arange(nrows), "b": cp.arange(nrows)}) ddf1 = dask_cudf.from_cudf(df1, npartitions=100) def func(df): time.sleep(random.randint(1, 10)) return (df + 5) * 3 - 11results_ddf = ddf2.map_partitions(func) results_ddf = results_ddf.persist()wait(results_ddf)
0
rapidsai_public_repos/cudf
rapidsai_public_repos/cudf/notebooks/missing-data.ipynb
import numpy as np import cudfdf = cudf.DataFrame({"a": [1, 2, None, 4], "b": [0.1, None, 2.3, 17.17]})dfdf.isna()df["a"].notna()None == Nonenp.nan == np.nandf["b"] == np.nans = cudf.Series([None, 1, 2])ss == Nones = cudf.Series([1, 2, np.nan], nan_as_null=False)ss == np.nancudf.Series([1, 2, np.nan])cudf.Series([1, 2, np.nan], nan_as_null=False)import pandas as pd datetime_series = cudf.Series( [pd.Timestamp("20120101"), pd.NaT, pd.Timestamp("20120101")] ) datetime_seriesdatetime_series.to_pandas()datetime_series - datetime_seriesdf1 = cudf.DataFrame( { "a": [1, None, 2, 3, None], "b": cudf.Series([np.nan, 2, 3.2, 0.1, 1], nan_as_null=False), } )df2 = cudf.DataFrame( {"a": [1, 11, 2, 34, 10], "b": cudf.Series([0.23, 22, 3.2, None, 1])} )df1df2df1 + df2df1["a"]df1["a"].sum()df1["a"].mean()df1["a"].sum(skipna=False)df1["a"].mean(skipna=False)df1["a"].cumsum()df1["a"].cumsum(skipna=False)cudf.Series([np.nan], nan_as_null=False).sum()cudf.Series([np.nan], nan_as_null=False).sum(skipna=False)cudf.Series([], dtype="float64").sum()cudf.Series([np.nan], nan_as_null=False).prod()cudf.Series([np.nan], nan_as_null=False).prod(skipna=False)cudf.Series([], dtype="float64").prod()df1df1.groupby("a").mean()df1.groupby("a", dropna=False).mean()series = cudf.Series([1, 2, 3, 4])seriesseries[2] = Noneseriesdf1df1["b"].fillna(10)import cupy as cp dff = cudf.DataFrame(cp.random.randn(10, 3), columns=list("ABC"))dff.iloc[3:5, 0] = np.nandff.iloc[4:6, 1] = np.nandff.iloc[5:8, 2] = np.nandffdff.fillna(dff.mean())dff.fillna(dff.mean()[1:3])df1df1.dropna(axis=0)df1.dropna(axis=1)df1["a"].dropna()series = cudf.Series([0.0, 1.0, 2.0, 3.0, 4.0])seriesseries.replace(0, 5)series.replace(0, None)series.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])series.replace({0: 10, 1: 100})df = cudf.DataFrame({"a": [0, 1, 2, 3, 4], "b": [5, 6, 7, 8, 9]})dfdf.replace({"a": 0, "b": 5}, 100)d = {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", None, "d"]}df = cudf.DataFrame(d)dfdf.replace(".", "A 
Dot")df.replace([".", "b"], ["A Dot", None])df.replace(["a", "."], ["b", "--"])df.replace({"b": "."}, {"b": "replacement value"})df = cudf.DataFrame(cp.random.randn(10, 2))df[np.random.rand(df.shape[0]) > 0.5] = 1.5df.replace(1.5, None)df00 = df.iloc[0, 0]df.replace([1.5, df00], [5, 10])df.replace(1.5, None, inplace=True)df
0
rapidsai_public_repos/cudf
rapidsai_public_repos/cudf/notebooks/guide-to-udfs.ipynb
import numpy as np import cudf from cudf.datasets import randomdata# Create a cuDF series sr = cudf.Series([1, 2, 3])# define a scalar function def f(x): return x + 1sr.apply(f)def g(x, const): return x + const# cuDF apply sr.apply(g, args=(42,))# Create a cuDF series with nulls sr = cudf.Series([1, cudf.NA, 3]) sr# redefine the same function from above def f(x): return x + 1# cuDF result sr.apply(f)def f_null_sensitive(x): # do something if the input is null if x is cudf.NA: return 42 else: return x + 1# cuDF result sr.apply(f_null_sensitive)sr = cudf.Series(["", "abc", "some_example"])def f(st): if len(st) > 0: if st.startswith("a"): return 1 elif "example" in st: return 2 else: return -1 else: return 42result = sr.apply(f) print(result)from cudf.core.udf.utils import set_malloc_heap_size set_malloc_heap_size(int(2e9))df = randomdata(nrows=5, dtypes={"a": int, "b": int, "c": int}, seed=12)from numba import cuda @cuda.jit def multiply(in_col, out_col, multiplier): i = cuda.grid(1) if i < in_col.size: # boundary guard out_col[i] = in_col[i] * multipliersize = len(df["a"]) df["e"] = 0.0 multiply.forall(size)(df["a"], df["e"], 10.0)df.head()def f(row): return row["A"] + row["B"]df = cudf.DataFrame({"A": [1, 2, 3], "B": [4, cudf.NA, 6]}) dfdf.apply(f, axis=1)df.to_pandas(nullable=True).apply(f, axis=1)def f(row): x = row["a"] if x is cudf.NA: return 0 else: return x + 1 df = cudf.DataFrame({"a": [1, cudf.NA, 3]}) dfdf.apply(f, axis=1)def f(row): x = row["a"] y = row["b"] if x + y > 3: return cudf.NA else: return x + y df = cudf.DataFrame({"a": [1, 2, 3], "b": [2, 1, 1]}) dfdf.apply(f, axis=1)def f(row): return row["a"] + row["b"] df = cudf.DataFrame({"a": [1, 2, 3], "b": [0.5, cudf.NA, 3.14]}) dfdf.apply(f, axis=1)def f(row): x = row["a"] if x > 3: return x else: return 1.5 df = cudf.DataFrame({"a": [1, 3, 5]}) dfdf.apply(f, axis=1)def f(row): return row["a"] + (row["b"] - (row["c"] / row["d"])) % row["e"] df = cudf.DataFrame( { "a": [1, 2, 3], "b": [4, 5, 6], "c": 
[cudf.NA, 4, 4], "d": [8, 7, 8], "e": [7, 1, 6], } ) dfdf.apply(f, axis=1)str_df = cudf.DataFrame( {"str_col": ["abc", "ABC", "Example"], "scale": [1, 2, 3]} ) str_dfdef f(row): st = row["str_col"] scale = row["scale"] if len(st) > 5: return len(st) + scale else: return len(st)result = str_df.apply(f, axis=1) print(result)def conditional_add(x, y, out): for i, (a, e) in enumerate(zip(x, y)): if a > 0: out[i] = a + e else: out[i] = adf = df.apply_rows( conditional_add, incols={"a": "x", "e": "y"}, outcols={"out": np.float64}, kwargs={}, ) df.head()def gpu_add(a, b, out): for i, (x, y) in enumerate(zip(a, b)): out[i] = x + y df = randomdata(nrows=5, dtypes={"a": int, "b": int, "c": int}, seed=12) df.loc[2, "a"] = None df.loc[3, "b"] = None df.loc[1, "c"] = None df.head()df = df.apply_rows( gpu_add, incols=["a", "b"], outcols={"out": np.float64}, kwargs={} ) df.head()ser = cudf.Series([16, 25, 36, 49, 64, 81], dtype="float64") serrolling = ser.rolling(window=3, min_periods=3, center=False) rollingimport math def example_func(window): b = 0 for a in window: b = max(b, math.sqrt(a)) if b == 8: return 100 return brolling.apply(example_func)df2 = cudf.DataFrame() df2["a"] = np.arange(55, 65, dtype="float64") df2["b"] = np.arange(55, 65, dtype="float64") df2.head()rolling = df2.rolling(window=3, min_periods=3, center=False) rolling.apply(example_func)df = randomdata( nrows=10, dtypes={"a": float, "b": bool, "c": str, "e": float}, seed=12 ) df.head()grouped = df.groupby(["b"])def rolling_avg(e, rolling_avg_e): win_size = 3 for i in range(cuda.threadIdx.x, len(e), cuda.blockDim.x): if i < win_size - 1: # If there is not enough data to fill the window, # take the average to be NaN rolling_avg_e[i] = np.nan else: total = 0 for j in range(i - win_size + 1, i + 1): total += e[j] rolling_avg_e[i] = total / win_sizeresults = grouped.apply_grouped( rolling_avg, incols=["e"], outcols=dict(rolling_avg_e=np.float64) ) resultsimport cupy as cp s = cudf.Series([1.0, 2, 3, 4, 10]) arr = 
cp.asarray(s) arr@cuda.jit def multiply_by_5(x, out): i = cuda.grid(1) if i < x.size: out[i] = x[i] * 5 out = cudf.Series(cp.zeros(len(s), dtype="int32")) multiply_by_5.forall(s.shape[0])(s, out) outout = cp.empty_like(arr) multiply_by_5.forall(arr.size)(arr, out) out
0
rapidsai_public_repos/cudf
rapidsai_public_repos/cudf/.devcontainer/README.md
# cuDF Development Containers This directory contains [devcontainer configurations](https://containers.dev/implementors/json_reference/) for using VSCode to [develop in a container](https://code.visualstudio.com/docs/devcontainers/containers) via the `Remote Containers` [extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) or [GitHub Codespaces](https://github.com/codespaces). This container is a turnkey development environment for building and testing the cuDF C++ and Python libraries. ## Table of Contents * [Prerequisites](#prerequisites) * [Host bind mounts](#host-bind-mounts) * [Launch a Dev Container](#launch-a-dev-container) ## Prerequisites * [VSCode](https://code.visualstudio.com/download) * [VSCode Remote Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) ## Host bind mounts By default, the following directories are bind-mounted into the devcontainer: * `${repo}:/home/coder/cudf` * `${repo}/../.aws:/home/coder/.aws` * `${repo}/../.local:/home/coder/.local` * `${repo}/../.cache:/home/coder/.cache` * `${repo}/../.conda:/home/coder/.conda` * `${repo}/../.config:/home/coder/.config` This ensures caches, configurations, dependencies, and your commits are persisted on the host across container runs. ## Launch a Dev Container To launch a devcontainer from VSCode, open the cuDF repo and select the "Reopen in Container" button in the bottom right:<br/><img src="https://user-images.githubusercontent.com/178183/221771999-97ab29d5-e718-4e5f-b32f-2cdd51bba25c.png"/> Alternatively, open the VSCode command palette (typically `cmd/ctrl + shift + P`) and run the "Rebuild and Reopen in Container" command.
0
rapidsai_public_repos/cudf
rapidsai_public_repos/cudf/.devcontainer/Dockerfile
# syntax=docker/dockerfile:1.5 ARG BASE ARG PYTHON_PACKAGE_MANAGER=conda FROM ${BASE} as pip-base RUN apt update -y \ && DEBIAN_FRONTEND=noninteractive apt install -y \ librdkafka-dev \ && rm -rf /tmp/* /var/tmp/* /var/cache/apt/* /var/lib/apt/lists/*; ENV DEFAULT_VIRTUAL_ENV=rapids FROM ${BASE} as conda-base ENV DEFAULT_CONDA_ENV=rapids FROM ${PYTHON_PACKAGE_MANAGER}-base ARG CUDA ENV CUDAARCHS="RAPIDS" ENV CUDA_VERSION="${CUDA_VERSION:-${CUDA}}" ARG PYTHON_PACKAGE_MANAGER ENV PYTHON_PACKAGE_MANAGER="${PYTHON_PACKAGE_MANAGER}" ENV PYTHONSAFEPATH="1" ENV PYTHONUNBUFFERED="1" ENV PYTHONDONTWRITEBYTECODE="1" ENV SCCACHE_REGION="us-east-2" ENV SCCACHE_BUCKET="rapids-sccache-devs" ENV VAULT_HOST="https://vault.ops.k8s.rapids.ai" ENV HISTFILE="/home/coder/.cache/._bash_history"
0
rapidsai_public_repos/cudf/.devcontainer
rapidsai_public_repos/cudf/.devcontainer/cuda11.8-pip/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "11.8", "PYTHON_PACKAGE_MANAGER": "pip", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda11.8-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cudf,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/cudf/.devcontainer
rapidsai_public_repos/cudf/.devcontainer/cuda12.0-pip/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "12.0", "PYTHON_PACKAGE_MANAGER": "pip", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda12.0-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cudf,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/cudf/.devcontainer
rapidsai_public_repos/cudf/.devcontainer/cuda12.0-conda/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "12.0", "PYTHON_PACKAGE_MANAGER": "conda", "BASE": "rapidsai/devcontainers:24.02-cpp-mambaforge-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.0-envs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cudf,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.0-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0
rapidsai_public_repos/cudf/.devcontainer
rapidsai_public_repos/cudf/.devcontainer/cuda11.8-conda/devcontainer.json
{ "build": { "context": "${localWorkspaceFolder}/.devcontainer", "dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile", "args": { "CUDA": "11.8", "PYTHON_PACKAGE_MANAGER": "conda", "BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda11.8-mambaforge-ubuntu22.04" } }, "hostRequirements": {"gpu": "optional"}, "features": { "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/rapidsai/devcontainers/features/rapids-build-utils" ], "initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda11.8-envs}"], "postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"], "workspaceFolder": "/home/coder", "workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cudf,type=bind,consistency=consistent", "mounts": [ "source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent", "source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda11.8-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent" ], "customizations": { "vscode": { "extensions": [ "ms-python.flake8", "nvidia.nsight-vscode-edition" ] } } }
0