repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cucim/cpp/cmake | rapidsai_public_repos/cucim/cpp/cmake/deps/taskflow.cmake | #
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Provides the `deps::taskflow` imported interface target backed by Taskflow
# v3.2.0, fetched from GitHub at configure time. The guard makes repeated
# include()s of this file configure the dependency only once.
if (NOT TARGET deps::taskflow)
FetchContent_Declare(
deps-taskflow
GIT_REPOSITORY https://github.com/taskflow/taskflow.git
GIT_TAG v3.2.0
GIT_SHALLOW TRUE
)
FetchContent_GetProperties(deps-taskflow)
if (NOT deps-taskflow_POPULATED)
message(STATUS "Fetching taskflow sources")
# Download only; add_subdirectory is invoked explicitly below so that
# EXCLUDE_FROM_ALL can be applied.
FetchContent_Populate(deps-taskflow)
message(STATUS "Fetching taskflow sources - done")
endif ()
# Disable Taskflow's own tests/examples before adding its subdirectory.
set(TF_BUILD_TESTS OFF)
set(TF_BUILD_EXAMPLES OFF)
add_subdirectory(${deps-taskflow_SOURCE_DIR} ${deps-taskflow_BINARY_DIR} EXCLUDE_FROM_ALL)
# Wrap the upstream `Taskflow` target behind a project-namespaced target.
add_library(deps::taskflow INTERFACE IMPORTED GLOBAL)
target_link_libraries(deps::taskflow INTERFACE Taskflow)
# Persist the source dir in the cache so sibling scripts can reference it.
set(deps-taskflow_SOURCE_DIR ${deps-taskflow_SOURCE_DIR} CACHE INTERNAL "" FORCE)
mark_as_advanced(deps-taskflow_SOURCE_DIR)
endif ()
| 0 |
rapidsai_public_repos/cucim/cpp/cmake | rapidsai_public_repos/cucim/cpp/cmake/deps/boost-header-only.cmake | #
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Provides `deps::boost-header-only`: a header-only subset of Boost 1.75.0 whose
# interface include directories cover the components listed below (the set
# needed by boost::interprocess and its transitive dependencies). A local patch
# is applied to libs/interprocess after checkout.
if (NOT TARGET deps::boost-header-only)
set(Boost_VERSION 1.75.0)
set(boost_component_list "interprocess" "config" "intrusive" "move" "assert" "static_assert" "container" "core" "date_time" "smart_ptr" "throw_exception" "utility" "type_traits" "numeric/conversion" "mpl" "preprocessor" "container_hash" "integer" "detail")
FetchContent_Declare(
deps-boost-header-only
GIT_REPOSITORY https://github.com/boostorg/boost.git
GIT_TAG boost-${Boost_VERSION}
GIT_SHALLOW TRUE
)
FetchContent_GetProperties(deps-boost-header-only)
if (NOT deps-boost-header-only_POPULATED)
message(STATUS "Fetching boost-header-only sources")
FetchContent_Populate(deps-boost-header-only)
message(STATUS "Fetching boost-header-only sources - done")
message(STATUS "Applying patch for boost-header-only")
find_package(Git)
if(Git_FOUND OR GIT_FOUND)
# Reset any previously applied patch, then apply the local interprocess patch.
# NOTE(review): relies on bash being on PATH; not portable to Windows shells.
execute_process(
COMMAND bash -c "${GIT_EXECUTABLE} reset HEAD --hard && ${GIT_EXECUTABLE} apply ${CMAKE_CURRENT_LIST_DIR}/boost-header-only.patch"
WORKING_DIRECTORY "${deps-boost-header-only_SOURCE_DIR}/libs/interprocess"
RESULT_VARIABLE exec_result
ERROR_VARIABLE exec_error
ERROR_STRIP_TRAILING_WHITESPACE
OUTPUT_VARIABLE exec_output
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if(exec_result EQUAL 0)
message(STATUS "Applying patch for boost-header-only - done")
else()
message(STATUS "Applying patch for boost-header-only - failed")
message(FATAL_ERROR "${exec_output}\n${exec_error}")
endif()
endif ()
endif ()
add_library(deps::boost-header-only INTERFACE IMPORTED GLOBAL)
unset(boost_include_string)
# Create a list of components
# (each component contributes its `include` directory to the interface).
foreach(item IN LISTS boost_component_list)
set(boost_include_string "${boost_include_string}" "${deps-boost-header-only_SOURCE_DIR}/libs/${item}/include")
endforeach(item)
# https://www.boost.org/doc/libs/1_75_0/doc/html/interprocess.html#interprocess.intro.introduction_building_interprocess
# BOOST_DATE_TIME_NO_LIB keeps the date_time dependency usable header-only.
set_target_properties(deps::boost-header-only PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES
"${boost_include_string}"
INTERFACE_COMPILE_DEFINITIONS
BOOST_DATE_TIME_NO_LIB=1
)
# Persist the source dir in the cache so sibling scripts can reference it.
set(deps-boost-header-only_SOURCE_DIR ${deps-boost-header-only_SOURCE_DIR} CACHE INTERNAL "" FORCE)
mark_as_advanced(deps-boost-header-only_SOURCE_DIR)
endif ()
| 0 |
rapidsai_public_repos/cucim/cpp/cmake | rapidsai_public_repos/cucim/cpp/cmake/deps/pybind11.cmake | #
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Provides `deps::pybind11` (interface target linking pybind11::module) from
# pybind11 v2.11.1, with a local patch (PRs 4857/4877) applied on checkout.
if (NOT TARGET deps::pybind11)
FetchContent_Declare(
deps-pybind11
GIT_REPOSITORY https://github.com/pybind/pybind11.git
GIT_TAG v2.11.1
GIT_SHALLOW TRUE
# `|| true` tolerates the patch already being applied on re-configure.
PATCH_COMMAND git apply "${CMAKE_CURRENT_LIST_DIR}/pybind11_pr4857_4877.patch" || true
)
FetchContent_GetProperties(deps-pybind11)
if (NOT deps-pybind11_POPULATED)
message(STATUS "Fetching pybind11 sources")
FetchContent_Populate(deps-pybind11)
message(STATUS "Fetching pybind11 sources - done")
endif ()
# https://pybind11.readthedocs.io/en/stable/compiling.html#configuration-variables
add_subdirectory(${deps-pybind11_SOURCE_DIR} ${deps-pybind11_BINARY_DIR} EXCLUDE_FROM_ALL)
add_library(deps::pybind11 INTERFACE IMPORTED GLOBAL)
target_link_libraries(deps::pybind11 INTERFACE pybind11::module)
# Persist the source dir in the cache so sibling scripts can reference it.
set(deps-pybind11_SOURCE_DIR ${deps-pybind11_SOURCE_DIR} CACHE INTERNAL "" FORCE)
mark_as_advanced(deps-pybind11_SOURCE_DIR)
endif ()
| 0 |
rapidsai_public_repos/cucim/cpp/cmake | rapidsai_public_repos/cucim/cpp/cmake/deps/nvtx3.cmake | #
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Provides `deps::nvtx3` (interface target linking nvtx3-cpp) from a pinned
# NVTX commit. GIT_SHALLOW stays disabled because a raw commit hash is used.
if (NOT TARGET deps::nvtx3)
FetchContent_Declare(
deps-nvtx3
GIT_REPOSITORY https://github.com/NVIDIA/NVTX.git
GIT_TAG 3c98c8425b0376fd8653aac7cfc6a864f3897752
# GIT_SHALLOW TRUE # TODO (#168): Uncomment this when the official release of nvtx3-cpp is available
)
FetchContent_GetProperties(deps-nvtx3)
if (NOT deps-nvtx3_POPULATED)
message(STATUS "Fetching nvtx3 sources")
FetchContent_Populate(deps-nvtx3)
message(STATUS "Fetching nvtx3 sources - done")
endif ()
# Create shared library
cucim_set_build_shared_libs(ON) # since nvtx3 is a header-only library, this may not be needed
set(BUILD_TESTS OFF)
set(BUILD_BENCHMARKS OFF)
# NVTX's CMake project lives under the cpp/ subdirectory of the repo.
add_subdirectory(${deps-nvtx3_SOURCE_DIR}/cpp ${deps-nvtx3_BINARY_DIR} EXCLUDE_FROM_ALL)
cucim_restore_build_shared_libs()
add_library(deps::nvtx3 INTERFACE IMPORTED GLOBAL)
target_link_libraries(deps::nvtx3 INTERFACE nvtx3-cpp)
# Persist the source dir in the cache so sibling scripts can reference it.
set(deps-nvtx3_SOURCE_DIR ${deps-nvtx3_SOURCE_DIR} CACHE INTERNAL "" FORCE)
mark_as_advanced(deps-nvtx3_SOURCE_DIR)
endif ()
| 0 |
rapidsai_public_repos/cucim/cpp/cmake | rapidsai_public_repos/cucim/cpp/cmake/deps/libcuckoo.cmake | # Apache License, Version 2.0
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Provides `deps::libcuckoo` built statically from libcuckoo v0.3, with a local
# patch (switching to boost::interprocess containers) applied after checkout.
if (NOT TARGET deps::libcuckoo)
FetchContent_Declare(
deps-libcuckoo
GIT_REPOSITORY https://github.com/efficient/libcuckoo
GIT_TAG v0.3
GIT_SHALLOW TRUE
)
FetchContent_GetProperties(deps-libcuckoo)
if (NOT deps-libcuckoo_POPULATED)
message(STATUS "Fetching libcuckoo sources")
FetchContent_Populate(deps-libcuckoo)
message(STATUS "Fetching libcuckoo sources - done")
message(STATUS "Applying patch for libcuckoo")
find_package(Git)
if(Git_FOUND OR GIT_FOUND)
# Reset any previously applied patch, then re-apply; requires bash on PATH.
execute_process(
COMMAND bash -c "${GIT_EXECUTABLE} reset HEAD --hard && ${GIT_EXECUTABLE} apply ${CMAKE_CURRENT_LIST_DIR}/libcuckoo.patch"
WORKING_DIRECTORY "${deps-libcuckoo_SOURCE_DIR}"
RESULT_VARIABLE exec_result
ERROR_VARIABLE exec_error
ERROR_STRIP_TRAILING_WHITESPACE
OUTPUT_VARIABLE exec_output
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if(exec_result EQUAL 0)
message(STATUS "Applying patch for libcuckoo - done")
else()
message(STATUS "Applying patch for libcuckoo - failed")
message(FATAL_ERROR "${exec_output}\n${exec_error}")
endif()
endif ()
endif ()
# Create static library
cucim_set_build_shared_libs(OFF)
add_subdirectory(${deps-libcuckoo_SOURCE_DIR} ${deps-libcuckoo_BINARY_DIR} EXCLUDE_FROM_ALL)
# libcuckoo's CMakeLists.txt is not compatible with `add_subdirectory` method (it uses ${CMAKE_SOURCE_DIR} instead of ${CMAKE_CURRENT_SOURCE_DIR})
# so add include directories explicitly.
target_include_directories(libcuckoo INTERFACE
$<BUILD_INTERFACE:${deps-libcuckoo_SOURCE_DIR}>
)
cucim_restore_build_shared_libs()
add_library(deps::libcuckoo INTERFACE IMPORTED GLOBAL)
target_link_libraries(deps::libcuckoo INTERFACE libcuckoo)
# Persist the source dir in the cache so sibling scripts can reference it.
set(deps-libcuckoo_SOURCE_DIR ${deps-libcuckoo_SOURCE_DIR} CACHE INTERNAL "" FORCE)
mark_as_advanced(deps-libcuckoo_SOURCE_DIR)
endif ()
| 0 |
rapidsai_public_repos/cucim/cpp/cmake | rapidsai_public_repos/cucim/cpp/cmake/deps/libcuckoo.patch | diff --git a/libcuckoo/cuckoohash_map.hh b/libcuckoo/cuckoohash_map.hh
index 88f1f43..a36c273 100644
--- a/libcuckoo/cuckoohash_map.hh
+++ b/libcuckoo/cuckoohash_map.hh
@@ -24,6 +24,10 @@
#include <utility>
#include <vector>
+// [cuCIM patch] Include boost interprocess vector/list.
+#include <boost/interprocess/containers/vector.hpp>
+#include <boost/interprocess/containers/list.hpp>
+
#include "cuckoohash_config.hh"
#include "cuckoohash_util.hh"
#include "bucket_container.hh"
@@ -841,8 +845,13 @@ private:
using rebind_alloc =
typename std::allocator_traits<allocator_type>::template rebind_alloc<U>;
- using locks_t = std::vector<spinlock, rebind_alloc<spinlock>>;
- using all_locks_t = std::list<locks_t, rebind_alloc<locks_t>>;
+ // [cuCIM patch] Use boost::interprocess vector and list for using shared
+ // memory with Boost's interprocess module. This is not a portable solution.
+ //
+ // See [cuCIM patch] https://github.com/efficient/libcuckoo/issues/111
+ //
+ using locks_t = boost::interprocess::vector<spinlock, rebind_alloc<spinlock>>;
+ using all_locks_t = boost::interprocess::list<locks_t, rebind_alloc<locks_t>>;
// Classes for managing locked buckets. By storing and moving around sets of
// locked buckets in these classes, we can ensure that they are unlocked
| 0 |
rapidsai_public_repos/cucim/cpp | rapidsai_public_repos/cucim/cpp/src/cuimage.cpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/cuimage.h"
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#if CUCIM_SUPPORT_CUDA
# include <cuda_runtime.h>
#endif
#include <fmt/format.h>
#include "cucim/profiler/nvtx3.h"
#include "cucim/util/cuda.h"
#include "cucim/util/file.h"
#define XSTR(x) STR(x)
#define STR(x) #x
// DLDataType's equality operator implementation
// Hash specialization so DLDataType can be used as a key in unordered
// containers: the three fields are mixed with distinct prime multipliers.
template <>
struct std::hash<DLDataType>
{
    size_t operator()(const DLDataType& dtype) const
    {
        const size_t code_part = dtype.code * 1117;
        const size_t bits_part = dtype.bits * 31;
        return code_part ^ bits_part ^ (dtype.lanes);
    }
};

// Two DLDataType values are equal only when every field matches.
bool operator==(const DLDataType& lhs, const DLDataType& rhs)
{
    if (lhs.code != rhs.code)
    {
        return false;
    }
    if (lhs.bits != rhs.bits)
    {
        return false;
    }
    return lhs.lanes == rhs.lanes;
}

// Defined as the exact negation of operator==.
bool operator!=(const DLDataType& lhs, const DLDataType& rhs)
{
    return !(lhs == rhs);
}
namespace cucim
{
// Builds a dimension-character -> position map from a dims string such as
// "YXC": each (uppercased) character records its index within the string.
// Characters outside 'A'..'Z' would index out of range -- assumed validated
// upstream (see TODO).
DimIndices::DimIndices(const char* dims)
{
if (!dims)
{
return;
}
// TODO: check illegal characters
int index = 0;
for (const char* ptr = dims; *ptr != 0; ++ptr, ++index)
{
char dim_char = toupper(*ptr);
dim_indices_.indices[dim_char - 'A'] = index;
}
}
// Builds the map from explicit (dimension character, index) pairs.
DimIndices::DimIndices(std::vector<std::pair<char, int64_t>> init_list)
{
// TODO: check illegal characters
for (auto& object : init_list)
{
char dim_char = toupper(object.first);
dim_indices_.indices[dim_char - 'A'] = object.second;
}
}
// Returns the recorded position for `dim_char` (case-insensitive).
int64_t DimIndices::index(char dim_char) const
{
dim_char = toupper(dim_char);
return dim_indices_.indices[dim_char - 'A'];
}
// Copies level counts/dimensions/downsample factors/tile sizes out of the C
// descriptor into owning vectors. Dimension and tile-size arrays are stored
// flattened, level-major (level_count * level_ndim entries).
ResolutionInfo::ResolutionInfo(io::format::ResolutionInfoDesc desc)
{
level_count_ = desc.level_count;
level_ndim_ = desc.level_ndim;
level_dimensions_.insert(
level_dimensions_.end(), &desc.level_dimensions[0], &desc.level_dimensions[level_count_ * level_ndim_]);
level_downsamples_.insert(
level_downsamples_.end(), &desc.level_downsamples[0], &desc.level_downsamples[level_count_]);
level_tile_sizes_.insert(
level_tile_sizes_.end(), &desc.level_tile_sizes[0], &desc.level_tile_sizes[level_count_ * level_ndim_]);
}
// Number of resolution levels in the pyramid.
uint16_t ResolutionInfo::level_count() const
{
return level_count_;
}
// Flattened (level-major) dimensions for all levels.
const std::vector<int64_t>& ResolutionInfo::level_dimensions() const
{
return level_dimensions_;
}
// Dimensions of one level; throws std::invalid_argument when out of range.
std::vector<int64_t> ResolutionInfo::level_dimension(uint16_t level) const
{
if (level >= level_count_)
{
throw std::invalid_argument(fmt::format("'level' should be less than {}", level_count_));
}
std::vector<int64_t> result;
auto start_index = level_dimensions_.begin() + (level * level_ndim_);
result.insert(result.end(), start_index, start_index + level_ndim_);
return result;
}
// Downsample factors for all levels, indexed by level.
const std::vector<float>& ResolutionInfo::level_downsamples() const
{
return level_downsamples_;
}
// Downsample factor of one level; throws std::invalid_argument when out of range.
float ResolutionInfo::level_downsample(uint16_t level) const
{
if (level >= level_count_)
{
throw std::invalid_argument(fmt::format("'level' should be less than {}", level_count_));
}
return level_downsamples_.at(level);
}
// Flattened (level-major) tile sizes for all levels.
const std::vector<uint32_t>& ResolutionInfo::level_tile_sizes() const
{
return level_tile_sizes_;
}
// Tile size of one level; throws std::invalid_argument when out of range.
std::vector<uint32_t> ResolutionInfo::level_tile_size(uint16_t level) const
{
if (level >= level_count_)
{
throw std::invalid_argument(fmt::format("'level' should be less than {}", level_count_));
}
std::vector<uint32_t> result;
auto start_index = level_tile_sizes_.begin() + (level * level_ndim_);
result.insert(result.end(), start_index, start_index + level_ndim_);
return result;
}
// Placeholder format sniffing: currently always reports "Generic TIFF" handled
// by the cuslide plugin, regardless of `path` (see TODO).
DetectedFormat detect_format(filesystem::Path path)
{
// TODO: implement this
(void)path;
return { "Generic TIFF", { "cucim.kit.cuslide" } };
}
// Process-wide services shared by every CuImage instance (framework, config,
// profiler, cache manager, and the format-plugin registry).
Framework* CuImage::framework_ = cucim::acquire_framework("cucim");
std::unique_ptr<config::Config> CuImage::config_ = std::make_unique<config::Config>();
std::shared_ptr<profiler::Profiler> CuImage::profiler_ = std::make_shared<profiler::Profiler>(config_->profiler());
std::unique_ptr<cache::ImageCacheManager> CuImage::cache_manager_ = std::make_unique<cache::ImageCacheManager>();
std::unique_ptr<plugin::ImageFormat> CuImage::image_format_plugins_ = std::make_unique<plugin::ImageFormat>();
// Opens `path` with the plugin that claims its format, parses the image
// metadata, and records the names of any associated (secondary) images.
CuImage::CuImage(const filesystem::Path& path)
{
PROF_SCOPED_RANGE(PROF_EVENT_P(cuimage_cuimage, 1));
ensure_init();
image_format_ = image_format_plugins_->detect_image_format(path);
// TODO: need to detect available format for the file path
{
PROF_SCOPED_RANGE(PROF_EVENT(cuimage_cuimage_open));
// The parser's open() returns a heap-allocated shared_ptr wrapper; take
// shared ownership, then free the wrapper itself.
std::shared_ptr<CuCIMFileHandle>* file_handle_shared =
reinterpret_cast<std::shared_ptr<CuCIMFileHandle>*>(image_format_->image_parser.open(path.c_str()));
file_handle_ = *file_handle_shared;
delete file_handle_shared;
// Set deleter to close the file handle
file_handle_->set_deleter(image_format_->image_parser.close);
}
// The metadata object is intentionally allocated without an owner here;
// ~CuImage releases it through image_metadata_->handle.
io::format::ImageMetadata& image_metadata = *(new io::format::ImageMetadata{});
image_metadata_ = &image_metadata.desc();
is_loaded_ = image_format_->image_parser.parse(file_handle_.get(), image_metadata_);
dim_indices_ = DimIndices(image_metadata_->dims);
auto& associated_image_info = image_metadata_->associated_image_info;
uint16_t image_count = associated_image_info.image_count;
if (image_count != associated_images_.size())
{
for (int i = 0; i < image_count; ++i)
{
associated_images_.emplace(associated_image_info.image_names[i]);
}
}
}
// Not implemented yet: would open `path` via a specific named plugin.
CuImage::CuImage(const filesystem::Path& path, const std::string& plugin_name)
{
PROF_SCOPED_RANGE(PROF_EVENT_P(cuimage_cuimage, 2));
// TODO: implement this
(void)path;
(void)plugin_name;
}
// Move constructor: swaps all members with the source, leaving the source
// holding this instance's default-initialized state.
CuImage::CuImage(CuImage&& cuimg) : std::enable_shared_from_this<CuImage>()
{
PROF_SCOPED_RANGE(PROF_EVENT_P(cuimage_cuimage, 3));
std::swap(file_handle_, cuimg.file_handle_);
std::swap(image_format_, cuimg.image_format_);
std::swap(image_metadata_, cuimg.image_metadata_);
std::swap(image_data_, cuimg.image_data_);
std::swap(is_loaded_, cuimg.is_loaded_);
std::swap(dim_indices_, cuimg.dim_indices_);
cuimg.associated_images_.swap(associated_images_);
}
// Builds a derived image that shares the parent's file handle and format but
// owns the given metadata/data descriptors (used for region reads/crops).
CuImage::CuImage(const CuImage* cuimg,
io::format::ImageMetadataDesc* image_metadata,
cucim::io::format::ImageDataDesc* image_data)
: std::enable_shared_from_this<CuImage>()
{
PROF_SCOPED_RANGE(PROF_EVENT_P(cuimage_cuimage, 4));
file_handle_ = cuimg->file_handle_;
image_format_ = cuimg->image_format_;
image_metadata_ = image_metadata;
image_data_ = image_data;
is_loaded_ = true;
if (image_metadata)
{
dim_indices_ = DimIndices(image_metadata->dims);
}
auto& associated_image_info = image_metadata_->associated_image_info;
uint16_t image_count = associated_image_info.image_count;
if (image_count != associated_images_.size())
{
for (int i = 0; i < image_count; ++i)
{
associated_images_.emplace(associated_image_info.image_names[i]);
}
}
}
// Default-constructs an empty image with a placeholder (empty-path) file handle.
CuImage::CuImage() : std::enable_shared_from_this<CuImage>()
{
PROF_SCOPED_RANGE(PROF_EVENT_P(cuimage_cuimage, 5));
file_handle_ = std::make_shared<CuCIMFileHandle>();
file_handle_->path = const_cast<char*>("");
}
// Releases, in order: parsed metadata (JSON buffer + the owning metadata
// object), the pixel buffer (host or device), its shape/strides/shm-name
// allocations and loader, then the file handle. Order matters: the loader
// must be destroyed before the file handle is closed (see note at the end).
CuImage::~CuImage()
{
PROF_SCOPED_RANGE(PROF_EVENT(cuimage__cuimage));
if (image_metadata_)
{
// Memory for json_data needs to be manually released if image_metadata_->json_data is not ""
if (image_metadata_->json_data && *image_metadata_->json_data != '\0')
{
cucim_free(image_metadata_->json_data);
image_metadata_->json_data = nullptr;
}
// Delete object (cucim::io::format::ImageMetadata) that embeds image_metadata_
if (image_metadata_->handle)
{
// Keep original handle pointer before clearing it and delete the class object.
void* handle_ptr = image_metadata_->handle;
image_metadata_->handle = nullptr;
delete static_cast<cucim::io::format::ImageMetadata*>(handle_ptr);
}
image_metadata_ = nullptr;
}
if (image_data_)
{
if (image_data_->container.data)
{
DLDevice& device = image_data_->container.device;
auto device_type = static_cast<io::DeviceType>(device.device_type);
switch (device_type)
{
case io::DeviceType::kCPU:
cucim_free(image_data_->container.data);
image_data_->container.data = nullptr;
break;
case io::DeviceType::kCUDA:
// Device memory is freed only when a loader allocated it;
// the pointer is detached from the container either way.
if (image_data_->loader)
{
cudaError_t cuda_status;
CUDA_TRY(cudaFree(image_data_->container.data));
}
image_data_->container.data = nullptr;
break;
case io::DeviceType::kCUDAHost:
case io::DeviceType::kCUDAManaged:
case io::DeviceType::kCPUShared:
case io::DeviceType::kCUDAShared:
// No release path implemented for these memory types yet.
fmt::print(stderr, "Device type {} is not supported!\n", device_type);
break;
}
}
if (image_data_->container.shape)
{
cucim_free(image_data_->container.shape);
image_data_->container.shape = nullptr;
}
if (image_data_->container.strides)
{
cucim_free(image_data_->container.strides);
image_data_->container.strides = nullptr;
}
if (image_data_->shm_name)
{
cucim_free(image_data_->shm_name);
image_data_->shm_name = nullptr;
}
if (image_data_->loader)
{
auto loader = reinterpret_cast<cucim::loader::ThreadBatchDataLoader*>(image_data_->loader);
delete loader;
image_data_->loader = nullptr;
}
cucim_free(image_data_);
image_data_ = nullptr;
}
close(); // close file handle (NOTE:: close the file handle after loader is deleted)
image_format_ = nullptr; // memory release is handled by the framework
}
// --- Process-wide service accessors ---------------------------------------
// Plugin framework shared by all CuImage instances.
Framework* CuImage::get_framework()
{
return framework_;
}
// Global configuration object.
config::Config* CuImage::get_config()
{
return config_.get();
}
// Shared profiler instance.
std::shared_ptr<profiler::Profiler> CuImage::profiler()
{
return profiler_;
}
// Reconfigures tracing on the shared profiler, then returns it.
std::shared_ptr<profiler::Profiler> CuImage::profiler(profiler::ProfilerConfig& config)
{
profiler_->trace(config.trace);
return profiler_;
}
// Global image-cache manager.
cache::ImageCacheManager& CuImage::cache_manager()
{
return *cache_manager_;
}
// Currently active image cache.
std::shared_ptr<cache::ImageCache> CuImage::cache()
{
return cache_manager_->get_cache();
}
// Reconfigures the cache from `config` and returns the (new) cache.
std::shared_ptr<cache::ImageCache> CuImage::cache(cache::ImageCacheConfig& config)
{
return cache_manager_->cache(config);
}
// Whether profiler tracing is currently enabled.
bool CuImage::is_trace_enabled()
{
return profiler_->trace();
}
// Path of the underlying file; empty when the handle records no path.
filesystem::Path CuImage::path() const
{
return file_handle_->path == nullptr ? "" : file_handle_->path;
}
// True when parsing/reading succeeded for this instance.
bool CuImage::is_loaded() const
{
return is_loaded_;
}
// Device (type, index, shared-memory name) holding the current pixel data;
// defaults to plain "cpu" when no data has been loaded.
io::Device CuImage::device() const
{
if (image_data_)
{
DLDevice& device = image_data_->container.device;
auto device_type = static_cast<io::DeviceType>(device.device_type);
auto device_id = static_cast<io::DeviceIndex>(device.device_id);
std::string shm_name = image_data_->shm_name == nullptr ? "" : image_data_->shm_name;
return io::Device(device_type, device_id, shm_name);
}
else
{
return io::Device("cpu");
}
}
// Raw (format-native) metadata blob; empty Metadata when unavailable.
Metadata CuImage::raw_metadata() const
{
if (image_metadata_ && image_metadata_->raw_data)
{
return Metadata(image_metadata_->raw_data);
}
return Metadata{};
}
// Parsed (JSON) metadata; empty Metadata when no metadata was parsed.
Metadata CuImage::metadata() const
{
if (image_metadata_)
{
return Metadata(image_metadata_->json_data);
}
return Metadata{};
}
// Number of dimensions of the image. Returns 0 when no metadata is available
// -- the original dereferenced image_metadata_ unconditionally, unlike its
// siblings dims()/shape() which guard against a null metadata pointer.
uint16_t CuImage::ndim() const
{
    if (image_metadata_)
    {
        return image_metadata_->ndim;
    }
    return 0;
}

// Dimension-order string (e.g. "YXC"); empty when no metadata has been parsed.
std::string CuImage::dims() const
{
    if (image_metadata_)
    {
        return image_metadata_->dims;
    }
    return std::string{};
}

// Copies the first `ndim` extents out of the parsed metadata; returns an
// empty shape when no metadata is available.
Shape CuImage::shape() const
{
    std::vector<int64_t> result_shape;
    if (image_metadata_)
    {
        uint16_t ndim = image_metadata_->ndim;
        result_shape.reserve(ndim);
        for (int i = 0; i < ndim; ++i)
        {
            result_shape.push_back(image_metadata_->shape[i]);
        }
    }
    return result_shape;
}
// Extents for the requested dimension order (defaults to the image's own
// order); unknown dimension characters are silently skipped.
std::vector<int64_t> CuImage::size(std::string dim_order) const
{
std::vector<int64_t> result_size;
if (image_metadata_)
{
if (dim_order.empty())
{
dim_order = std::string(image_metadata_->dims);
}
result_size.reserve(dim_order.size());
for (const char& c : dim_order)
{
auto index = dim_indices_.index(c);
if (index != -1)
{
result_size.push_back(image_metadata_->shape[index]);
}
}
}
return result_size;
}
// DLPack dtype of the pixel data; falls back to the metadata dtype, then to
// uint8 when neither data nor metadata is available.
DLDataType CuImage::dtype() const
{
const memory::DLTContainer img_data = container();
if (img_data)
{
const DLDataType dtype = img_data.dtype();
return dtype;
}
else
{
if (image_metadata_)
{
return image_metadata_->dtype;
}
}
return DLDataType({ DLDataTypeCode::kDLUInt, 8, 1 });
}
// NumPy typestr of the pixel data; "|u1" (uint8) when nothing is available.
std::string CuImage::typestr() const
{
const memory::DLTContainer img_data = container();
if (img_data)
{
const char* type_str = img_data.numpy_dtype();
return std::string(type_str);
}
else
{
if (image_metadata_)
{
return std::string(memory::to_numpy_dtype(image_metadata_->dtype));
}
}
return "|u1";
}
// Channel names from metadata along the 'C' dimension; empty when the image
// has no channel dimension or no metadata.
std::vector<std::string> CuImage::channel_names() const
{
std::vector<std::string> channel_names;
if (image_metadata_)
{
auto channel_index = dim_indices_.index('C');
if (channel_index != -1)
{
int channel_size = image_metadata_->shape[channel_index];
channel_names.reserve(channel_size);
for (int i = 0; i < channel_size; ++i)
{
channel_names.emplace_back(std::string(image_metadata_->channel_names[i]));
}
}
}
return channel_names;
}
// Physical spacing per dimension in `dim_order` (defaults to the image's own
// order). Unknown dimensions get spacing 1.0; with no metadata, every
// requested dimension yields 1.0.
std::vector<float> CuImage::spacing(std::string dim_order) const
{
std::vector<float> result_spacing;
result_spacing.reserve(dim_order.size());
if (image_metadata_)
{
if (dim_order.empty())
{
dim_order = std::string(image_metadata_->dims);
result_spacing.reserve(dim_order.size());
}
for (const char& c : dim_order)
{
auto index = dim_indices_.index(c);
if (index != -1)
{
result_spacing.push_back(image_metadata_->spacing[index]);
}
else
{
result_spacing.push_back(1.0);
}
}
}
else
{
for (const char& c : dim_order)
{
(void)c;
result_spacing.push_back(1.0);
}
}
return result_spacing;
}
// Spacing-unit strings per dimension, parallel to spacing(); unknown
// dimensions (or missing metadata) yield empty strings.
std::vector<std::string> CuImage::spacing_units(std::string dim_order) const
{
std::vector<std::string> result_spacing_units;
result_spacing_units.reserve(dim_order.size());
if (image_metadata_)
{
if (dim_order.empty())
{
dim_order = std::string(image_metadata_->dims);
result_spacing_units.reserve(dim_order.size());
}
for (const char& c : dim_order)
{
auto index = dim_indices_.index(c);
if (index != -1)
{
result_spacing_units.emplace_back(std::string(image_metadata_->spacing_units[index]));
}
else
{
result_spacing_units.emplace_back(std::string(""));
}
}
}
else
{
for (const char& c : dim_order)
{
(void)c;
result_spacing_units.emplace_back(std::string(""));
}
}
return result_spacing_units;
}
// Physical origin of the image.
// BUG FIX: the previous implementation copied the metadata origin into a local
// array but then unconditionally returned {0, 0, 0}, discarding the copy.
// Now mirrors direction(): return the copied values when present, a default
// otherwise. Also guards against a null image_metadata_ (consistent with
// resolutions()).
std::array<float, 3> CuImage::origin() const
{
    if (image_metadata_ && image_metadata_->origin)
    {
        std::array<float, 3> result_origin;
        std::memcpy(result_origin.data(), image_metadata_->origin, sizeof(float) * 3);
        return result_origin;
    }
    return std::array<float, 3>{ 0., 0., 0. };
}

// Direction cosine matrix (row-major 3x3); identity when not recorded.
std::array<std::array<float, 3>, 3> CuImage::direction() const
{
    std::array<std::array<float, 3>, 3> result_direction;
    if (image_metadata_ && image_metadata_->direction)
    {
        std::memcpy(result_direction.data(), image_metadata_->direction, sizeof(float) * 9);
        return result_direction;
    }
    result_direction = { { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } } };
    return result_direction;
}

// Anatomical coordinate system label; defaults to "LPS" when not recorded.
std::string CuImage::coord_sys() const
{
    if (image_metadata_ && image_metadata_->coord_sys)
    {
        return std::string(image_metadata_->coord_sys);
    }
    return std::string("LPS");
}

// Resolution pyramid info; an empty descriptor when no metadata is available.
ResolutionInfo CuImage::resolutions() const
{
    if (image_metadata_)
    {
        return ResolutionInfo(image_metadata_->resolution_info);
    }
    return ResolutionInfo(io::format::ResolutionInfoDesc{});
}
// DLPack container view over the currently loaded pixel data; an empty
// (null-backed) container when no region has been read yet.
memory::DLTContainer CuImage::container() const
{
    if (!image_data_)
    {
        return memory::DLTContainer(nullptr);
    }
    return memory::DLTContainer(&image_data_->container, image_data_->shm_name);
}

// Batch loader attached to the current image data, or nullptr when there is
// no image data.
loader::ThreadBatchDataLoader* CuImage::loader() const
{
    if (!image_data_)
    {
        return nullptr;
    }
    return reinterpret_cast<loader::ThreadBatchDataLoader*>(image_data_->loader);
}
CuImage CuImage::read_region(std::vector<int64_t>&& location,
std::vector<int64_t>&& size,
uint16_t level,
uint32_t num_workers,
uint32_t batch_size,
bool drop_last,
uint32_t prefetch_factor,
bool shuffle,
uint64_t seed,
const DimIndices& region_dim_indices,
const io::Device& device,
DLTensor* buf,
const std::string& shm_name) const
{
PROF_SCOPED_RANGE(PROF_EVENT(cuimage_read_region));
(void)region_dim_indices;
(void)buf;
(void)shm_name;
// If location is not specified, location would be (0, 0) if Z=0. Otherwise, location would be (0, 0, 0)
if (location.empty())
{
location.emplace_back(0);
location.emplace_back(0);
}
const ResolutionInfo& res_info = resolutions();
// If `size` is not specified, size would be (width, height) of the image at the specified `level`.
if (size.empty())
{
const auto level_count = res_info.level_count();
if (level_count == 0)
{
throw std::runtime_error("[Error] No available resolutions in the image!");
}
const auto& level_dimension = res_info.level_dimension(level);
size.insert(size.end(), level_dimension.begin(), level_dimension.end());
}
// The number of locations should be the multiplication of the number of dimensions in the size.
if (location.size() % size.size() != 0)
{
throw std::runtime_error(
"[Error] The number of locations should be the multiplication of the number of dimensions in the size!");
}
// Make sure the batch size is not zero.
if (batch_size == 0)
{
batch_size = 1;
}
// num_workers would be always > 0 if output device type is CUDA
if (num_workers == 0 && device.type() == cucim::io::DeviceType::kCUDA)
{
num_workers = 1;
}
uint32_t size_ndim = size.size();
uint64_t location_len = location.size() / size_ndim;
std::string device_name = std::string(device);
cucim::io::format::ImageReaderRegionRequestDesc request{};
if (location_len > 1 || batch_size > 1 || num_workers > 0)
{
// ::Note:: Here, to pass vector data to C interface, we move data in the original vector to the vector in heap
// memory and create a unique pointer with 'new'. The data is transferred to ThreadBatchDataLoader class members
// (locations_ and size_) for automatic deletion on exit.
auto location_ptr = new std::vector<int64_t>();
location_ptr->swap(location);
auto location_unique = reinterpret_cast<void*>(new std::unique_ptr<std::vector<int64_t>>(location_ptr));
auto size_ptr = new std::vector<int64_t>();
size_ptr->swap(size);
auto size_unique = reinterpret_cast<void*>(new std::unique_ptr<std::vector<int64_t>>(size_ptr));
request.location = location_ptr->data();
request.location_unique = location_unique;
request.size = size_ptr->data();
request.size_unique = size_unique;
}
else
{
request.location = location.data();
request.size = size.data();
}
request.location_len = location_len;
request.size_ndim = size_ndim;
request.level = level;
request.num_workers = num_workers;
request.batch_size = batch_size;
request.drop_last = drop_last;
request.prefetch_factor = prefetch_factor;
request.shuffle = shuffle;
request.seed = seed;
request.device = device_name.data();
auto image_data = std::unique_ptr<io::format::ImageDataDesc, decltype(cucim_free)*>(
reinterpret_cast<io::format::ImageDataDesc*>(cucim_malloc(sizeof(io::format::ImageDataDesc))), cucim_free);
memset(image_data.get(), 0, sizeof(io::format::ImageDataDesc));
try
{
// Read region from internal file if image_data_ is nullptr
if (image_data_ == nullptr)
{
if (!file_handle_) // file_handle_ is not opened
{
throw std::runtime_error("[Error] The image file is closed!");
}
if (!image_format_->image_reader.read(
file_handle_.get(), image_metadata_, &request, image_data.get(), nullptr /*out_metadata*/))
{
throw std::runtime_error("[Error] Failed to read image!");
}
}
else // Read region by cropping image
{
const char* dims_str = image_metadata_->dims;
if (strncmp("YXC", dims_str, 4) != 0)
{
throw std::runtime_error(fmt::format("[Error] The image is not in YXC format! ({})", dims_str));
}
if (image_data_->container.data == nullptr)
{
throw std::runtime_error(
"[Error] The image data is nullptr! It is possible that the object is iterator and the image data "
"is not loaded yet! Please advance the iterator first!");
}
crop_image(request, *image_data);
}
}
catch (std::invalid_argument& e)
{
throw e;
}
//
// Metadata Setup
//
// TODO: fill correct metadata information
io::format::ImageMetadata& out_metadata = *(new io::format::ImageMetadata{});
DLTensor& image_container = image_data->container;
// Note: int-> uint16_t due to type differences between ImageMetadataDesc.ndim and DLTensor.ndim
const uint16_t ndim = image_container.ndim;
auto& resource = out_metadata.get_resource();
std::string_view dims{ "YXC" };
if (batch_size > 1)
{
dims = { "NYXC" };
}
// Information from image_data
std::pmr::vector<int64_t> shape(&resource);
shape.reserve(ndim);
shape.insert(shape.end(), &image_container.shape[0], &image_container.shape[ndim]);
DLDataType& dtype = image_container.dtype;
// TODO: Do not assume channel names as 'RGB' or 'RGBA'
uint8_t n_ch = image_container.shape[2];
std::pmr::vector<std::string_view> channel_names(&resource);
channel_names.reserve(n_ch);
if (n_ch == 3)
{
// std::pmr::vector<std::string_view> channel_names(
// { std::string_view{ "R" }, std::string_view{ "G" }, std::string_view{ "B" } }, &resource);
channel_names.emplace_back(std::string_view{ "R" });
channel_names.emplace_back(std::string_view{ "G" });
channel_names.emplace_back(std::string_view{ "B" });
}
else
{
channel_names.emplace_back(std::string_view{ "R" });
channel_names.emplace_back(std::string_view{ "G" });
channel_names.emplace_back(std::string_view{ "B" });
channel_names.emplace_back(std::string_view{ "A" });
}
std::pmr::vector<float> spacing(&resource);
spacing.reserve(ndim);
float* image_spacing = image_metadata_->spacing;
spacing.insert(spacing.end(), &image_spacing[0], &image_spacing[ndim]);
std::pmr::vector<std::string_view> spacing_units(&resource);
spacing_units.reserve(ndim);
int index = 0;
if (ndim == 4)
{
index = 1;
// The first dimension is for 'batch' ('N')
spacing_units.emplace_back(std::string_view{ "batch" });
}
const auto& level_downsample = res_info.level_downsample(level);
for (; index < ndim; ++index)
{
int64_t dim_index = dim_indices_.index(dims[index]);
if (dim_index < 0)
{
throw std::runtime_error(fmt::format("[Error] Invalid dimension name: {}", dims[index]));
}
const char* str_ptr = image_metadata_->spacing_units[dim_index];
size_t str_len = strlen(image_metadata_->spacing_units[dim_index]);
char* spacing_unit = static_cast<char*>(resource.allocate(str_len + 1));
memcpy(spacing_unit, str_ptr, str_len);
spacing_unit[str_len] = '\0';
// std::pmr::string spacing_unit{ image_metadata_->spacing_units[dim_index], &resource };
spacing_units.emplace_back(std::string_view{ spacing_unit });
// Update spacing based on level_downsample
char dim_char = image_metadata_->dims[dim_index];
switch (dim_char)
{
case 'X':
case 'Y':
spacing[index] /= level_downsample;
break;
default:
break;
}
}
std::pmr::vector<float> origin(&resource);
origin.reserve(3);
float* image_origin = image_metadata_->origin;
origin.insert(origin.end(), &image_origin[0], &image_origin[3]);
// Direction cosines (size is always 3x3)
std::pmr::vector<float> direction(&resource);
direction.reserve(3);
float* image_direction = image_metadata_->direction;
direction.insert(direction.end(), &image_direction[0], &image_direction[3 * 3]);
// The coordinate frame in which the direction cosines are measured (either 'LPS'(ITK/DICOM) or 'RAS'(NIfTI/3D
// Slicer))
std::string_view coord_sys{ "" };
const char* coord_sys_ptr = image_metadata_->coord_sys;
if (coord_sys_ptr)
{
size_t coord_sys_len = strlen(coord_sys_ptr);
char* coord_sys_str = static_cast<char*>(resource.allocate(coord_sys_len + 1));
memcpy(coord_sys_str, coord_sys_ptr, coord_sys_len);
coord_sys_str[coord_sys_len] = '\0';
coord_sys = std::string_view{ coord_sys_str };
}
// std::pmr::string coord_sys_str{ image_metadata_->coord_sys ? image_metadata_->coord_sys : "", &resource };
// std::string_view coord_sys{ coord_sys_str };
// Manually set resolution dimensions to 2
const uint16_t level_ndim = 2;
std::pmr::vector<int64_t> level_dimensions(&resource);
level_dimensions.reserve(level_ndim * 1); // it has only one size
level_dimensions.insert(level_dimensions.end(), request.size, &request.size[request.size_ndim]);
std::pmr::vector<float> level_downsamples(&resource);
level_downsamples.reserve(1);
level_downsamples.emplace_back(1.0);
std::pmr::vector<uint32_t> level_tile_sizes(&resource);
level_tile_sizes.reserve(level_ndim * 1); // it has only one size
level_tile_sizes.insert(level_tile_sizes.end(), request.size, &request.size[request.size_ndim]); // same with
// level_dimension
// Empty associated images
const size_t associated_image_count = 0;
std::pmr::vector<std::string_view> associated_image_names(&resource);
// Partial image doesn't include raw metadata
std::string_view raw_data{ "" };
// Partial image doesn't include json metadata
std::string_view json_data{ "" };
out_metadata.ndim(ndim);
out_metadata.dims(std::move(dims));
out_metadata.shape(std::move(shape));
out_metadata.dtype(dtype);
out_metadata.channel_names(std::move(channel_names));
out_metadata.spacing(std::move(spacing));
out_metadata.spacing_units(std::move(spacing_units));
out_metadata.origin(std::move(origin));
out_metadata.direction(std::move(direction));
out_metadata.coord_sys(std::move(coord_sys));
out_metadata.level_count(1);
out_metadata.level_ndim(2);
out_metadata.level_dimensions(std::move(level_dimensions));
out_metadata.level_downsamples(std::move(level_downsamples));
out_metadata.level_tile_sizes(std::move(level_tile_sizes));
out_metadata.image_count(associated_image_count);
out_metadata.image_names(std::move(associated_image_names));
out_metadata.raw_data(raw_data);
out_metadata.json_data(json_data);
return CuImage(this, &out_metadata.desc(), image_data.release());
}
// Returns the set of associated-image names (e.g. 'label', 'macro') available for this image.
std::set<std::string> CuImage::associated_images() const
{
    return associated_images_;
}
// Reads an associated image (e.g. 'label', 'macro') by name into a new CuImage on `device`.
// Returns an empty CuImage if `name` is not among the associated images.
// Throws std::runtime_error if the file is closed or the read fails.
CuImage CuImage::associated_image(const std::string& name, const io::Device& device) const
{
    PROF_SCOPED_RANGE(PROF_EVENT(cuimage_associated_image));
    // `close()` resets `file_handle_` to nullptr, so the pointer itself must be checked
    // before dereferencing it (previously `file_handle_->fd` was read unconditionally,
    // which is undefined behavior on a closed image).
    if (!file_handle_ || file_handle_->fd < 0) // file_handle_ is not opened
    {
        throw std::runtime_error("[Error] The image file is closed!");
    }
    auto it = associated_images_.find(name);
    if (it != associated_images_.end())
    {
        io::format::ImageReaderRegionRequestDesc request{};
        request.associated_image_name = const_cast<char*>(name.c_str());
        std::string device_name = std::string(device);
        request.device = device_name.data();

        // Zero-initialized output descriptor, freed automatically on exception.
        auto out_image_data = std::unique_ptr<io::format::ImageDataDesc, decltype(cucim_free)*>(
            reinterpret_cast<io::format::ImageDataDesc*>(cucim_malloc(sizeof(io::format::ImageDataDesc))), cucim_free);
        memset(out_image_data.get(), 0, sizeof(io::format::ImageDataDesc));

        // Ownership of `out_metadata` is transferred to the returned CuImage.
        io::format::ImageMetadata& out_metadata = *(new io::format::ImageMetadata{});

        if (!image_format_->image_reader.read(
                file_handle_.get(), image_metadata_, &request, out_image_data.get(), &out_metadata.desc()))
        {
            throw std::runtime_error("[Error] Failed to read image!");
        }

        return CuImage(this, &out_metadata.desc(), out_image_data.release());
    }
    return CuImage{};
}
// Saves the in-memory image as a binary PPM ('P6') file at `file_path`.
// For CUDA-resident data, the raster is first copied to a temporary host buffer.
// No-op when no image data is loaded.
void CuImage::save(std::string file_path) const
{
    // Save ppm file for now.
    if (image_data_)
    {
        std::fstream fs(file_path, std::fstream::out | std::fstream::binary);
        // `!fs` also covers open failure (failbit); `fs.bad()` alone would miss it.
        if (!fs)
        {
            CUCIM_ERROR("Opening file failed!");
        }
        fs << "P6\n";
        auto image_size = size("XY");
        auto width = image_size[0];
        auto height = image_size[1];
        // PPM header: width, height, maxval (255).
        fs << width << "\n" << height << "\n" << 0xff << "\n";

        uint8_t* data = static_cast<uint8_t*>(image_data_->container.data);
        uint8_t* raster = nullptr;
        size_t raster_size = width * height * 3;

        const cucim::io::Device& in_device = device();
        if (in_device.type() == cucim::io::DeviceType::kCUDA)
        {
            // Stage the device raster into host memory before writing.
            cudaError_t cuda_status;
            raster = static_cast<uint8_t*>(cucim_malloc(raster_size));
            CUDA_TRY(cudaMemcpy(raster, data, raster_size, cudaMemcpyDeviceToHost));
            if (cuda_status)
            {
                cucim_free(raster);
                throw std::runtime_error("Error during cudaMemcpy!");
            }
            data = raster;
        }

        for (unsigned int i = 0; (i < raster_size) && fs.good(); ++i)
        {
            fs << data[i];
        }
        fs.flush();

        const bool write_failed = fs.bad();
        // Free the host staging buffer on both success and failure
        // (previously it was freed only on the failure path, leaking otherwise).
        if (raster)
        {
            cucim_free(raster);
        }
        if (write_failed)
        {
            CUCIM_ERROR("Writing data failed!");
        }
        fs.close();
    }
}
// Closes the image by releasing the file handle (resets `file_handle_` to nullptr).
void CuImage::close()
{
    file_handle_ = nullptr;
}
// Lazily loads the image-format plugin libraries, guarded by `mutex_`.
// Throws if the framework is not initialized or if a plugin library cannot be loaded.
void CuImage::ensure_init()
{
    PROF_SCOPED_RANGE(PROF_EVENT(cuimage_ensure_init));
    ScopedLock g(mutex_);

    if (!framework_)
    {
        CUCIM_ERROR("Framework is not initialized!");
    }
    if (!(*image_format_plugins_))
    {
        image_format_plugins_ = std::make_unique<cucim::plugin::ImageFormat>();

        const std::vector<std::string>& plugin_names = get_config()->plugin().plugin_names;
        const char* plugin_root = framework_->get_plugin_root();
        for (auto& plugin_name : plugin_names)
        {
            PROF_SCOPED_RANGE(PROF_EVENT(cuimage_ensure_init_plugin_iter));
            // TODO: Here 'LINUX' path separator is used. Need to make it generalize once filesystem library is
            // available.
            std::string plugin_file_path = (plugin_root && *plugin_root != 0) ?
                                               fmt::format("{}/{}", plugin_root, plugin_name) :
                                               fmt::format("{}", plugin_name);
            // Fall back to the bare plugin name (search via the dynamic loader path).
            if (!cucim::util::file_exists(plugin_file_path.c_str()))
            {
                plugin_file_path = fmt::format("{}", plugin_name);
            }

            const auto& image_formats =
                framework_->acquire_interface_from_library<cucim::io::format::IImageFormat>(plugin_file_path.c_str());
            // Validate the load result *before* registering it (previously a null
            // pointer was passed to add_interfaces() before this check ran).
            if (image_formats == nullptr)
            {
                throw std::runtime_error(fmt::format("Dependent library '{}' cannot be loaded!", plugin_file_path));
            }
            image_format_plugins_->add_interfaces(image_formats);
        }
    }
}
// Crops the region described by `request` out of the already-loaded image buffer
// (`image_data_`) into `out_image_data`, converting between CPU/CUDA memory as needed.
// Assumes the in-memory image is a compact, row-major YXC raster (validated by the caller).
// Throws std::invalid_argument for out-of-bounds/invalid requests, std::runtime_error for
// CUDA failures or unsupported device types, and std::bad_alloc on device allocation failure.
bool CuImage::crop_image(const io::format::ImageReaderRegionRequestDesc& request,
                         io::format::ImageDataDesc& out_image_data) const
{
    PROF_SCOPED_RANGE(PROF_EVENT(cuimage_crop_image));
    const int32_t ndim = request.size_ndim;

    if (request.level >= image_metadata_->resolution_info.level_count)
    {
        throw std::invalid_argument(fmt::format("Invalid level ({}) in the request! (Should be < {})", request.level,
                                                image_metadata_->resolution_info.level_count));
    }

    const cucim::io::Device& in_device = device();

    auto original_img_width = image_metadata_->shape[dim_indices_.index('X')];
    auto original_img_height = image_metadata_->shape[dim_indices_.index('Y')];
    // TODO: consider other cases where samples_per_pixel is not same with # of channels
    //       (we cannot use `ifd->samples_per_pixel()` here)
    uint32_t samples_per_pixel = static_cast<uint32_t>(image_metadata_->shape[dim_indices_.index('C')]);

    // Validate that the requested region lies inside the image bounds.
    for (int32_t i = 0; i < ndim; ++i)
    {
        if (request.location[i] < 0)
        {
            throw std::invalid_argument(
                fmt::format("Invalid location ({}) in the request! (Should be >= 0)", request.location[i]));
        }
        if (request.size[i] <= 0)
        {
            throw std::invalid_argument(fmt::format("Invalid size ({}) in the request! (Should be > 0)", request.size[i]));
        }
    }
    if (request.location[0] + request.size[0] > original_img_width)
    {
        throw std::invalid_argument(
            fmt::format("Invalid location/size (it exceeds the image width {})", original_img_width));
    }
    if (request.location[1] + request.size[1] > original_img_height)
    {
        throw std::invalid_argument(
            fmt::format("Invalid location/size (it exceeds the image height {})", original_img_height));
    }

    std::string device_name(request.device);
    if (request.shm_name)
    {
        device_name = device_name + fmt::format("[{}]", request.shm_name); // TODO: check performance
    }
    cucim::io::Device out_device(device_name);

    // Region geometry: [sx, ex] x [sy, ey] inclusive, in pixels.
    int64_t sx = request.location[0];
    int64_t sy = request.location[1];
    int64_t w = request.size[0];
    int64_t h = request.size[1];
    uint64_t ex = sx + w - 1;
    uint64_t ey = sy + h - 1;

    uint8_t* src_ptr = static_cast<uint8_t*>(image_data_->container.data);
    size_t raster_size = w * h * samples_per_pixel;
    void* raster = nullptr;

    // Byte offsets/strides for copying the cropped rows out of the source raster.
    int64_t dest_stride_x_bytes = w * samples_per_pixel;
    int64_t src_stride_x = original_img_width;
    int64_t src_stride_x_bytes = original_img_width * samples_per_pixel;
    int64_t start_offset = (sx + (sy * src_stride_x)) * samples_per_pixel;
    int64_t end_offset = (ex + (ey * src_stride_x)) * samples_per_pixel;

    switch (in_device.type())
    {
    case cucim::io::DeviceType::kCPU: {
        raster = cucim_malloc(raster_size);
        auto dest_ptr = static_cast<uint8_t*>(raster);
        // Copy row by row from the source raster.
        for (int64_t src_offset = start_offset; src_offset <= end_offset; src_offset += src_stride_x_bytes)
        {
            memcpy(dest_ptr, src_ptr + src_offset, dest_stride_x_bytes);
            dest_ptr += dest_stride_x_bytes;
        }
        // Copy the raster memory and free it if needed.
        cucim::memory::move_raster_from_host((void**)&raster, raster_size, out_device);
        break;
    }
    case cucim::io::DeviceType::kCUDA: {
        cudaError_t cuda_status;
        if (out_device.type() == cucim::io::DeviceType::kCPU)
        {
            // cuda -> host at bulk then host -> host per row is faster than cuda-> cuda per row, then cuda->host at
            // bulk.
            uint8_t* copied_src_ptr = static_cast<uint8_t*>(cucim_malloc(src_stride_x_bytes * h));
            CUDA_TRY(cudaMemcpy(copied_src_ptr, src_ptr + start_offset, src_stride_x_bytes * h, cudaMemcpyDeviceToHost));
            if (cuda_status)
            {
                cucim_free(copied_src_ptr);
                throw std::runtime_error("Error during cudaMemcpy!");
            }
            // Offsets are now relative to the staged host copy.
            end_offset -= start_offset;
            start_offset = 0;

            raster = cucim_malloc(raster_size);
            auto dest_ptr = static_cast<uint8_t*>(raster);
            for (int64_t src_offset = start_offset; src_offset <= end_offset; src_offset += src_stride_x_bytes)
            {
                memcpy(dest_ptr, copied_src_ptr + src_offset, dest_stride_x_bytes);
                dest_ptr += dest_stride_x_bytes;
            }
            cucim_free(copied_src_ptr);
        }
        else
        {
            CUDA_TRY(cudaMalloc(&raster, raster_size));
            if (cuda_status)
            {
                throw std::bad_alloc();
            }
            auto dest_ptr = static_cast<uint8_t*>(raster);
            CUDA_TRY(cudaMemcpy2D(dest_ptr, dest_stride_x_bytes, src_ptr + start_offset, src_stride_x_bytes,
                                  dest_stride_x_bytes, h, cudaMemcpyDeviceToDevice));
            if (cuda_status)
            {
                // Free the just-allocated device buffer before throwing
                // (previously it leaked on this error path).
                cudaFree(raster);
                throw std::runtime_error("Error during cudaMemcpy2D!");
            }
            // Copy the raster memory and free it if needed.
            cucim::memory::move_raster_from_device((void**)&raster, raster_size, out_device);
        }
        break;
    }
    case cucim::io::DeviceType::kCUDAHost:
    case cucim::io::DeviceType::kCUDAManaged:
    case cucim::io::DeviceType::kCPUShared:
    case cucim::io::DeviceType::kCUDAShared:
        throw std::runtime_error(fmt::format("Device type {} not supported!", in_device.type()));
        break;
    }

    // Fill in the output DLTensor: compact, row-major, with the cropped shape.
    auto& out_image_container = out_image_data.container;
    out_image_container.data = raster;
    out_image_container.device = DLDevice{ static_cast<DLDeviceType>(out_device.type()), out_device.index() };
    out_image_container.ndim = image_metadata_->ndim;
    out_image_container.dtype = image_metadata_->dtype;
    out_image_container.strides = nullptr; // Tensor is compact and row-majored
    out_image_container.byte_offset = 0;
    // Set correct shape
    out_image_container.shape = static_cast<int64_t*>(cucim_malloc(sizeof(int64_t) * image_metadata_->ndim));
    memcpy(out_image_container.shape, image_metadata_->shape, sizeof(int64_t) * image_metadata_->ndim);
    out_image_container.shape[0] = h;
    out_image_container.shape[1] = w;

    auto& shm_name = out_device.shm_name();
    size_t shm_name_len = shm_name.size();
    if (shm_name_len != 0)
    {
        out_image_data.shm_name = static_cast<char*>(cucim_malloc(shm_name_len + 1));
        memcpy(out_image_data.shm_name, shm_name.c_str(), shm_name_len + 1);
    }
    else
    {
        out_image_data.shm_name = nullptr;
    }

    return true;
}
/////////////////////////////
// Iterator implementation //
/////////////////////////////

// Iterator pointing at the first batch of this image.
CuImage::iterator CuImage::begin()
{
    return iterator(shared_from_this());
}

// Past-the-end iterator (constructed with `ending == true`).
CuImage::iterator CuImage::end()
{
    return iterator(shared_from_this(), true);
}

// Const iterator pointing at the first batch of this image.
CuImage::const_iterator CuImage::begin() const
{
    return const_iterator(shared_from_this());
}

// Const past-the-end iterator.
CuImage::const_iterator CuImage::end() const
{
    return const_iterator(shared_from_this(), true);
}
// Constructs a batch iterator over `cuimg`.
// `ending == true` builds a past-the-end iterator; otherwise the iterator starts at the
// current position (for multi-worker loaders, the number of batches already processed).
// Throws std::runtime_error if `cuimg` is null, and std::out_of_range for a begin
// iterator when no image data has been loaded.
template <typename DataType>
CuImageIterator<DataType>::CuImageIterator(std::shared_ptr<DataType> cuimg, bool ending)
    : cuimg_(cuimg), loader_(nullptr), batch_index_(0), total_batch_count_(0)
{
    if (!cuimg_)
    {
        throw std::runtime_error("CuImageIterator: cuimg is nullptr!");
    }

    auto& image_data = cuimg_->image_data_;
    cucim::loader::ThreadBatchDataLoader* loader = nullptr;
    if (image_data)
    {
        // The loader pointer is stored type-erased in the image data descriptor.
        loader = reinterpret_cast<cucim::loader::ThreadBatchDataLoader*>(image_data->loader);
        loader_ = loader;
    }

    if (ending) // point to the end
    {
        if (image_data)
        {
            if (loader)
            {
                total_batch_count_ = loader->total_batch_count();
                batch_index_ = total_batch_count_;
            }
            else
            {
                // Without a loader the image is a single batch.
                total_batch_count_ = 1;
                batch_index_ = 1;
            }
        }
        else
        {
            batch_index_ = 0;
        }
    }
    else
    {
        if (image_data)
        {
            if (loader)
            {
                total_batch_count_ = loader->total_batch_count();
                if (loader->size() > 1)
                {
                    // Multi-worker loader: resume from its processed-batch count.
                    batch_index_ = loader->processed_batch_count();
                }
                else
                {
                    batch_index_ = 0;
                }
            }
            else
            {
                total_batch_count_ = 1;
                batch_index_ = 0;
            }
        }
        else
        {
            throw std::out_of_range("Batch index out of range! ('image_data_' is null)");
        }
    }
}
// Dereference: the underlying image object (shared pointer).
template <typename DataType>
typename CuImageIterator<DataType>::reference CuImageIterator<DataType>::operator*() const
{
    return cuimg_;
}

// Member access: raw pointer to the underlying image object.
template <typename DataType>
typename CuImageIterator<DataType>::pointer CuImageIterator<DataType>::operator->()
{
    return cuimg_.get();
}

template <typename DataType>
CuImageIterator<DataType>& CuImageIterator<DataType>::operator++()
{
    // Prefix increment
    increase_index_();
    return *this;
}

template <typename DataType>
CuImageIterator<DataType> CuImageIterator<DataType>::operator++(int)
{
    // Postfix increment: copy, advance, return the pre-increment copy.
    auto temp(*this);
    increase_index_();
    return temp;
}

// Iterators are equal when they reference the same image object and batch index.
template <typename DataType>
bool CuImageIterator<DataType>::operator==(const CuImageIterator<DataType>& other)
{
    return cuimg_.get() == other.cuimg_.get() && batch_index_ == other.batch_index_;
};

template <typename DataType>
bool CuImageIterator<DataType>::operator!=(const CuImageIterator<DataType>& other)
{
    return cuimg_.get() != other.cuimg_.get() || batch_index_ != other.batch_index_;
};
// Current batch index. For multi-worker loaders this is refreshed from the loader's
// processed-batch count; otherwise the locally tracked index is returned.
template <typename DataType>
int64_t CuImageIterator<DataType>::index()
{
    auto loader = reinterpret_cast<cucim::loader::ThreadBatchDataLoader*>(loader_);
    if (loader && (loader->size() > 1))
    {
        batch_index_ = loader->processed_batch_count();
    }
    return batch_index_;
}

// Total number of batches this iterator produces.
template <typename DataType>
uint64_t CuImageIterator<DataType>::size() const
{
    return total_batch_count_;
}
// Advances the iterator by one batch.
// When a loader is present: fetch the next batch buffer, free the previous one
// (CPU via cucim_free, CUDA via cudaFree), install the new buffer into the image's
// container, and update the batch dimension ('N') when batching. Without a loader,
// simply increment the local index up to the total batch count.
template <typename DataType>
void CuImageIterator<DataType>::increase_index_()
{
    auto loader = reinterpret_cast<cucim::loader::ThreadBatchDataLoader*>(loader_);
    if (loader)
    {
        auto next_data = loader->next_data();
        if (next_data)
        {
            auto& image_data = cuimg_->image_data_;
            auto image_data_ptr = reinterpret_cast<uint8_t**>(&(image_data->container.data));
            DLDevice& device = image_data->container.device;
            auto device_type = static_cast<io::DeviceType>(device.device_type);
            // Release the previous batch's buffer before replacing it.
            switch (device_type)
            {
            case io::DeviceType::kCPU:
                if (*image_data_ptr)
                {
                    cucim_free(*image_data_ptr);
                }
                break;
            case io::DeviceType::kCUDA:
                if (*image_data_ptr)
                {
                    cudaError_t cuda_status;
                    CUDA_ERROR(cudaFree(*image_data_ptr));
                }
                break;
            case io::DeviceType::kCUDAHost:
            case io::DeviceType::kCUDAManaged:
            case io::DeviceType::kCPUShared:
            case io::DeviceType::kCUDAShared:
                fmt::print(stderr, "Device type {} is not supported!\n", device_type);
                break;
            }
            *image_data_ptr = next_data;

            if (loader->batch_size() > 1)
            {
                // Set value for dimension 'N'
                cuimg_->image_data_->container.shape[0] = loader->data_batch_size();
                cuimg_->image_metadata_->shape[0] = loader->data_batch_size();
            }
        }
        if (loader->size() > 1)
        {
            // Multi-worker loader tracks progress itself.
            batch_index_ = loader->processed_batch_count();
        }
        else
        {
            if (batch_index_ < static_cast<int64_t>(total_batch_count_))
            {
                ++batch_index_;
            }
        }
    }
    else
    {
        if (batch_index_ < static_cast<int64_t>(total_batch_count_))
        {
            ++batch_index_;
        }
    }
}
} // namespace cucim
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/util/file.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/util/file.h"
#include <sys/stat.h>
namespace cucim::util
{
// Returns true if `path` refers to an existing filesystem entry.
// A path exists exactly when stat() succeeds on it.
bool file_exists(const char* path)
{
    struct stat status;
    const int result = stat(path, &status);
    return result == 0;
}
} // namespace cucim::util
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/util/platform.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/util/platform.h"
#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>
namespace cucim::util
{
// Detects whether the process is running under Windows Subsystem for Linux by
// inspecting the kernel release string, which contains 'Microsoft' (WSL1) or
// 'microsoft' (WSL2). Returns false when uname() fails or no match is found.
bool is_in_wsl()
{
    struct utsname kernel_info;
    if (uname(&kernel_info) != 0)
    {
        return false;
    }
    const char* match = strstr(kernel_info.release, "icrosoft");
    if (match == nullptr || match == kernel_info.release)
    {
        return false;
    }
    // Accept either capitalization of the leading character.
    const char prefix = match[-1];
    return (prefix == 'm') || (prefix == 'M');
}
} // namespace cucim::util
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/config/config.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/config/config.h"
#include "cucim/cache/cache_type.h"
#include "cucim/util/file.h"
#include <fmt/format.h>
#include <nlohmann/json.hpp>
#include <iostream>
#include <fstream>
#include <filesystem>
using json = nlohmann::json;
namespace cucim::config
{
// Builds the configuration in three stages:
//   1. parse a JSON config file located by get_config_path(), falling back to
//      built-in defaults when absent or unparsable,
//   2. apply environment-variable overrides,
//   3. run post-load initialization.
Config::Config()
{
    std::string config_path = get_config_path();
    bool is_configured_from_file = false;
    if (!config_path.empty())
    {
        is_configured_from_file = parse_config(config_path);
    }
    if (is_configured_from_file)
    {
        // Remember where the configuration came from.
        source_path_ = config_path;
    }
    else
    {
        set_default_configuration();
    }

    // Override config with environment variables
    override_from_envs();

    init_configs();
}
// Mutable access to the image-cache configuration section.
cucim::cache::ImageCacheConfig& Config::cache()
{
    return cache_;
}

// Mutable access to the plugin configuration section.
cucim::plugin::PluginConfig& Config::plugin()
{
    return plugin_;
}

// Mutable access to the profiler configuration section.
cucim::profiler::ProfilerConfig& Config::profiler()
{
    return profiler_;
}

// Shared-memory segment name for this process group, e.g. "cucim-shm.<pgid>".
std::string Config::shm_name() const
{
    return fmt::format("cucim-shm.{}", pgid());
}

// Current process id.
pid_t Config::pid() const
{
    return getpid();
}

// Parent process id.
pid_t Config::ppid() const
{
    return getppid();
}

// Process group id of the current process.
pid_t Config::pgid() const
{
    return getpgid(getpid());
}
// Locates the configuration file, returning the first existing candidate in order:
//   1. A path specified by 'CUCIM_CONFIG_PATH'
//   2. (current folder)/.cucim.json
//   3. $HOME/.cucim.json
// Returns an empty string when none exists.
std::string Config::get_config_path() const
{
    if (const char* env_path = std::getenv("CUCIM_CONFIG_PATH"))
    {
        if (cucim::util::file_exists(env_path))
        {
            return env_path;
        }
    }
    if (cucim::util::file_exists(kDefaultConfigFileName))
    {
        return kDefaultConfigFileName;
    }
    if (const char* home_dir = std::getenv("HOME"))
    {
        auto home_candidate = (std::filesystem::path(home_dir) / kDefaultConfigFileName).string();
        if (cucim::util::file_exists(home_candidate.c_str()))
        {
            return home_candidate;
        }
    }
    return std::string{};
}
// Parses the JSON configuration file at `path` (comments are allowed) and loads
// the "cache", "plugin", and "profiler" sections when present.
// Returns false (after printing a diagnostic to stderr) if parsing fails.
bool Config::parse_config(std::string& path)
{
    try
    {
        std::ifstream ifs(path);
        json obj = json::parse(ifs, nullptr /*cb*/, true /*allow_exceptions*/, true /*ignore_comments*/);

        // Each section is optional; only object-typed sections are loaded.
        json cache = obj["cache"];
        if (cache.is_object())
        {
            cache_.load_config(&cache);
        }

        json plugin = obj["plugin"];
        if (plugin.is_object())
        {
            plugin_.load_config(&plugin);
        }

        json profiler = obj["profiler"];
        if (profiler.is_object())
        {
            profiler_.load_config(&profiler);
        }
    }
    catch (const json::parse_error& e)
    {
        fmt::print(stderr,
                   "Failed to load configuration file: {}\n"
                   "    message: {}\n"
                   "    exception id: {}\n"
                   "    byte position of error: {}\n",
                   path, e.what(), e.id, e.byte);
        return false;
    }
    return true;
}
// Hook for programmatic defaults when no configuration file was found.
// (The member initializers of the Config class already provide the defaults.)
void Config::set_default_configuration()
{
    // Override if the initializer of Config class is not enough.
}
// Applies environment-variable overrides to the loaded configuration.
// CUCIM_TRACE=1 enables profiler tracing; any other value disables it.
void Config::override_from_envs()
{
    if (const char* env_p = std::getenv("CUCIM_TRACE"))
    {
        // env_p is guaranteed non-null inside this branch (if-init condition),
        // so the former nested null check was redundant and has been removed.
        profiler_.trace = (env_p[0] == '1');
    }
}
// Hook for post-load initialization after file and environment configuration
// has been applied.
void Config::init_configs()
{
    // Initialization if needed.
}
} // namespace cucim::config
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_manager.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/cache/image_cache_manager.h"
#include "image_cache_empty.h"
#include "image_cache_per_process.h"
#include "image_cache_shared_memory.h"
#include "cucim/cuimage.h"
#include "cucim/profiler/nvtx3.h"
#include <cstdlib>
#include <fmt/format.h>
namespace cucim::cache
{
// Estimates a cache memory capacity (in MiB, rounded up) sufficient to hold the tiles
// overlapped by a patch read sweeping across an image.
// All three size arguments must be 2-element vectors ({width, height} order);
// otherwise std::invalid_argument is thrown.
uint32_t preferred_memory_capacity(const std::vector<uint64_t>& image_size,
                                   const std::vector<uint32_t>& tile_size,
                                   const std::vector<uint32_t>& patch_size,
                                   uint32_t bytes_per_pixel)
{
    // https://godbolt.org/z/eMf946oE7 for test
    if (image_size.size() != 2 || tile_size.size() != 2 || patch_size.size() != 2)
    {
        throw std::invalid_argument(
            fmt::format("Please specify arguments with correct size (image_size:{}, tile_size:{}, patch_size:{})!",
                        image_size.size(), tile_size.size(), patch_size.size()));
    }

    // Number of tiles across the x-axis (ceiling division).
    const uint32_t tiles_across = (image_size[0] + (tile_size[0] - 1)) / tile_size[0];
    // Maximal number of tiles along the y-axis overlapped by a single patch.
    const uint64_t patch_span_y =
        std::min(image_size[1] + (tile_size[1] - 1), static_cast<uint64_t>(patch_size[1] + (tile_size[1] - 1)));
    const uint32_t tiles_down = patch_span_y / tile_size[1] + 1;

    // (tiles across) x (tile area) x (tiles down) x (bytes per pixel), rounded up to MiB.
    const uint64_t bytes_needed =
        static_cast<uint64_t>(tiles_across) * tile_size[0] * tile_size[1] * tiles_down * bytes_per_pixel;
    const uint32_t whole_mib = bytes_needed / kOneMiB;
    return (bytes_needed % kOneMiB == 0) ? whole_mib : whole_mib + 1;
}
// Builds the manager with a cache constructed from the global cuCIM configuration.
ImageCacheManager::ImageCacheManager() : cache_(create_cache())
{
}

// Reference to the currently active cache.
ImageCache& ImageCacheManager::cache() const
{
    return *cache_;
}

// Replaces the active cache with one built from `config` and returns it.
std::shared_ptr<cucim::cache::ImageCache> ImageCacheManager::cache(const ImageCacheConfig& config)
{
    cache_ = create_cache(config);
    return cache_;
}

// Shared ownership of the currently active cache.
std::shared_ptr<cucim::cache::ImageCache> ImageCacheManager::get_cache() const
{
    return cache_;
}

// Resizes the cache to `new_memory_capacity` MiB, deriving the item capacity
// from the default calculation.
void ImageCacheManager::reserve(uint32_t new_memory_capacity)
{
    ImageCacheConfig cache_config;
    cache_config.memory_capacity = new_memory_capacity;
    cache_config.capacity = calc_default_cache_capacity(kOneMiB * new_memory_capacity);

    cache_->reserve(cache_config);
}

// Resizes the cache with explicit memory (MiB) and item capacities.
void ImageCacheManager::reserve(uint32_t new_memory_capacity, uint32_t new_capacity)
{
    ImageCacheConfig cache_config;
    cache_config.memory_capacity = new_memory_capacity;
    cache_config.capacity = new_capacity;

    cache_->reserve(cache_config);
}
// Factory: builds a cache instance of the type requested by `cache_config`
// (no-cache, per-process, or shared-memory). Unknown types fall back to no-cache.
std::unique_ptr<ImageCache> ImageCacheManager::create_cache(const ImageCacheConfig& cache_config,
                                                            const cucim::io::DeviceType device_type)
{
    PROF_SCOPED_RANGE(PROF_EVENT(image_cache_create_cache));
    switch (cache_config.type)
    {
    case CacheType::kNoCache:
        return std::make_unique<EmptyImageCache>(cache_config);
    case CacheType::kPerProcess:
        return std::make_unique<PerProcessImageCache>(cache_config, device_type);
    case CacheType::kSharedMemory:
        return std::make_unique<SharedMemoryImageCache>(cache_config, device_type);
    default:
        return std::make_unique<EmptyImageCache>(cache_config);
    }
}
// Builds a cache using the cache section of the global cuCIM configuration.
std::unique_ptr<ImageCache> ImageCacheManager::create_cache() const
{
    ImageCacheConfig& cache_config = cucim::CuImage::get_config()->cache();
    return create_cache(cache_config);
}
} // namespace cucim::cache
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_shared_memory.h | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUCIM_CACHE_IMAGE_CACHE_SHARED_MEMORY_H
#define CUCIM_CACHE_IMAGE_CACHE_SHARED_MEMORY_H
#include "cucim/cache/image_cache.h"
#include <boost/container_hash/hash.hpp>
#include <boost/interprocess/smart_ptr/unique_ptr.hpp>
#include <boost/interprocess/smart_ptr/shared_ptr.hpp>
#include <boost/interprocess/allocators/allocator.hpp>
#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <libcuckoo/cuckoohash_map.hh>
#include <atomic>
#include <type_traits>
#include <scoped_allocator>
namespace cucim::cache
{
// Forward declarations
struct ImageCacheItemDetail;

// Cache value whose pixel data lives in a shared-memory segment.
struct SharedMemoryImageCacheValue : public ImageCacheValue
{
    SharedMemoryImageCacheValue(void* data,
                                uint64_t size,
                                void* user_obj = nullptr,
                                const cucim::io::DeviceType device_type = cucim::io::DeviceType::kCPU);
    ~SharedMemoryImageCacheValue() override;
};

// Deleter that returns objects to the owning managed_shared_memory segment.
template <class T>
struct shared_mem_deleter
{
    shared_mem_deleter(std::unique_ptr<boost::interprocess::managed_shared_memory>& segment);

    void operator()(T* p);

private:
    std::unique_ptr<boost::interprocess::managed_shared_memory>& seg_;
};

// unique_ptr flavor whose deleter deallocates from the shared-memory segment.
template <class T>
using boost_unique_ptr = std::unique_ptr<T, shared_mem_deleter<T>>;

// Interprocess shared_ptr allocated/deleted within a managed_shared_memory segment.
template <class T>
using boost_shared_ptr = boost::interprocess::shared_ptr<
    T,
    boost::interprocess::allocator<
        void,
        boost::interprocess::segment_manager<char,
                                             boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family>,
                                             boost::interprocess::iset_index>>,
    boost::interprocess::deleter<
        T,
        boost::interprocess::segment_manager<char,
                                             boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family>,
                                             boost::interprocess::iset_index>>>;

// Key/value types for the shared-memory hash map.
using MapKey = boost::interprocess::managed_shared_ptr<ImageCacheKey, boost::interprocess::managed_shared_memory>;
using MapValue =
    boost::interprocess::managed_shared_ptr<ImageCacheItemDetail, boost::interprocess::managed_shared_memory>;
using KeyValuePair = std::pair<MapKey, MapValue>;
using ImageCacheAllocator =
    boost::interprocess::allocator<KeyValuePair, boost::interprocess::managed_shared_memory::segment_manager>;
using ValueAllocator = std::scoped_allocator_adaptor<
    boost::interprocess::allocator<MapValue::type, boost::interprocess::managed_shared_memory::segment_manager>>;

using MapKeyHasher = boost::hash<MapKey>;
using MakKeyEqual = std::equal_to<MapKey>;

// Concurrent hash map (libcuckoo) backed by the shared-memory allocator.
using ImageCacheType =
    libcuckoo::cuckoohash_map<MapKey::type, MapValue::type, boost::hash<MapKey>, std::equal_to<MapKey>, ImageCacheAllocator>;
// Queue of cache values used for eviction ordering.
using QueueType = std::vector<MapValue::type, ValueAllocator>;

// Interprocess shared_ptr for cache items (segment-manager type fully spelled out).
template <class T>
using cache_item_type = boost::interprocess::shared_ptr<
    T,
    boost::interprocess::allocator<
        void,
        boost::interprocess::segment_manager<
            char,
            boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family,
                                                 boost::interprocess::offset_ptr<void, std::ptrdiff_t, uintptr_t, 0UL>,
                                                 0UL>,
            boost::interprocess::iset_index>>,
    boost::interprocess::deleter<
        T,
        boost::interprocess::segment_manager<
            char,
            boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family,
                                                 boost::interprocess::offset_ptr<void, std::ptrdiff_t, uintptr_t, 0UL>,
                                                 0UL>,
            boost::interprocess::iset_index>>>;
/**
 * @brief Image cache for loading tiles, backed by a boost::interprocess
 *        shared-memory segment so it can be shared across processes.
 *
 * FIFO is used for the cache replacement policy: entries are appended to a
 * circular list and evicted from its head.
 */
class SharedMemoryImageCache : public ImageCache
{
public:
    // Only cucim::io::DeviceType::kCPU is supported (see the constructor).
    SharedMemoryImageCache(const ImageCacheConfig& config,
                           const cucim::io::DeviceType device_type = cucim::io::DeviceType::kCPU);
    ~SharedMemoryImageCache();

    const char* type_str() const override;

    std::shared_ptr<ImageCacheKey> create_key(uint64_t file_hash, uint64_t index) override;
    std::shared_ptr<ImageCacheValue> create_value(
        void* data, uint64_t size, const cucim::io::DeviceType device_type = cucim::io::DeviceType::kCPU) override;

    // Allocate n bytes from the shared-memory segment.
    void* allocate(std::size_t n) override;

    // Lock/unlock/access a mutex from the pool, selected by index modulo the pool size.
    void lock(uint64_t index) override;
    void unlock(uint64_t index) override;
    void* mutex(uint64_t index) override;

    bool insert(std::shared_ptr<ImageCacheKey>& key, std::shared_ptr<ImageCacheValue>& value) override;
    void remove_front() override;

    // Current counts / byte sizes.
    uint32_t size() const override;
    uint64_t memory_size() const override;
    uint32_t capacity() const override;
    uint64_t memory_capacity() const override;
    uint64_t free_memory() const override;

    // Hit/miss statistics (recorded only when record(true) has been called).
    void record(bool value) override;
    bool record() const override;
    uint64_t hit_count() const override;
    uint64_t miss_count() const override;

    void reserve(const ImageCacheConfig& config) override;

    std::shared_ptr<ImageCacheValue> find(const std::shared_ptr<ImageCacheKey>& key) override;

private:
    bool is_list_full() const;
    bool is_memory_full(uint64_t additional_size = 0) const;
    void push_back(cache_item_type<ImageCacheItemDetail>& item);
    bool erase(const std::shared_ptr<ImageCacheKey>& key);
    std::shared_ptr<ImageCacheItemDetail> create_cache_item(std::shared_ptr<ImageCacheKey>& key,
                                                            std::shared_ptr<ImageCacheValue>& value);
    // Remove the named shared-memory object from the system.
    static bool remove_shmem();
    uint32_t calc_hashmap_capacity(uint32_t capacity);
    std::unique_ptr<boost::interprocess::managed_shared_memory> create_segment(const ImageCacheConfig& config);

    // All state below lives in (or points into) the shared-memory segment.
    std::unique_ptr<boost::interprocess::managed_shared_memory> segment_;
    // boost_unique_ptr<boost::interprocess::interprocess_mutex> mutex_array_;
    boost::interprocess::interprocess_mutex* mutex_array_ = nullptr; /// mutex pool (array in shared memory)

    boost_unique_ptr<std::atomic<uint64_t>> size_nbytes_; /// size of cache memory used
    boost_unique_ptr<uint64_t> capacity_nbytes_; /// size of cache memory allocated
    boost_unique_ptr<uint32_t> capacity_; /// capacity of hashmap
    boost_unique_ptr<uint32_t> list_capacity_; /// capacity of list
    boost_unique_ptr<uint32_t> list_padding_; /// gap between head and tail
    boost_unique_ptr<uint32_t> mutex_pool_capacity_; /// capacity of mutex pool

    boost_unique_ptr<std::atomic<uint64_t>> stat_hit_; /// cache hit count
    boost_unique_ptr<std::atomic<uint64_t>> stat_miss_; /// cache miss count
    boost_unique_ptr<bool> stat_is_recorded_; /// whether cache stats are recorded or not

    boost_unique_ptr<std::atomic<uint32_t>> list_head_; /// head of the circular list
    boost_unique_ptr<std::atomic<uint32_t>> list_tail_; /// tail of the circular list

    boost_shared_ptr<QueueType> list_;
    boost_shared_ptr<ImageCacheType> hashmap_;
};
} // namespace cucim::cache
#endif // CUCIM_CACHE_IMAGE_CACHE_SHARED_MEMORY_H
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_per_process.h | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUCIM_CACHE_IMAGE_CACHE_PER_PROCESS_H
#define CUCIM_CACHE_IMAGE_CACHE_PER_PROCESS_H
#include "cucim/cache/image_cache.h"
#include <libcuckoo/cuckoohash_map.hh>
#include <memory>
#include <array>
namespace std
{
// Hash specialization so a shared_ptr-wrapped ImageCacheKey can be used as a
// hashmap key (hashes the pointee's fields, not the pointer address).
template <>
struct hash<std::shared_ptr<cucim::cache::ImageCacheKey>>
{
    size_t operator()(const std::shared_ptr<cucim::cache::ImageCacheKey>& s) const;
};

// Equality specialization for shared_ptr-wrapped ImageCacheKey
// (compares the pointees, not the pointer addresses).
template <>
struct equal_to<std::shared_ptr<cucim::cache::ImageCacheKey>>
{
    bool operator()(const std::shared_ptr<cucim::cache::ImageCacheKey>& lhs,
                    const std::shared_ptr<cucim::cache::ImageCacheKey>& rhs) const;
};

} // namespace std
namespace cucim::cache
{
// Forward declarations
struct PerProcessImageCacheItem;
// Cache value whose destructor frees the owned buffer with the allocator
// matching its device type (see the .cpp implementation).
struct PerProcessImageCacheValue : public ImageCacheValue
{
    PerProcessImageCacheValue(void* data,
                              uint64_t size,
                              void* user_obj = nullptr,
                              const cucim::io::DeviceType device_type = cucim::io::DeviceType::kCPU);
    ~PerProcessImageCacheValue() override;
};
/**
* @brief Image Cache for loading tiles.
*
* FIFO is used for cache replacement policy here.
*
*/
class PerProcessImageCache : public ImageCache
{
public:
    PerProcessImageCache(const ImageCacheConfig& config,
                         const cucim::io::DeviceType device_type = cucim::io::DeviceType::kCPU);
    ~PerProcessImageCache();

    const char* type_str() const override;

    std::shared_ptr<ImageCacheKey> create_key(uint64_t file_hash, uint64_t index) override;
    std::shared_ptr<ImageCacheValue> create_value(
        void* data, uint64_t size, const cucim::io::DeviceType device_type = cucim::io::DeviceType::kCPU) override;

    // Allocate n bytes on the cache's device (CPU or CUDA).
    void* allocate(std::size_t n) override;

    // Lock/unlock/access one mutex from the pool, selected by
    // index % mutex_pool_capacity_ (distinct indices may share a mutex).
    void lock(uint64_t index) override;
    void unlock(uint64_t index) override;
    void* mutex(uint64_t index) override;

    bool insert(std::shared_ptr<ImageCacheKey>& key, std::shared_ptr<ImageCacheValue>& value) override;
    void remove_front() override;

    uint32_t size() const override;
    uint64_t memory_size() const override;
    uint32_t capacity() const override;
    uint64_t memory_capacity() const override;
    uint64_t free_memory() const override;

    // Hit/miss statistics (recorded only when record(true) has been called).
    void record(bool value) override;
    bool record() const override;
    uint64_t hit_count() const override;
    uint64_t miss_count() const override;

    // Grow the cache capacities in place (never shrinks).
    void reserve(const ImageCacheConfig& config) override;

    std::shared_ptr<ImageCacheValue> find(const std::shared_ptr<ImageCacheKey>& key) override;

private:
    bool is_list_full() const;
    bool is_memory_full(uint64_t additional_size = 0) const;
    void push_back(std::shared_ptr<PerProcessImageCacheItem>& item);
    bool erase(const std::shared_ptr<ImageCacheKey>& key);

    std::vector<std::mutex> mutex_array_; /// mutex pool

    std::atomic<uint64_t> size_nbytes_ = 0; /// size of cache memory used
    uint64_t capacity_nbytes_ = 0; /// size of cache memory allocated
    uint32_t capacity_ = 0; /// capacity of hashmap
    uint32_t list_capacity_ = 0; /// capacity of list
    uint32_t list_padding_ = 0; /// gap between head and tail
    uint32_t mutex_pool_capacity_ = 0; /// capacity of mutex pool

    std::atomic<uint64_t> stat_hit_ = 0; /// cache hit count
    std::atomic<uint64_t> stat_miss_ = 0; /// cache miss count
    bool stat_is_recorded_ = false; /// whether cache stats are recorded or not

    std::atomic<uint32_t> list_head_ = 0; /// head of the circular list
    std::atomic<uint32_t> list_tail_ = 0; /// tail of the circular list

    std::vector<std::shared_ptr<PerProcessImageCacheItem>> list_; /// circular list using vector
    libcuckoo::cuckoohash_map<std::shared_ptr<ImageCacheKey>, std::shared_ptr<PerProcessImageCacheItem>> hashmap_; /// hashmap using libcuckoo
};
} // namespace cucim::cache
#endif // CUCIM_CACHE_IMAGE_CACHE_PER_PROCESS_H
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache.cpp | /*
* Apache License, Version 2.0
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/cache/image_cache.h"
#include "cucim/cuimage.h"
namespace cucim::cache
{

// Key identifying a cached tile: a hash of the source file plus the tile's
// location (index) within that file.
ImageCacheKey::ImageCacheKey(uint64_t file_hash, uint64_t index) : file_hash(file_hash), location_hash(index)
{
}

// Value holding the raw tile buffer, its size in bytes, an optional opaque
// user object, and the device the memory lives on.
ImageCacheValue::ImageCacheValue(void* data, uint64_t size, void* user_obj, const cucim::io::DeviceType device_type)
    : data(data), size(size), user_obj(user_obj), device_type(device_type)
{
}

// A value is valid iff it holds a non-null data pointer.
ImageCacheValue::operator bool() const
{
    return data != nullptr;
}

// Base-class constructor: stores the configuration, cache type, and device type.
// (Fixed: dropped the stray ';' after the body — an empty declaration that
// triggers -Wextra-semi warnings.)
ImageCache::ImageCache(const ImageCacheConfig& config, CacheType type, const cucim::io::DeviceType device_type)
    : type_(type), device_type_(device_type), config_(config)
{
}

CacheType ImageCache::type() const
{
    return type_;
}

// Default type name; derived caches override this ("per_process", "shared_memory").
const char* ImageCache::type_str() const
{
    return "nocache";
}

cucim::io::DeviceType ImageCache::device_type() const
{
    return device_type_;
}

// Mutable reference to the live configuration.
ImageCacheConfig& ImageCache::config()
{
    return config_;
}

// Copy of the current configuration.
ImageCacheConfig ImageCache::get_config() const
{
    return config_;
}

} // namespace cucim::cache
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_per_process.cpp | /*
* Apache License, Version 2.0
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "image_cache_per_process.h"
#include "cucim/cache/image_cache.h"
#include "cucim/memory/memory_manager.h"
#include "cucim/util/cuda.h"
#include <fmt/format.h>
namespace std
{
// Hash for shared_ptr-wrapped cache keys: combines the file hash and the
// location (tile index) hash.
size_t hash<std::shared_ptr<cucim::cache::ImageCacheKey>>::operator()(
    const std::shared_ptr<cucim::cache::ImageCacheKey>& s) const
{
    std::size_t h1 = std::hash<uint64_t>{}(s->file_hash);
    std::size_t h2 = std::hash<uint64_t>{}(s->location_hash);
    return h1 ^ (h2 << 1); // or use boost::hash_combine
}

// Equality for shared_ptr-wrapped cache keys.
// Fixed: also compare file_hash. Comparing only location_hash made keys from
// different files with the same tile location compare equal while hashing
// differently (the hash above mixes in file_hash), which violates the
// hash/equality contract the cuckoo hashmap relies on. This also matches the
// SharedMemoryImageCache key comparison, which checks both fields.
bool equal_to<std::shared_ptr<cucim::cache::ImageCacheKey>>::operator()(
    const std::shared_ptr<cucim::cache::ImageCacheKey>& lhs, const std::shared_ptr<cucim::cache::ImageCacheKey>& rhs) const
{
    return lhs->location_hash == rhs->location_hash && lhs->file_hash == rhs->file_hash;
}
} // namespace std
namespace cucim::cache
{
// Internal entry shared between the hashmap and the FIFO list: pairs a key
// with its cached value.
struct PerProcessImageCacheItem
{
    PerProcessImageCacheItem(std::shared_ptr<ImageCacheKey>& key, std::shared_ptr<ImageCacheValue>& value)
        : key(key), value(value)
    {
    }

    std::shared_ptr<ImageCacheKey> key;
    std::shared_ptr<ImageCacheValue> value;
};

PerProcessImageCacheValue::PerProcessImageCacheValue(void* data,
                                                     uint64_t size,
                                                     void* user_obj,
                                                     const cucim::io::DeviceType device_type)
    : ImageCacheValue(data, size, user_obj, device_type){};

// Free the owned buffer with the deallocator matching its device type:
// cucim_free for CPU memory, cudaFree for CUDA memory. Other device types are
// reported as unsupported (the buffer is then dropped without being freed).
PerProcessImageCacheValue::~PerProcessImageCacheValue()
{
    if (data)
    {
        switch (device_type)
        {
        case io::DeviceType::kCPU:
            cucim_free(data);
            break;
        case io::DeviceType::kCUDA: {
            cudaError_t cuda_status; // used by the CUDA_TRY macro
            CUDA_TRY(cudaFree(data));
            break;
        }
        case io::DeviceType::kCUDAHost:
        case io::DeviceType::kCUDAManaged:
        case io::DeviceType::kCPUShared:
        case io::DeviceType::kCUDAShared:
            fmt::print(stderr, "Device type {} is not supported!\n", device_type);
            break;
        }
        data = nullptr;
    }
};
// Build a per-process cache from the configuration: a mutex pool, a circular
// list (padded by list_padding so head/tail never collide), and a cuckoo hashmap.
PerProcessImageCache::PerProcessImageCache(const ImageCacheConfig& config, const cucim::io::DeviceType device_type)
    : ImageCache(config, CacheType::kPerProcess, device_type),
      mutex_array_(config.mutex_pool_capacity),
      capacity_nbytes_(kOneMiB * config.memory_capacity), // memory budget in bytes
      capacity_(config.capacity),
      list_capacity_(config.capacity + config.list_padding),
      list_padding_(config.list_padding),
      mutex_pool_capacity_(config.mutex_pool_capacity),
      stat_is_recorded_(config.record_stat),
      list_(config.capacity + config.list_padding),
      hashmap_(config.capacity){};

PerProcessImageCache::~PerProcessImageCache()
{
}

const char* PerProcessImageCache::type_str() const
{
    return "per_process";
}

std::shared_ptr<ImageCacheKey> PerProcessImageCache::create_key(uint64_t file_hash, uint64_t index)
{
    return std::make_shared<ImageCacheKey>(file_hash, index);
}

// Wrap an already-allocated buffer in a value that frees it on destruction.
std::shared_ptr<ImageCacheValue> PerProcessImageCache::create_value(void* data,
                                                                    uint64_t size,
                                                                    const cucim::io::DeviceType device_type)
{
    return std::make_shared<PerProcessImageCacheValue>(data, size, nullptr, device_type);
}
// Allocate n bytes on the cache's device: cucim_malloc for CPU, cudaMalloc for
// CUDA. Returns nullptr for unsupported device types.
void* PerProcessImageCache::allocate(std::size_t n)
{
    switch (device_type_)
    {
    case io::DeviceType::kCPU:
        return cucim_malloc(n);
    case io::DeviceType::kCUDA: {
        cudaError_t cuda_status; // used by the CUDA_TRY macro
        void* image_data_ptr = nullptr;
        CUDA_TRY(cudaMalloc(&image_data_ptr, n));
        return image_data_ptr;
    }
    case io::DeviceType::kCUDAHost:
    case io::DeviceType::kCUDAManaged:
    case io::DeviceType::kCPUShared:
    case io::DeviceType::kCUDAShared:
        fmt::print(stderr, "Device type {} is not supported!\n", device_type_);
        break;
    }
    return nullptr;
}

// Lock the pool mutex for `index`; the pool has mutex_pool_capacity_ mutexes,
// so distinct indices may share a mutex.
void PerProcessImageCache::lock(uint64_t index)
{
    mutex_array_[index % mutex_pool_capacity_].lock();
}

void PerProcessImageCache::unlock(uint64_t index)
{
    mutex_array_[index % mutex_pool_capacity_].unlock();
}

// Raw pointer to the pool mutex for `index` (type-erased as void*).
void* PerProcessImageCache::mutex(uint64_t index)
{
    return &mutex_array_[index % mutex_pool_capacity_];
}
// Insert a key/value pair.
// Rejects values larger than the whole memory budget (or a zero-capacity
// cache), evicts from the FIFO front until there is room, then inserts into
// the hashmap and, on success, appends to the circular list. Returns false if
// the key already exists (the duplicate is reported on stderr).
bool PerProcessImageCache::insert(std::shared_ptr<ImageCacheKey>& key, std::shared_ptr<ImageCacheValue>& value)
{
    if (value->size > capacity_nbytes_ || capacity_ < 1)
    {
        return false;
    }

    // NOTE(review): the room check and the insertion below are not one atomic
    // step; concurrent inserters may each evict — verify callers hold the
    // per-index lock (lock()/unlock()) around this.
    while (is_list_full() || is_memory_full(value->size))
    {
        remove_front();
    }

    auto item = std::make_shared<PerProcessImageCacheItem>(key, value);
    bool succeed = hashmap_.insert(key, item);
    if (succeed)
    {
        push_back(item);
    }
    else
    {
        fmt::print(stderr, "{} existing list_[] = {}\n", std::hash<std::thread::id>{}(std::this_thread::get_id()), (uint64_t)item->key->location_hash);
    }
    return succeed;
}

// Evict the oldest entry by advancing the list head with a CAS loop; the
// winning thread subtracts the entry's size, erases it from the hashmap, and
// releases the list slot. No-op when the list is empty.
void PerProcessImageCache::remove_front()
{
    while (true)
    {
        uint32_t head = list_head_.load(std::memory_order_relaxed);
        uint32_t tail = list_tail_.load(std::memory_order_relaxed);
        if (head != tail)
        {
            // Remove front by increasing head
            if (list_head_.compare_exchange_weak(
                    head, (head + 1) % list_capacity_, std::memory_order_release, std::memory_order_relaxed))
            {
                std::shared_ptr<PerProcessImageCacheItem> head_item = list_[head];
                size_nbytes_.fetch_sub(head_item->value->size, std::memory_order_relaxed);
                hashmap_.erase(head_item->key);
                list_[head].reset(); // decrease refcount
                break;
            }
        }
        else
        {
            break; // already empty
        }
    }
}
// Number of live entries: distance from head to tail on the circular list.
uint32_t PerProcessImageCache::size() const
{
    uint32_t head = list_head_.load(std::memory_order_relaxed);
    uint32_t tail = list_tail_.load(std::memory_order_relaxed);

    return (tail + list_capacity_ - head) % list_capacity_;
}

// Bytes currently used by cached values.
uint64_t PerProcessImageCache::memory_size() const
{
    return size_nbytes_.load(std::memory_order_relaxed);
}

uint32_t PerProcessImageCache::capacity() const
{
    return capacity_;
}

uint64_t PerProcessImageCache::memory_capacity() const
{
    return capacity_nbytes_;
}

// Remaining byte budget (capacity minus used).
uint64_t PerProcessImageCache::free_memory() const
{
    return capacity_nbytes_ - size_nbytes_.load(std::memory_order_relaxed);
}

// Enable/disable hit-miss recording; both counters are reset either way.
void PerProcessImageCache::record(bool value)
{
    config_.record_stat = value;

    stat_hit_.store(0, std::memory_order_relaxed);
    stat_miss_.store(0, std::memory_order_relaxed);
    stat_is_recorded_ = value;
}

bool PerProcessImageCache::record() const
{
    return stat_is_recorded_;
}

uint64_t PerProcessImageCache::hit_count() const
{
    return stat_hit_.load(std::memory_order_relaxed);
}

uint64_t PerProcessImageCache::miss_count() const
{
    return stat_miss_.load(std::memory_order_relaxed);
}
// Grow the cache to the new configuration. Only ever expands: smaller values
// are ignored. When the item capacity grows, the circular list is resized and,
// if its live region wraps around (tail < head), the wrapped prefix is moved
// into the newly appended slots so that head..tail is contiguous again.
// NOTE(review): assumes no concurrent readers/writers while resizing — confirm
// callers serialize reserve() against insert()/find().
void PerProcessImageCache::reserve(const ImageCacheConfig& config)
{
    uint64_t new_memory_capacity_nbytes = kOneMiB * config.memory_capacity;
    uint32_t new_capacity = config.capacity;

    if (capacity_nbytes_ < new_memory_capacity_nbytes)
    {
        capacity_nbytes_ = new_memory_capacity_nbytes;
    }

    if (capacity_ < new_capacity)
    {
        config_.capacity = config.capacity;
        config_.memory_capacity = config.memory_capacity;

        uint32_t old_list_capacity = list_capacity_;

        capacity_ = new_capacity;
        list_capacity_ = new_capacity + list_padding_;

        list_.reserve(list_capacity_);
        list_.resize(list_capacity_);
        hashmap_.reserve(new_capacity);

        // Move items in the vector (un-wrap the ring: relocate [0, tail) into
        // the slots appended after the old capacity)
        uint32_t head = list_head_.load(std::memory_order_relaxed);
        uint32_t tail = list_tail_.load(std::memory_order_relaxed);
        if (tail < head)
        {
            head = 0;
            uint32_t new_head = old_list_capacity;
            while (head != tail)
            {
                list_[new_head] = list_[head];
                list_[head].reset();
                head = (head + 1) % old_list_capacity;
                new_head = (new_head + 1) % list_capacity_;
            }
            // Set new tail
            list_tail_.store(new_head, std::memory_order_relaxed);
        }
    }
}
/**
 * Look up a cached value by key.
 *
 * When stat recording is enabled, the hit or miss counter is bumped as a side
 * effect of the lookup. Returns an empty shared_ptr when the key is absent.
 */
std::shared_ptr<ImageCacheValue> PerProcessImageCache::find(const std::shared_ptr<ImageCacheKey>& key)
{
    std::shared_ptr<PerProcessImageCacheItem> cache_item;
    const bool found = hashmap_.find(key, cache_item);

    if (stat_is_recorded_)
    {
        auto& counter = found ? stat_hit_ : stat_miss_;
        counter.fetch_add(1, std::memory_order_relaxed);
    }

    return found ? cache_item->value : std::shared_ptr<ImageCacheValue>();
}
// True when the number of live entries has reached the configured capacity.
bool PerProcessImageCache::is_list_full() const
{
    return size() >= capacity_;
}

// True when adding `additional_size` bytes would exceed the memory budget.
bool PerProcessImageCache::is_memory_full(uint64_t additional_size) const
{
    const uint64_t used_nbytes = size_nbytes_.load(std::memory_order_relaxed);
    return used_nbytes + additional_size > capacity_nbytes_;
}
// Append an item to the circular list by claiming the tail slot with a CAS
// loop; the winning thread stores the item in its claimed slot and adds the
// value's size to the byte counter.
// NOTE(review): list_[tail] is written after the tail CAS succeeds, so a
// concurrent remove_front() that catches up could observe the slot before it
// is populated — verify external locking prevents this interleaving.
void PerProcessImageCache::push_back(std::shared_ptr<PerProcessImageCacheItem>& item)
{
    uint32_t tail = list_tail_.load(std::memory_order_relaxed);
    while (true)
    {
        // Push back by increasing tail
        if (list_tail_.compare_exchange_weak(
                tail, (tail + 1) % list_capacity_, std::memory_order_release, std::memory_order_relaxed))
        {
            list_[tail] = item;
            size_nbytes_.fetch_add(item->value->size, std::memory_order_relaxed);
            break;
        }
        tail = list_tail_.load(std::memory_order_relaxed);
    }
}
// Remove `key` from the hashmap; returns whether an entry was actually erased.
bool PerProcessImageCache::erase(const std::shared_ptr<ImageCacheKey>& key)
{
    return hashmap_.erase(key);
}
} // namespace cucim::cache
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_config.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/cache/image_cache_config.h"
#include <fmt/format.h>
#include <nlohmann/json.hpp>
using json = nlohmann::json;
namespace cucim::cache
{
// Populate this config from a JSON object (passed as const void* so the public
// header does not depend on nlohmann/json).
// Each field is updated only when present with the expected JSON type; absent
// or mistyped fields keep their current values.
void ImageCacheConfig::load_config(const void* json_obj)
{
    const json& cache_config = *(static_cast<const json*>(json_obj));

    if (cache_config.contains("type") && cache_config["type"].is_string())
    {
        auto cache_type = cache_config.value("type", kDefaultCacheTypeStr);
        type = cucim::cache::lookup_cache_type(cache_type);
    }
    if (cache_config.contains("memory_capacity") && cache_config["memory_capacity"].is_number_unsigned())
    {
        memory_capacity = cache_config.value("memory_capacity", kDefaultCacheMemoryCapacity);
        // A new memory budget resets the item capacity to its derived default;
        // an explicit "capacity" entry below can still override it.
        capacity = calc_default_cache_capacity(kOneMiB * memory_capacity);
    }
    if (cache_config.contains("capacity") && cache_config["capacity"].is_number_unsigned())
    {
        capacity = cache_config.value("capacity", calc_default_cache_capacity(kOneMiB * memory_capacity));
    }
    if (cache_config.contains("mutex_pool_capacity") && cache_config["mutex_pool_capacity"].is_number_unsigned())
    {
        mutex_pool_capacity = cache_config.value("mutex_pool_capacity", kDefaultCacheMutexPoolCapacity);
    }
    if (cache_config.contains("list_padding") && cache_config["list_padding"].is_number_unsigned())
    {
        list_padding = cache_config.value("list_padding", kDefaultCacheListPadding);
    }
    if (cache_config.contains("extra_shared_memory_size") && cache_config["extra_shared_memory_size"].is_number_unsigned())
    {
        extra_shared_memory_size = cache_config.value("extra_shared_memory_size", kDefaultCacheExtraSharedMemorySize);
    }
    if (cache_config.contains("record_stat") && cache_config["record_stat"].is_boolean())
    {
        record_stat = cache_config.value("record_stat", kDefaultCacheRecordStat);
    }
}
} // namespace cucim::cache
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/cache_type.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/cache/cache_type.h"
#include "cucim/cpp20/find_if.h"
namespace cucim::cache
{
using namespace std::literals::string_view_literals;
// Map a cache-type name to its enum value; unknown names fall back to kNoCache.
constexpr CacheType CacheTypeMap::at(const std::string_view& key) const
{
    const auto entry =
        cucim::cpp20::find_if(begin(data), end(data), [&key](const auto& v) { return v.first == key; });
    return (entry != end(data)) ? entry->second : CacheType::kNoCache;
}
// Map a CacheType enum value to its canonical name; unknown values map to "nocache".
constexpr std::string_view CacheTypeStrMap::at(const CacheType& key) const
{
    const auto entry =
        cucim::cpp20::find_if(begin(data), end(data), [&key](const auto& v) { return v.first == key; });
    return (entry != end(data)) ? entry->second : "nocache"sv;
}
// Name -> enum lookup table (keep in sync with cache_type_str_values below).
static constexpr std::array<std::pair<std::string_view, CacheType>, kCacheTypeCount> cache_type_values{
    { { "nocache"sv, CacheType::kNoCache },
      { "per_process"sv, CacheType::kPerProcess },
      { "shared_memory"sv, CacheType::kSharedMemory } }
};

// Convert a cache-type name to its enum value ("nocache"/kNoCache for unknown names).
CacheType lookup_cache_type(const std::string_view sv)
{
    static constexpr auto map = CacheTypeMap{ { cache_type_values } };
    return map.at(sv);
}

// Enum -> name lookup table.
static constexpr std::array<std::pair<CacheType, std::string_view>, kCacheTypeCount> cache_type_str_values{
    { { CacheType::kNoCache, "nocache"sv },
      { CacheType::kPerProcess, "per_process"sv },
      { CacheType::kSharedMemory, "shared_memory"sv } }
};

// Convert a CacheType to its canonical name ("nocache" for unknown values).
std::string_view lookup_cache_type_str(const CacheType key)
{
    static constexpr auto map = CacheTypeStrMap{ { cache_type_str_values } };
    return map.at(key);
}
} // namespace cucim::cache
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_empty.h | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUCIM_CACHE_IMAGE_CACHE_EMPTY_H
#define CUCIM_CACHE_IMAGE_CACHE_EMPTY_H
#include "cucim/cache/image_cache.h"
namespace cucim::cache
{
/**
 * @brief No-op image cache used when caching is disabled ("nocache").
 *
 * Stores nothing: insert() accepts and discards entries, find() always
 * misses, and all size/capacity/statistic queries report zero.
 */
class EmptyImageCache : public ImageCache
{
public:
    EmptyImageCache(const ImageCacheConfig& config);

    std::shared_ptr<ImageCacheKey> create_key(uint64_t file_hash, uint64_t index) override;
    std::shared_ptr<ImageCacheValue> create_value(
        void* data, uint64_t size, const cucim::io::DeviceType device_type = cucim::io::DeviceType::kCPU) override;

    void* allocate(std::size_t n) override;

    void lock(uint64_t index) override;
    void unlock(uint64_t index) override;
    void* mutex(uint64_t index) override;

    bool insert(std::shared_ptr<ImageCacheKey>& key, std::shared_ptr<ImageCacheValue>& value) override;
    void remove_front() override;

    uint32_t size() const override;
    uint64_t memory_size() const override;
    uint32_t capacity() const override;
    uint64_t memory_capacity() const override;
    uint64_t free_memory() const override;

    void record(bool value) override;
    bool record() const override;
    uint64_t hit_count() const override;
    uint64_t miss_count() const override;

    void reserve(const ImageCacheConfig& config) override;

    std::shared_ptr<ImageCacheValue> find(const std::shared_ptr<ImageCacheKey>& key) override;

private:
    ImageCacheConfig config_;
};
#endif // CUCIM_CACHE_IMAGE_CACHE_EMPTY_H
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_empty.cpp | /*
* Apache License, Version 2.0
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "image_cache_empty.h"
namespace cucim::cache
{
// Disabled ("nocache") cache: every operation below is a no-op and find()
// always reports a miss.
EmptyImageCache::EmptyImageCache(const ImageCacheConfig& config) : ImageCache(config){};

std::shared_ptr<ImageCacheKey> EmptyImageCache::create_key(uint64_t, uint64_t)
{
    return std::make_shared<ImageCacheKey>(0, 0);
}

// Returns a null-data value (which converts to false via ImageCacheValue::operator bool).
std::shared_ptr<ImageCacheValue> EmptyImageCache::create_value(void*, uint64_t, const cucim::io::DeviceType)
{
    return std::make_shared<ImageCacheValue>(nullptr, 0);
}

void* EmptyImageCache::allocate(std::size_t)
{
    return nullptr;
}

void EmptyImageCache::lock(uint64_t)
{
    return;
}

void EmptyImageCache::unlock(uint64_t)
{
    return;
}

void* EmptyImageCache::mutex(uint64_t)
{
    return nullptr;
}

// Accepts and silently discards the entry.
bool EmptyImageCache::insert(std::shared_ptr<ImageCacheKey>&, std::shared_ptr<ImageCacheValue>&)
{
    return true;
}

void EmptyImageCache::remove_front()
{
}

uint32_t EmptyImageCache::size() const
{
    return 0;
}

uint64_t EmptyImageCache::memory_size() const
{
    return 0;
}

uint32_t EmptyImageCache::capacity() const
{
    return 0;
}

uint64_t EmptyImageCache::memory_capacity() const
{
    return 0;
}

uint64_t EmptyImageCache::free_memory() const
{
    return 0;
}

void EmptyImageCache::record(bool)
{
    return;
}

bool EmptyImageCache::record() const
{
    return false;
}

uint64_t EmptyImageCache::hit_count() const
{
    return 0;
}

uint64_t EmptyImageCache::miss_count() const
{
    return 0;
}

void EmptyImageCache::reserve(const ImageCacheConfig&)
{
}

// Always a miss: returns an empty shared_ptr.
std::shared_ptr<ImageCacheValue> EmptyImageCache::find(const std::shared_ptr<ImageCacheKey>&)
{
    return std::shared_ptr<ImageCacheValue>();
}
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/cache/image_cache_shared_memory.cpp | /*
* Apache License, Version 2.0
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "image_cache_shared_memory.h"
#include "cucim/cuimage.h"
#include "cucim/memory/memory_manager.h"
#include <boost/make_shared.hpp>
#include <fmt/format.h>
// Hash for shared-memory cache keys. Overloads accept the managed_shared_ptr
// key type (const and non-const) as well as a plain ImageCacheKey and a
// std::shared_ptr-wrapped key, so heterogeneous lookups hash identically.
// All overloads combine file_hash and location_hash the same way.
template <>
struct boost::hash<cucim::cache::MapKey>
{
    typedef cucim::cache::MapKey argument_type;
    typedef size_t result_type;

    result_type operator()(argument_type::type& s) const
    {
        std::size_t h1 = std::hash<uint64_t>{}(s->file_hash);
        std::size_t h2 = std::hash<uint64_t>{}(s->location_hash);
        return h1 ^ (h2 << 1); // or use boost::hash_combine
    }

    result_type operator()(const argument_type::type& s) const
    {
        std::size_t h1 = std::hash<uint64_t>{}(s->file_hash);
        std::size_t h2 = std::hash<uint64_t>{}(s->location_hash);
        return h1 ^ (h2 << 1); // or use boost::hash_combine
    }

    result_type operator()(const cucim::cache::ImageCacheKey& s) const
    {
        std::size_t h1 = std::hash<uint64_t>{}(s.file_hash);
        std::size_t h2 = std::hash<uint64_t>{}(s.location_hash);
        return h1 ^ (h2 << 1); // or use boost::hash_combine
    }

    result_type operator()(const std::shared_ptr<cucim::cache::ImageCacheKey>& s) const
    {
        std::size_t h1 = std::hash<uint64_t>{}(s->file_hash);
        std::size_t h2 = std::hash<uint64_t>{}(s->location_hash);
        return h1 ^ (h2 << 1); // or use boost::hash_combine
    }
};

// Equality for shared-memory cache keys: two keys match only when both the
// location hash and the file hash agree (mirrors the hash above).
template <>
struct std::equal_to<cucim::cache::MapKey>
{
    typedef cucim::cache::MapKey argument_type;

    bool operator()(const argument_type::type& lhs, const argument_type::type& rhs) const
    {
        return lhs->location_hash == rhs->location_hash && lhs->file_hash == rhs->file_hash;
    }

    bool operator()(const argument_type::type& lhs, const cucim::cache::ImageCacheKey& rhs) const
    {
        return lhs->location_hash == rhs.location_hash && lhs->file_hash == rhs.file_hash;
    }

    bool operator()(const cucim::cache::ImageCacheKey& lhs, const std::shared_ptr<cucim::cache::ImageCacheKey>& rhs) const
    {
        return lhs.location_hash == rhs->location_hash && lhs.file_hash == rhs->file_hash;
    }
};
namespace cucim::cache
{
// Deleter that frees nothing: it only holds a handle p_ (e.g. a shared pointer
// keeping a resource alive) and resets it when the owning pointer is destroyed.
template <class P>
struct null_deleter
{
private:
    P p_;

public:
    null_deleter(const P& p) : p_(p)
    {
    }

    void operator()(void const*)
    {
        p_.reset();
    }

    P const& get() const
    {
        return p_;
    }
};

// Deleter bound to the managed shared-memory segment: destroys the object
// through the segment so its memory is returned to the segment allocator.
template <class T>
shared_mem_deleter<T>::shared_mem_deleter(std::unique_ptr<boost::interprocess::managed_shared_memory>& segment)
    : seg_(segment)
{
}

// Destroy the object via the segment; a no-op when the segment is already gone.
template <class T>
void shared_mem_deleter<T>::operator()(T* p)
{
    if (seg_)
    {
        seg_->destroy_ptr(p);
    }
}
// Size of the shared-memory segment to request.
// Beyond the payload budget, the cache's data structures (hashmap + vector)
// need roughly 13MiB plus ~400 bytes per entry, so we add the configured extra
// size plus a rough 512 bytes of headroom per item. Under-sizing the segment
// can make an allocation fail and the process get stuck.
// https://stackoverflow.com/questions/4166642/how-much-memory-should-managed-shared-memory-allocate-boost
static size_t calc_segment_size(const ImageCacheConfig& config)
{
    const size_t payload_bytes = kOneMiB * config.memory_capacity;
    const size_t extra_bytes = config.extra_shared_memory_size * kOneMiB;
    const size_t per_item_bytes = 512 * config.capacity;
    return payload_bytes + (extra_bytes + per_item_bytes);
}
// boost::interprocess::shared_ptr whose allocator and deleter operate on the
// shared-memory segment manager (default offset_ptr configuration).
template <class T>
using deleter_type = boost::interprocess::shared_ptr<
    T,
    boost::interprocess::allocator<
        void,
        boost::interprocess::segment_manager<char,
                                             boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family>,
                                             boost::interprocess::iset_index>>,
    boost::interprocess::deleter<
        T,
        boost::interprocess::segment_manager<char,
                                             boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family>,
                                             boost::interprocess::iset_index>>>;

// Hashmap/list entry living in shared memory: pairs a key with its cached value.
struct ImageCacheItemDetail
{
    ImageCacheItemDetail(deleter_type<ImageCacheKey>& key, deleter_type<SharedMemoryImageCacheValue>& value)
        : key(key), value(value)
    {
    }
    deleter_type<ImageCacheKey> key;
    deleter_type<SharedMemoryImageCacheValue> value;
};

SharedMemoryImageCacheValue::SharedMemoryImageCacheValue(void* data,
                                                         uint64_t size,
                                                         void* user_obj,
                                                         const cucim::io::DeviceType device_type)
    : ImageCacheValue(data, size, user_obj, device_type){};

// Return the buffer to the shared-memory segment; user_obj holds the
// managed_shared_memory* that allocated it.
SharedMemoryImageCacheValue::~SharedMemoryImageCacheValue()
{
    if (data)
    {
        if (user_obj)
        {
            static_cast<boost::interprocess::managed_shared_memory*>(user_obj)->deallocate(data);
            data = nullptr;
        }
    }
};
// Builds the shared-memory cache: creates (or opens) the segment first, then
// find-or-constructs every piece of shared state inside it by name so that
// multiple processes attach to the same objects. Only CPU memory is supported.
// Throws std::runtime_error on unsupported device type or segment exhaustion.
SharedMemoryImageCache::SharedMemoryImageCache(const ImageCacheConfig& config, const cucim::io::DeviceType device_type)
    : ImageCache(config, CacheType::kSharedMemory, device_type),
      segment_(create_segment(config)),
      //   mutex_array_(nullptr, shared_mem_deleter<boost::interprocess::interprocess_mutex>(segment_)),
      size_nbytes_(nullptr, shared_mem_deleter<std::atomic<uint64_t>>(segment_)),
      capacity_nbytes_(nullptr, shared_mem_deleter<uint64_t>(segment_)),
      capacity_(nullptr, shared_mem_deleter<uint32_t>(segment_)),
      list_capacity_(nullptr, shared_mem_deleter<uint32_t>(segment_)),
      list_padding_(nullptr, shared_mem_deleter<uint32_t>(segment_)),
      mutex_pool_capacity_(nullptr, shared_mem_deleter<uint32_t>(segment_)),
      stat_hit_(nullptr, shared_mem_deleter<std::atomic<uint64_t>>(segment_)),
      stat_miss_(nullptr, shared_mem_deleter<std::atomic<uint64_t>>(segment_)),
      stat_is_recorded_(nullptr, shared_mem_deleter<bool>(segment_)),
      list_head_(nullptr, shared_mem_deleter<std::atomic<uint32_t>>(segment_)),
      list_tail_(nullptr, shared_mem_deleter<std::atomic<uint32_t>>(segment_))
{
    const uint64_t& memory_capacity = config.memory_capacity;
    const uint32_t& capacity = config.capacity;
    const uint32_t& mutex_pool_capacity = config.mutex_pool_capacity;
    const bool& record_stat = config.record_stat;

    if (device_type != cucim::io::DeviceType::kCPU)
    {
        throw std::runtime_error(
            fmt::format("[Error] SharedMemoryImageCache doesn't support other memory type other than CPU memory!\n"));
    }

    try
    {
        // mutex_array_.reset(segment_->find_or_construct_it<boost::interprocess::interprocess_mutex>(
        //     "cucim-mutex")[mutex_pool_capacity]());
        // Array of pooled mutexes used by lock()/unlock(); raw pointer, destroyed by name in the destructor.
        mutex_array_ =
            segment_->construct_it<boost::interprocess::interprocess_mutex>("cucim-mutex")[mutex_pool_capacity]();
        size_nbytes_.reset(segment_->find_or_construct<std::atomic<uint64_t>>("size_nbytes_")(0)); /// size of cache
                                                                                                   /// memory used
        capacity_nbytes_.reset(
            segment_->find_or_construct<uint64_t>("capacity_nbytes_")(kOneMiB * memory_capacity)); /// size of
                                                                                                   /// cache
                                                                                                   /// memory
                                                                                                   /// allocated
        capacity_.reset(segment_->find_or_construct<uint32_t>("capacity_")(capacity)); /// capacity
                                                                                       /// of hashmap
        list_capacity_.reset(
            segment_->find_or_construct<uint32_t>("list_capacity_")(capacity + config.list_padding)); /// capacity
                                                                                                      /// of
                                                                                                      /// list
        list_padding_.reset(segment_->find_or_construct<uint32_t>("list_padding_")(config.list_padding)); /// gap
                                                                                                          /// between
                                                                                                          /// head and
                                                                                                          /// tail
        mutex_pool_capacity_.reset(segment_->find_or_construct<uint32_t>("mutex_pool_capacity_")(mutex_pool_capacity));
        stat_hit_.reset(segment_->find_or_construct<std::atomic<uint64_t>>("stat_hit_")(0)); /// cache hit count
        stat_miss_.reset(segment_->find_or_construct<std::atomic<uint64_t>>("stat_miss_")(0)); /// cache miss mcount
        stat_is_recorded_.reset(segment_->find_or_construct<bool>("stat_is_recorded_")(record_stat)); /// whether if
                                                                                                      /// cache stat is
                                                                                                      /// recorded or
                                                                                                      /// not
        list_head_.reset(segment_->find_or_construct<std::atomic<uint32_t>>("list_head_")(0)); /// head
        list_tail_.reset(segment_->find_or_construct<std::atomic<uint32_t>>("list_tail_")(0)); /// tail

        // Circular eviction list and concurrent hashmap, both allocated inside the segment
        // and wrapped in interprocess shared_ptrs so cleanup goes through the segment.
        list_ = boost::interprocess::make_managed_shared_ptr(
            segment_->find_or_construct<QueueType>("cucim-list")(
                *list_capacity_, ValueAllocator(segment_->get_segment_manager())),
            *segment_);
        hashmap_ = boost::interprocess::make_managed_shared_ptr(
            segment_->find_or_construct<ImageCacheType>("cucim-hashmap")(
                calc_hashmap_capacity(capacity), MapKeyHasher(), MakKeyEqual(),
                ImageCacheAllocator(segment_->get_segment_manager())),
            *segment_);
    }
    catch (const boost::interprocess::bad_alloc& e)
    {
        throw std::runtime_error(fmt::format(
            "[Error] Couldn't allocate shared memory (size: {}). Please increase the cache memory capacity.\n",
            memory_capacity));
    }
};
// Tears down in dependency order: first the objects living inside the segment
// (hashmap, list, named mutex array), then the segment itself, and finally the
// OS-level shared memory object. Order matters -- segment_ must outlive anything
// allocated inside it.
SharedMemoryImageCache::~SharedMemoryImageCache()
{
    {
        // Destroy objects that uses the shared memory object(segment_)
        hashmap_.reset();
        list_.reset();
        segment_->destroy<boost::interprocess::interprocess_mutex>("cucim-mutex");
        mutex_array_ = nullptr;
        // mutex_array_.reset();

        // Destroy the shared memory object
        segment_.reset();
    }

    // Best-effort removal of the named shared memory; only warn on failure
    // (another process may still hold it open).
    bool succeed = remove_shmem();
    if (!succeed)
    {
        fmt::print(stderr, "[Warning] Couldn't delete the shared memory object '{}'.",
                   cucim::CuImage::get_config()->shm_name());
    }
}
// Human-readable identifier for this cache backend.
const char* SharedMemoryImageCache::type_str() const
{
    static constexpr const char* kTypeName = "shared_memory";
    return kTypeName;
}
// Constructs an ImageCacheKey inside the shared-memory segment (anonymous instance)
// and returns a std::shared_ptr facade around it. The std::shared_ptr's null_deleter
// holds a copy of the interprocess shared_ptr, so the segment object stays alive as
// long as any std::shared_ptr copy exists, without std deleting shared memory.
std::shared_ptr<ImageCacheKey> SharedMemoryImageCache::create_key(uint64_t file_hash, uint64_t index)
{
    auto key = boost::interprocess::make_managed_shared_ptr(
        segment_->find_or_construct<ImageCacheKey>(boost::interprocess::anonymous_instance)(file_hash, index), *segment_);
    return std::shared_ptr<ImageCacheKey>(key.get().get(), null_deleter<decltype(key)>(key));
}
// Constructs a SharedMemoryImageCacheValue inside the segment, passing the raw
// segment pointer as `user_obj` so the value's destructor can deallocate `data`
// back into the segment. Returns the same null_deleter facade pattern as create_key().
std::shared_ptr<ImageCacheValue> SharedMemoryImageCache::create_value(void* data,
                                                                      uint64_t size,
                                                                      const cucim::io::DeviceType device_type)
{
    auto value = boost::interprocess::make_managed_shared_ptr(
        segment_->find_or_construct<SharedMemoryImageCacheValue>(boost::interprocess::anonymous_instance)(
            data, size, &*segment_, device_type),
        *segment_);
    return std::shared_ptr<ImageCacheValue>(value.get().get(), null_deleter<decltype(value)>(value));
}
void* SharedMemoryImageCache::allocate(std::size_t n)
{
// TODO: handling OOM exception
void* temp = nullptr;
try
{
// fmt::print(stderr, "## pid: {} memory_size: {}, memory_capacity: {}, free_memory: {}\n", getpid(),
// memory_size(), memory_capacity(), free_memory());
// fmt::print(
// stderr, "## pid: {} size_nbytes: {}, capacity_nbytes: {}\n", getpid(), *size_nbytes_, *capacity_nbytes_);
// fmt::print(stderr, "## pid: {}, {} hit:{} miss:{} total:{} | {}/{} hash size:{}\n", getpid(),
// segment_->get_free_memory(), *stat_hit_, *stat_miss_, *stat_hit_ + *stat_miss_, size(),
// *list_capacity_, hashmap_->size());
temp = segment_->allocate(n);
}
catch (const std::exception& e)
{
throw std::runtime_error(fmt::format(
"[Error] Couldn't allocate shared memory (size: {}). Please increase the cache memory capacity.\n", n));
}
return temp;
}
// Locks the pooled interprocess mutex that `index` hashes to (index mod pool size).
void SharedMemoryImageCache::lock(uint64_t index)
{
    const uint64_t pool_index = index % *mutex_pool_capacity_;
    mutex_array_[pool_index].lock();
}
// Unlocks the pooled interprocess mutex that `index` hashes to; must pair with lock().
void SharedMemoryImageCache::unlock(uint64_t index)
{
    const uint64_t pool_index = index % *mutex_pool_capacity_;
    mutex_array_[pool_index].unlock();
}
// Returns the address of the pooled mutex for `index` as an opaque pointer,
// letting callers perform their own scoped locking.
void* SharedMemoryImageCache::mutex(uint64_t index)
{
    const uint64_t pool_index = index % *mutex_pool_capacity_;
    return &mutex_array_[pool_index];
}
// Inserts a key/value pair, evicting from the front of the list until both the
// entry-count and byte budgets can accommodate the new value. Returns false if
// the value can never fit (larger than the whole byte capacity, or capacity < 1)
// or if the key already exists in the hashmap.
bool SharedMemoryImageCache::insert(std::shared_ptr<ImageCacheKey>& key, std::shared_ptr<ImageCacheValue>& value)
{
    // Reject values that cannot fit even in an empty cache.
    if (value->size > *capacity_nbytes_ || *capacity_ < 1)
    {
        return false;
    }

    // Evict oldest entries until there is room for this value.
    while (is_list_full() || is_memory_full(value->size))
    {
        remove_front();
    }

    // Recover the interprocess shared_ptrs hidden inside the std::shared_ptr facades
    // (stored in their null_deleter by create_key()/create_value()).
    auto key_impl = std::get_deleter<null_deleter<deleter_type<ImageCacheKey>>>(key)->get();
    auto value_impl = std::get_deleter<null_deleter<deleter_type<SharedMemoryImageCacheValue>>>(value)->get();

    auto item = boost::interprocess::make_managed_shared_ptr(
        segment_->find_or_construct<ImageCacheItemDetail>(boost::interprocess::anonymous_instance)(key_impl, value_impl),
        *segment_);

    // Only append to the eviction list when the hashmap insert succeeded
    // (insert fails when the key is already present).
    bool succeed = hashmap_->insert(key_impl, item);
    if (succeed)
    {
        push_back(item);
    }
    return succeed;
}
// Evicts the oldest entry by advancing the list head with a CAS loop.
// Multiple processes may race here: a successful CAS claims slot `head`
// exclusively. The loop retries when the CAS loses, or when the claimed slot
// was empty (nullptr), and exits when either an entry was evicted or the list
// became empty.
void SharedMemoryImageCache::remove_front()
{
    while (true)
    {
        uint32_t head = (*list_head_).load(std::memory_order_relaxed);
        uint32_t tail = (*list_tail_).load(std::memory_order_relaxed);
        if (head != tail)
        {
            // Remove front by increasing head
            if ((*list_head_)
                    .compare_exchange_weak(
                        head, (head + 1) % (*list_capacity_), std::memory_order_release, std::memory_order_relaxed))
            {
                auto& head_item = (*list_)[head];
                if (head_item) // it is possible that head_item is nullptr
                {
                    // Release the byte budget, drop the hashmap entry, then the list's
                    // reference; shared-memory refcounting frees the item when unused.
                    (*size_nbytes_).fetch_sub(head_item->value->size, std::memory_order_relaxed);
                    hashmap_->erase(head_item->key);
                    (*list_)[head].reset(); // decrease refcount
                    break;
                }
            }
        }
        else
        {
            break; // already empty
        }
    }
}
// Number of entries currently in the circular list: the head-to-tail distance,
// computed modulo the list capacity (capacity is added first to avoid unsigned underflow).
uint32_t SharedMemoryImageCache::size() const
{
    const uint32_t head = list_head_->load(std::memory_order_relaxed);
    const uint32_t tail = list_tail_->load(std::memory_order_relaxed);
    const uint32_t cap = *list_capacity_;
    return (tail + cap - head) % cap;
}
// Logical number of bytes currently held by cached values
// (sum of value sizes, not the segment's physical usage).
uint64_t SharedMemoryImageCache::memory_size() const
{
    return size_nbytes_->load(std::memory_order_relaxed);
}
// Maximum number of entries the cache accepts (hashmap capacity).
uint32_t SharedMemoryImageCache::capacity() const
{
    return *capacity_;
}
// Reports the physical segment size rather than the logical byte budget
// (capacity_nbytes_); the segment is larger to leave room for bookkeeping.
uint64_t SharedMemoryImageCache::memory_capacity() const
{
    // Return segment's size instead of the logical capacity.
    return segment_->get_size();
    //    return *capacity_nbytes_;
}
// Reports the segment's physical free bytes rather than the logical remainder
// (capacity_nbytes_ - size_nbytes_), matching memory_capacity() above.
uint64_t SharedMemoryImageCache::free_memory() const
{
    // Return segment's free memory instead of the logical free memory.
    return segment_->get_free_memory();
    //    return *capacity_nbytes_ - size_nbytes_->load(std::memory_order_relaxed);
}
// Enables/disables hit-miss statistics recording; always resets the counters,
// and mirrors the flag into both the local config and the shared-memory flag.
void SharedMemoryImageCache::record(bool value)
{
    config_.record_stat = value;
    stat_hit_->store(0, std::memory_order_relaxed);
    stat_miss_->store(0, std::memory_order_relaxed);
    *stat_is_recorded_ = value;
}
// Whether hit/miss statistics are currently being recorded (shared-memory flag).
bool SharedMemoryImageCache::record() const
{
    return *stat_is_recorded_;
}
// Number of cache hits recorded since the counters were last reset.
uint64_t SharedMemoryImageCache::hit_count() const
{
    return stat_hit_->load(std::memory_order_relaxed);
}
// Number of cache misses recorded since the counters were last reset.
uint64_t SharedMemoryImageCache::miss_count() const
{
    return stat_miss_->load(std::memory_order_relaxed);
}
// Grows the cache to a larger configuration (never shrinks: both branches only
// act when the new value is strictly larger). When the entry capacity grows, the
// circular list is enlarged and, if it had wrapped (tail < head), the wrapped
// prefix is shifted into the newly appended slots so head..tail is contiguous again.
// NOTE(review): no lock is taken here -- presumably callers serialize reserve();
// confirm against call sites.
void SharedMemoryImageCache::reserve(const ImageCacheConfig& config)
{
    uint64_t new_memory_capacity_nbytes = kOneMiB * config.memory_capacity;
    uint32_t new_capacity = config.capacity;

    if ((*capacity_nbytes_) < new_memory_capacity_nbytes)
    {
        (*capacity_nbytes_) = new_memory_capacity_nbytes;
    }

    if ((*capacity_) < new_capacity)
    {
        config_.capacity = config.capacity;
        config_.memory_capacity = config.memory_capacity;

        uint32_t old_list_capacity = (*list_capacity_);

        (*capacity_) = new_capacity;
        (*list_capacity_) = new_capacity + (*list_padding_);

        list_->reserve(*list_capacity_);
        list_->resize(*list_capacity_);
        hashmap_->reserve(new_capacity);

        // Move items in the vector
        // The ring had wrapped around; relocate the portion that sat at the old
        // beginning (indices 0..tail) into the freshly added slots.
        uint32_t head = (*list_head_).load(std::memory_order_relaxed);
        uint32_t tail = (*list_tail_).load(std::memory_order_relaxed);
        if (tail < head)
        {
            head = 0;
            uint32_t new_head = old_list_capacity;
            while (head != tail)
            {
                (*list_)[new_head] = (*list_)[head];
                (*list_)[head].reset();
                head = (head + 1) % old_list_capacity;
                new_head = (new_head + 1) % (*list_capacity_);
            }
            // Set new tail
            (*list_tail_).store(new_head, std::memory_order_relaxed);
        }
    }
}
// Looks up `key` in the hashmap; optionally records hit/miss statistics.
// Returns an empty shared_ptr when the key is absent.
std::shared_ptr<ImageCacheValue> SharedMemoryImageCache::find(const std::shared_ptr<ImageCacheKey>& key)
{
    // Recover the interprocess key hidden in the facade's null_deleter.
    auto key_impl = std::get_deleter<null_deleter<deleter_type<ImageCacheKey>>>(key)->get();

    MapValue::type item;
    const bool found = hashmap_->find(key_impl, item);

    // Update counters only when statistics recording is enabled.
    if (*stat_is_recorded_)
    {
        if (found)
        {
            (*stat_hit_).fetch_add(1, std::memory_order_relaxed);
        }
        else
        {
            (*stat_miss_).fetch_add(1, std::memory_order_relaxed);
        }
    }

    if (!found)
    {
        return std::shared_ptr<ImageCacheValue>();
    }
    // The null_deleter keeps a copy of `item` alive without deleting shared memory.
    return std::shared_ptr<ImageCacheValue>(item->value.get().get(), null_deleter<decltype(item)>(item));
}
// True once the number of live entries reaches the configured entry capacity.
bool SharedMemoryImageCache::is_list_full() const
{
    return size() >= *capacity_;
}
// True when admitting `additional_size` more bytes would exceed the byte budget.
bool SharedMemoryImageCache::is_memory_full(uint64_t additional_size) const
{
    const uint64_t used_nbytes = size_nbytes_->load(std::memory_order_relaxed);
    return (used_nbytes + additional_size) > *capacity_nbytes_;
}
// Appends an item at the tail of the circular list using a CAS loop: a successful
// compare-exchange claims slot `tail` exclusively for this process, after which
// the item is stored and the byte counter updated. On a lost race the tail is
// re-read and the CAS retried.
void SharedMemoryImageCache::push_back(cache_item_type<ImageCacheItemDetail>& item)
{
    uint32_t tail = (*list_tail_).load(std::memory_order_relaxed);
    while (true)
    {
        // Push back by increasing tail
        if ((*list_tail_)
                .compare_exchange_weak(
                    tail, (tail + 1) % (*list_capacity_), std::memory_order_release, std::memory_order_relaxed))
        {
            (*list_)[tail] = item;
            (*size_nbytes_).fetch_add(item->value->size, std::memory_order_relaxed);
            break;
        }
        tail = (*list_tail_).load(std::memory_order_relaxed);
    }
}
// Removes the entry for `key` from the hashmap only (the eviction list slot,
// if any, is cleaned up lazily by remove_front()). Returns whether a mapping existed.
bool SharedMemoryImageCache::erase(const std::shared_ptr<ImageCacheKey>& key)
{
    auto key_impl = std::get_deleter<null_deleter<deleter_type<ImageCacheKey>>>(key)->get();
    return hashmap_->erase(key_impl);
}
// Removes the named OS shared-memory object. Returns false when the global
// configuration (which provides the name) is unavailable or removal fails.
bool SharedMemoryImageCache::remove_shmem()
{
    cucim::config::Config* config = cucim::CuImage::get_config();
    if (!config)
    {
        return false;
    }
    const std::string shm_name = config->shm_name();
    return boost::interprocess::shared_memory_object::remove(shm_name.c_str());
}
// Hashmap bucket reservation: 4x the requested capacity, with a floor of 4 * 2^16.
uint32_t SharedMemoryImageCache::calc_hashmap_capacity(uint32_t capacity)
{
    constexpr uint32_t kMinHashmapCapacity = (1U << 16) * 4;
    return std::max(kMinHashmapCapacity, capacity * 4);
}
// Creates the managed shared-memory segment backing the cache. Any pre-existing
// shared memory object with the same name is removed first so the cache starts
// from a clean state; the segment is then sized by calc_segment_size().
std::unique_ptr<boost::interprocess::managed_shared_memory> SharedMemoryImageCache::create_segment(
    const ImageCacheConfig& config)
{
    // Remove the existing shared memory object.
    remove_shmem();
    auto segment = std::make_unique<boost::interprocess::managed_shared_memory>(
        boost::interprocess::open_or_create, cucim::CuImage::get_config()->shm_name().c_str(), calc_segment_size(config));
    return segment;
}
} // namespace cucim::cache
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/logger/timer.cpp | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/logger/timer.h"
#include <fmt/format.h>
namespace cucim::logger
{
// Constructs a timer with a printf/fmt-style message used by print().
//
// Fix: elapsed_seconds_ is now reset unconditionally. Previously it was only
// assigned when auto_start was true, so with auto_start == false,
// elapsed_time() (and the <= 0.0 check in the destructor) could read an
// uninitialized value -- TODO confirm the header gives it no in-class initializer.
//
// \param message     fmt format string receiving the elapsed seconds.
// \param auto_start  start timing immediately when true.
// \param auto_output print the elapsed time from the destructor when true.
Timer::Timer(const char* message, bool auto_start, bool auto_output)
{
    message_ = message;
    is_auto_output_ = auto_output;
    elapsed_seconds_ = 0.0;
    if (auto_start)
    {
        start_ = std::chrono::high_resolution_clock::now();
    }
}
// Begins (or restarts) a measurement: records a fresh starting timestamp
// and clears any previously measured duration.
void Timer::start()
{
    start_ = std::chrono::high_resolution_clock::now();
    elapsed_seconds_ = 0.0;
}
// Ends the measurement and returns the elapsed wall time in fractional seconds.
double Timer::stop()
{
    end_ = std::chrono::high_resolution_clock::now();
    const std::chrono::duration<double> elapsed = end_ - start_;
    elapsed_seconds_ = elapsed.count();
    return elapsed_seconds_;
}
// Returns the most recently measured duration in seconds
// (the value computed by the last stop(); does not stop the timer itself).
double Timer::elapsed_time()
{
    return elapsed_seconds_;
}
// Prints the elapsed seconds to stderr using `message` as the format string,
// falling back to the message supplied at construction when `message` is null.
// NOTE(review): the format string is a runtime value passed straight to
// fmt::print -- with fmt >= 8 this requires fmt::runtime(); confirm the pinned
// fmt version, and that callers never pass untrusted format strings.
void Timer::print(const char* message)
{
    if (message)
    {
        fmt::print(stderr, message, elapsed_seconds_);
    }
    else
    {
        fmt::print(stderr, message_, elapsed_seconds_);
    }
}
// If the timer was never stopped (elapsed is still <= 0.0), measure up to now;
// then optionally print the result when auto_output was requested.
// NOTE(review): a legitimate measurement of exactly 0.0 seconds would also be
// re-measured here -- harmless in practice, but worth knowing.
Timer::~Timer()
{
    if (elapsed_seconds_ <= 0.0)
    {
        end_ = std::chrono::high_resolution_clock::now();
        elapsed_seconds_ = std::chrono::duration_cast<std::chrono::duration<double>>(end_ - start_).count();
    }
    if (is_auto_output_)
    {
        print();
    }
}
} // namespace cucim::logger
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/logger/logger.cpp | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/cucim_framework.h | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUCIM_CUCIM_FRAMEWORK_H
#define CUCIM_CUCIM_FRAMEWORK_H
#include "cucim/core/framework.h"
#include "plugin_manager.h"
#include "cucim_plugin.h"
#include <mutex>
#include <memory>
#include <string>
#include <unordered_map>
namespace cucim
{
// Central plugin framework: owns the plugin registry, resolves interface
// dependencies between plugins, and hands out interface pointers to clients.
// All public operations that mutate registry state take the internal recursive
// mutex (see the .cpp); const accessors vary -- check each implementation.
class CuCIMFramework
{
public:
    CuCIMFramework();
    ~CuCIMFramework();

    // Registration / unregistration of plugins (by descriptor, file path, or instance).
    bool register_plugin(const char* client_name, const PluginRegistrationDesc& desc);
    bool register_plugin(const std::string& file_path, bool reloadable = false, bool unload = false);
    bool register_plugin(const std::shared_ptr<Plugin>& plugin);
    bool unregister_plugin(const char* name);
    void unregister_plugin(Plugin* plugin);
    // Terminates a plugin and collects it (and, eventually, dependents) for unloading.
    bool try_terminate_plugin(Plugin* plugin, std::vector<Plugin*>* plugins_to_load);
    void load_plugins(const PluginLoadingDesc& desc);
    void unload_all_plugins();

    // Dependency resolution: picks a concrete plugin for each required interface.
    bool resolve_plugin_dependencies(Plugin* plugin);
    bool resolve_interface_dependency(const Plugin::InterfaceData& info, bool log_errors);
    bool resolve_interface_dependency_with_logging(const Plugin::InterfaceData& desc);
    bool resolve_interface_dependency_no_logging(const Plugin::InterfaceData& desc);
    Plugin::Interface get_default_plugin(const InterfaceDesc& desc, bool optional);
    Plugin::Interface get_specific_plugin(const InterfaceDesc& desc, const char* plugin_name, bool optional);

    // Interface acquisition entry points used by framework clients.
    void* acquire_interface(const char* client, const InterfaceDesc& desc, const char* plugin_name, bool optional = false);
    void* acquire_interface_from_library(const char* client,
                                         const InterfaceDesc& desc,
                                         const char* library_path,
                                         bool optional = false);

    // Registry queries.
    size_t get_plugin_count() const;
    void get_plugins(PluginDesc* out_plugins) const;
    size_t get_plugin_index(const char* name) const;
    Plugin* get_plugin(size_t index) const;
    Plugin* get_plugin(const char* name) const;
    Plugin* get_plugin_by_library_path(const std::string& library_path);

    // cuCIM-specific methods;
    void load_plugin(const char* library_path);
    std::string& get_plugin_root();
    void set_plugin_root(const char* path);

private:
    // Per-interface bookkeeping: all plugins implementing an interface plus the
    // currently selected (default) one and an optional user-specified default name.
    struct CandidatesEntry
    {
        std::vector<Plugin::Interface> candidates;
        Plugin::Interface selected = {};
        std::string specifiedDefaultPlugin;
    };

    using Mutex = std::recursive_mutex;
    using ScopedLock = std::unique_lock<Mutex>;

    mutable Mutex mutex_; // guards the registry maps below
    std::vector<size_t> plugin_load_order_; // initialization order, unloaded in reverse
    PluginManager plugin_manager_; // owns Plugin instances by index
    std::unordered_map<std::string, size_t> library_path_to_plugin_index_;
    std::unordered_map<std::string, size_t> name_to_plugin_index_;
    std::unordered_map<std::string, CandidatesEntry> interface_candidates_; // keyed by interface name
    std::unordered_map<const void*, Plugin::Interface> ptr_to_interface_; // reverse map for acquired interfaces

    // cuCIM-specific fields;
    std::string plugin_root_path_; // directory searched for plugin libraries
};
} // namespace cucim
#endif // CUCIM_CUCIM_FRAMEWORK_H
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/plugin_manager.h | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUCIM_PLUGIN_MANAGER_H
#define CUCIM_PLUGIN_MANAGER_H
#include <cstddef>
#include <limits>
#include <vector>
#include <memory>
#include <unordered_set>
#include "cucim/macros/defines.h"
namespace cucim
{
class Plugin;
const size_t kInvalidPluginIndex = std::numeric_limits<size_t>::max();
// Index-stable plugin storage: plugins are appended to a vector and addressed by
// their index forever. Removal nulls the slot (indices are never reused or
// compacted), and the live indices are tracked in a separate set.
// Not thread-safe by itself; callers (CuCIMFramework) provide locking.
class PluginManager
{
public:
    // Stores the plugin and returns its permanent index.
    size_t add_plugin(std::shared_ptr<Plugin> plugin)
    {
        size_t index = plugin_list_.size();
        plugin_list_.push_back(std::move(plugin));
        plugin_indices_.insert(index);
        return index;
    }

    // Releases the slot at `index` (asserts the index is live).
    // Note: the vector keeps the (null) slot so other indices stay valid.
    void remove_plugin(size_t index)
    {
        CUCIM_ASSERT(plugin_indices_.find(index) != plugin_indices_.end());
        CUCIM_ASSERT(index < plugin_list_.size());
        plugin_indices_.erase(index);
        plugin_list_[index] = nullptr;
    }

    // Returns the plugin at `index`, or nullptr if the slot was removed.
    Plugin* get_plugin(size_t index) const
    {
        CUCIM_ASSERT(index < plugin_list_.size());
        return plugin_list_[index].get();
    }

    // Set of indices that currently hold a live plugin.
    const std::unordered_set<size_t>& get_plugin_indices() const
    {
        return plugin_indices_;
    }

private:
    std::vector<std::shared_ptr<Plugin>> plugin_list_; // slot per index; removed slots are null
    std::unordered_set<size_t> plugin_indices_; // live indices only
};
} // namespace cucim
#endif // CUCIM_PLUGIN_MANAGER_H
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/cucim_framework.cpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim_framework.h"
#include "cucim_plugin.h"
#include <memory>
#include <algorithm>
namespace cucim
{
// Default constructor; all members rely on their own default initialization.
CuCIMFramework::CuCIMFramework()
{
}
// Clears the global framework pointer on destruction.
// NOTE(review): g_cucim_framework is declared elsewhere; this assumes at most one
// framework instance is ever the "global" one -- confirm at the definition site.
CuCIMFramework::~CuCIMFramework()
{
    g_cucim_framework = nullptr;
    //    assert::deregisterAssertForClient();
    //    logging::deregisterLoggingForClient();
}
// Not implemented: registration via PluginRegistrationDesc is currently a stub
// that ignores its arguments and always reports failure.
bool CuCIMFramework::register_plugin(const char* client_name, const PluginRegistrationDesc& desc)
{
    (void)client_name;
    (void)desc;
    return false;
}
// Unregisters a plugin by name: terminates it, unloads every plugin collected by
// try_terminate_plugin(), then removes it from the registry. Returns false when
// no plugin with that name exists or termination was refused.
bool CuCIMFramework::unregister_plugin(const char* plugin_name)
{
    ScopedLock g(mutex_);
    Plugin* plugin = get_plugin(plugin_name);
    if (!plugin)
    {
        CUCIM_LOG_WARN("unregisterPlugin: Failed to find a plugin with a name: %s.", plugin_name ? plugin_name : "");
        return false;
    }

    std::vector<Plugin*> plugins_to_unload;
    if (try_terminate_plugin(plugin, &plugins_to_unload))
    {
        // Unload everything termination queued up (currently just this plugin).
        for (size_t idx = 0, plugin_count = plugins_to_unload.size(); idx < plugin_count; ++idx)
        {
            plugins_to_unload[idx]->unload();
        }
        unregister_plugin(plugin);
        return true;
    }
    return false;
}
// Removes a plugin from every registry structure: name map, library-path map,
// interface candidate lists (resetting a now-dangling selection), and finally
// the PluginManager slot.
// NOTE(review): `delete plugin` follows plugin_manager_.remove_plugin(), which
// drops the shared_ptr held in the manager. Unless that shared_ptr uses a no-op
// deleter, this looks like a potential double-delete -- confirm how the
// shared_ptr passed to register_plugin() is created.
void CuCIMFramework::unregister_plugin(Plugin* plugin)
{
    // Remove plugin from all storages
    name_to_plugin_index_.erase(plugin->name_cstr());
    const std::string& file_path = plugin->library_path();
    if (!file_path.empty())
        library_path_to_plugin_index_.erase(file_path);

    // Remove all its interfaces from candidates and reset selected if it not valid anymore
    const auto& interfaces = plugin->get_interfaces();
    for (size_t i = 0; i < interfaces.size(); i++)
    {
        CandidatesEntry& entry = interface_candidates_[interfaces[i].name];
        for (size_t k = 0; k < entry.candidates.size(); k++)
        {
            if (entry.candidates[k].plugin_index == plugin->index_)
            {
                // Replace with last element (unordered fast remove)
                if (entry.candidates.size() > 1)
                {
                    entry.candidates[k] = entry.candidates.back();
                }
                entry.candidates.resize(entry.candidates.size() - 1);
            }
        }
        if (!entry.selected.get_plugin(plugin_manager_))
            entry.selected = {};
    }
    plugin_manager_.remove_plugin(plugin->index_);
    delete plugin;
}
// Not implemented: bulk plugin loading is currently a no-op stub.
void CuCIMFramework::load_plugins(const PluginLoadingDesc& desc)
{
    (void)desc;
}
// Adds a plugin instance to the registry: stores it in the PluginManager
// (recording the assigned index on the plugin itself), registers every interface
// it implements as a candidate, and maps its name to the index. Always returns
// true; duplicate detection is still a TODO.
bool CuCIMFramework::register_plugin(const std::shared_ptr<Plugin>& plugin)
{
    ScopedLock g(mutex_);

    // TODO: duplicate check

    // Success storing plugin in all registries
    size_t plugin_index = plugin_manager_.add_plugin(plugin);
    plugin->index_ = plugin_index;
    const auto& interfaces = plugin->get_interfaces();
    for (size_t i = 0; i < interfaces.size(); i++)
    {
        interface_candidates_[interfaces[i].name].candidates.push_back({ plugin_index, i });
    }
    // TODO: reloadable check

    name_to_plugin_index_[plugin->name_cstr()] = plugin_index;

    return true;
}
// Number of currently registered (live) plugins, under the registry lock.
size_t CuCIMFramework::get_plugin_count() const
{
    ScopedLock g(mutex_);
    return plugin_manager_.get_plugin_indices().size();
}
// Copies the descriptor of every registered plugin into `out_plugins`, which
// must have room for at least get_plugin_count() entries. Enumeration order is
// that of the underlying unordered set (unspecified).
//
// Fix: returns immediately when `out_plugins` is null instead of taking the
// lock and iterating the whole registry for nothing (the original null-checked
// inside the loop, producing an identical no-op at extra cost).
void CuCIMFramework::get_plugins(PluginDesc* out_plugins) const
{
    if (!out_plugins)
    {
        return;
    }
    ScopedLock g(mutex_);
    const std::unordered_set<size_t>& plugins = plugin_manager_.get_plugin_indices();
    size_t i = 0;
    for (const auto& plugin_index : plugins)
    {
        out_plugins[i++] = plugin_manager_.get_plugin(plugin_index)->get_plugin_desc();
    }
}
// Maps a plugin name to its registry index, or kInvalidPluginIndex when unknown.
// NOTE(review): unlike most registry accessors here, no lock is taken -- confirm
// that callers already hold the framework mutex.
size_t CuCIMFramework::get_plugin_index(const char* name) const
{
    const auto it = name_to_plugin_index_.find(name);
    return (it == name_to_plugin_index_.end()) ? kInvalidPluginIndex : it->second;
}
// Returns the plugin at `index`, or nullptr for kInvalidPluginIndex
// (and for removed slots, via PluginManager).
Plugin* CuCIMFramework::get_plugin(size_t index) const
{
    return index != kInvalidPluginIndex ? plugin_manager_.get_plugin(index) : nullptr;
}
// Name-based lookup: resolves the name to an index, then to the plugin (or nullptr).
Plugin* CuCIMFramework::get_plugin(const char* name) const
{
    return get_plugin(get_plugin_index(name));
}
// Looks up a plugin by the library path it was loaded from; nullptr when unknown.
// NOTE(review): no lock is taken here -- confirm callers hold the framework mutex.
Plugin* CuCIMFramework::get_plugin_by_library_path(const std::string& library_path)
{
    const auto it = library_path_to_plugin_index_.find(library_path);
    if (it == library_path_to_plugin_index_.end())
    {
        return nullptr;
    }
    return get_plugin(it->second);
}
// Marks a plugin's dependencies as resolved. The real per-dependency resolution
// loop is commented out, so currently this only transitions the plugin's state
// kInprocess -> kResolved (the kInprocess window is what lets
// resolve_interface_dependency() detect circular dependencies) and always
// succeeds once entered.
bool CuCIMFramework::resolve_plugin_dependencies(Plugin* plugin)
{
    if (plugin->resolve_state_ == Plugin::ResolveState::kResolved)
        return true;

    //    const bool failed_before = (plugin->resolve_state_ == Plugin::ResolveState::kFailed);
    plugin->resolve_state_ = Plugin::ResolveState::kInprocess;

    //    bool resolveFailed = false;
    //    for (auto& dep : plugin->getDeps())
    //    {
    //        if (!resolveInterfaceDependencyWithLogging(dep))
    //        {
    //            CUCIM_LOG_ERROR("[Plugin: %s] Dependency: %s failed to be resolved.", plugin->getName(), CSTR(dep));
    //            resolveFailed = true;
    //        }
    //    }
    //
    //    if (resolveFailed)
    //    {
    //        plugin->resolveState = Plugin::ResolveState::eFailed;
    //        return false;
    //    }
    //
    //    if (failed_before)
    //    {
    //        CUCIM_LOG_INFO("[Plugin: %s] Dependencies were resolved now (failed before).", plugin->getName());
    //    }

    plugin->resolve_state_ = Plugin::ResolveState::kResolved;
    return true;
}
// Selects a plugin for the requested interface. If a plugin is already selected
// for that interface name, validates it (rejecting circular resolution, i.e. a
// plugin still in kInprocess state, and semantic version mismatches). Otherwise
// scans the candidates -- preferring the user-specified default plugin, else the
// first valid candidate -- resolves the chosen plugin's own dependencies, and
// records it as selected. Returns true only when a compatible plugin ends up
// selected. `log_errors` is currently unused (logging is commented out).
bool CuCIMFramework::resolve_interface_dependency(const Plugin::InterfaceData& desc, bool log_errors)
{
    (void)log_errors;

    const auto it = interface_candidates_.find(desc.name);
    if (it != interface_candidates_.cend())
    {
        // Check for selected (default) plugins first
        CandidatesEntry& entry = (*it).second;
        Plugin* plugin = entry.selected.get_plugin(plugin_manager_);
        if (plugin)
        {
            // Selected plugin is mid-resolution: this request closes a dependency
            // cycle, so fail instead of recursing forever.
            if (plugin->resolve_state_ == Plugin::ResolveState::kInprocess)
            {
                //                // todo: Give more insight on how it happened
                //                if (log_errors)
                //                {
                //                    CUCIM_LOG_ERROR(
                //                        "Circular dependency detected! Interface: %s requested. But plugin with an
                //                        interface: %s is already in resolving state.", CSTR(desc),
                //                        CSTR(entry.selected.get_interface_desc(m_registry)));
                //                }
                return false;
            }
            if (!is_version_semantically_compatible(
                    desc.version, entry.selected.get_interface_desc(plugin_manager_).version))
            {
                //                if (log_errors)
                //                {
                //                    CUCIM_LOG_ERROR(
                //                        "Interface: %s requested. But there is already a plugin with an interface: %s
                //                        loaded. Versions are incompatible. Only one version of the same
                //                        interface/plugin can exist at a time.", CSTR(desc),
                //                        CSTR(entry.selected.get_interface_desc(m_registry)));
                //                }
                return false;
            }
            return true;
        }

        // Search for all plugins with that interface for matching version. If any of them marked as default - pick it
        // and early out. If there is no defaults - the first one to match is selected, which should the highest
        // compatible version.
        Plugin::Interface candidate = {};
        for (Plugin::Interface& c : entry.candidates)
        {
            // Check that candidate is still valid (could have been unregistered)
            Plugin* candidatePlugin = c.get_plugin(plugin_manager_);
            if (candidatePlugin)
            {
                if (candidate.plugin_index == kInvalidPluginIndex)
                    candidate = c;
                if (c.get_plugin(plugin_manager_)->name_str() == entry.specifiedDefaultPlugin)
                {
                    candidate = c;
                    break;
                }
            }
        }

        // Resolve all dependencies recursively for the candidate if it has changed
        Plugin* candidate_plugin = candidate.get_plugin(plugin_manager_);
        if (candidate_plugin && entry.selected.plugin_index != candidate_plugin->index_)
        {
            // set candidate as selected to catch circular dependencies
            entry.selected = candidate;
            if (resolve_plugin_dependencies(candidate_plugin))
            {
                //                // the default plugin was just set for this interface: notify subscribers
                //                CUCIM_LOG_INFO(
                //                    "FrameworkImpl::resolveInterfaceDependency(): default plugin: %s was set for an
                //                    interface: %s", candidate_plugin->getName(), CSTR(desc));
                //                checkIfBasicPluginsAcquired(entry);
                return is_version_semantically_compatible(
                    desc.version, candidate.get_interface_desc(plugin_manager_).version);
            }
            else
            {
                // Candidate's own dependencies failed; roll back the selection.
                entry.selected = {};
                return false;
            }
        }
    }
    return false;
}
// Convenience wrapper: resolve with error logging enabled.
bool CuCIMFramework::resolve_interface_dependency_with_logging(const Plugin::InterfaceData& desc)
{
    return resolve_interface_dependency(desc, true);
}
// Convenience wrapper: resolve silently (used for optional lookups).
bool CuCIMFramework::resolve_interface_dependency_no_logging(const Plugin::InterfaceData& desc)
{
    return resolve_interface_dependency(desc, false);
}
// Terminates a plugin and schedules it for unloading. With the parent/child
// dependency tracking commented out, termination is currently unconditional and
// the function always returns true. When `plugins_to_unload` is provided the
// plugin is appended for the caller to unload later; otherwise it is unloaded
// immediately (logged as out-of-order, since unloading should normally happen
// after all terminations).
bool CuCIMFramework::try_terminate_plugin(Plugin* plugin, std::vector<Plugin*>* plugins_to_unload)
{
    //    // Terminate plugin if all clients released it
    //    if (!plugin->hasAnyParents())
    {
        // Shut down the plugin first
        plugin->terminate();

        //        // Release parent <-> child dependency recursively
        //        const Plugin::InterfaceSet& children = plugin->getChildren();
        //        for (const Plugin::Interface& child : children)
        //        {
        //            releasePluginDependency(plugin->getName(), child, pluginsToUnload);
        //        }
        //        plugin->clearChildren();

        if (plugins_to_unload)
        {
            plugins_to_unload->push_back(plugin);
        }
        else
        {
            CUCIM_LOG_WARN("%s: out-of-order unloading plugin %s", __func__, plugin->name_cstr());
            plugin->unload();
        }
        return true;
    }
    return false;
}
// Returns the default (selected) plugin interface for `desc`, triggering the
// resolve process on first request. Returns an empty Interface when no candidate
// exists or the selected candidate's version is semantically incompatible.
// `optional` only controls error logging (currently commented out).
Plugin::Interface CuCIMFramework::get_default_plugin(const InterfaceDesc& desc, bool optional)
{
    const auto it = interface_candidates_.find(desc.name);
    if (it != interface_candidates_.cend())
    {
        CandidatesEntry& entry = (*it).second;
        // If there is already selected plugin for this interface name, take it. Otherwise run
        // resolve process with will select plugins for all dependent interfaces recursively
        if (!entry.selected.get_plugin(plugin_manager_))
        {
            resolve_interface_dependency_no_logging(Plugin::InterfaceData{ desc.name, desc.version });
        }

        // In case of successful resolve there should be a valid candidate in this registry entry
        const Plugin::Interface& candidate = entry.selected;
        if (candidate.get_plugin(plugin_manager_))
        {
            // The version still could mismatch in case the candidate is the result of previous getInterface
            // calls
            if (!is_version_semantically_compatible(desc.version, candidate.get_interface_desc(plugin_manager_).version))
            {
                if (!optional)
                {
                    //                    CUCIM_LOG_ERROR(
                    //                        "Interface: %s requested. But there is already a plugin with an interface:
                    //                        %s loaded. Versions are incompatible. Only one version of the same
                    //                        interface/plugin can exist at a time.", CSTR(desc),
                    //                        CSTR(candidate.get_interface_desc(plugin_manager_)));
                }
                return {};
            }
            return candidate;
        }
    }
    return {};
}
// Returns the interface from a specifically named plugin: looks the plugin up by
// name, finds a matching interface (same name, semantically compatible version)
// among its declared interfaces, and resolves the plugin's dependencies. Returns
// an empty Interface on any failure; `optional` only controls error logging
// (currently commented out).
Plugin::Interface CuCIMFramework::get_specific_plugin(const InterfaceDesc& desc, const char* plugin_name, bool optional)
{
    // Search plugin by name
    Plugin* plugin = get_plugin(plugin_name);
    if (!plugin)
    {
        if (!optional)
        {
            //            CUCIM_LOG_ERROR("Failed to find a plugin with a name: %s", plugin_name);
        }
        return {};
    }

    // The interface version or name could mismatch, need to check
    const auto& interfaces = plugin->get_interfaces();
    Plugin::Interface candidate = {};
    for (size_t i = 0; i < interfaces.size(); i++)
    {
        if (interfaces[i].name == desc.name && is_version_semantically_compatible(desc.version, interfaces[i].version))
        {
            candidate = { plugin->index_, i };
            break;
        }
    }
    Plugin* candidatePlugin = candidate.get_plugin(plugin_manager_);
    if (!candidatePlugin)
    {
        if (!optional)
        {
            //            CUCIM_LOG_ERROR("Interface: %s with a plugin name: %s requested. Interface mismatched, it
            //            has interfaces: %s",
            //                            CSTR(desc), plugin->name_cstr(), CSTR(plugin->get_interfaces()));
        }
        return {};
    }

    // Check deps resolve, the actual resolve process could be triggered here if that's the first time plugin is
    // requested
    if (!resolve_plugin_dependencies(candidatePlugin))
    {
        if (!optional)
        {
            //            CUCIM_LOG_ERROR(
            //                "Interface: %s with a plugin name: %s requested. One of the plugin's dependencies failed
            //                to resolve.", CSTR(desc), candidatePlugin->name_cstr());
        }
        return {};
    }
    return candidate;
}
void CuCIMFramework::unload_all_plugins()
{
    // Tears down every registered plugin. Order matters here: terminate() runs
    // on ALL plugins (in reverse load order) before any unload(), so plugins
    // can still call into each other during their shutdown phase.
    ScopedLock g(mutex_);
    CUCIM_LOG_VERBOSE("Unload all plugins.");
    // Get all plugins from the registry and copy the set (because we are updating registry it inside of loops below)
    std::unordered_set<size_t> plugins = plugin_manager_.get_plugin_indices();
    // Unregister all plugins which aren't initialized (not used atm).
    for (size_t plugin_index : plugins)
    {
        Plugin* plugin = plugin_manager_.get_plugin(plugin_index);
        if (plugin && !plugin->is_initialized())
            unregister_plugin(plugin);
    }
    // Terminate and unload all plugins in reverse order compared to initialization
    for (auto it = plugin_load_order_.rbegin(); it != plugin_load_order_.rend(); ++it)
    {
        Plugin* plugin = get_plugin(*it);
        if (plugin)
            plugin->terminate();
    }
    for (auto it = plugin_load_order_.rbegin(); it != plugin_load_order_.rend(); ++it)
    {
        Plugin* plugin = get_plugin(*it);
        if (plugin)
            plugin->unload();
    }
    plugin_load_order_.clear();
    // Destroy all plugins in registry
    for (size_t plugin_index : plugins)
    {
        Plugin* plugin = plugin_manager_.get_plugin(plugin_index);
        if (plugin)
            unregister_plugin(plugin);
    }
    // m_reloadablePlugins.clear();
    interface_candidates_.clear();
    // Verify that now everything is back to initial state
    CUCIM_ASSERT(plugin_manager_.get_plugin_indices().empty() == true);
    CUCIM_ASSERT(name_to_plugin_index_.empty() == true);
    CUCIM_ASSERT(library_path_to_plugin_index_.empty() == true);
}
void* CuCIMFramework::acquire_interface(const char* client, const InterfaceDesc& desc, const char* plugin_name, bool optional)
{
    // Resolves and returns a raw pointer to the requested interface, lazily
    // initializing the owning plugin on first use. Returns nullptr on failure.
    // A null `plugin_name` selects the default plugin for the interface.
    if (!client)
        return nullptr;
    ScopedLock g(mutex_);
    const bool acquire_as_default = plugin_name ? false : true;
    Plugin::Interface candidate =
        acquire_as_default ? get_default_plugin(desc, optional) : get_specific_plugin(desc, plugin_name, optional);
    Plugin* plugin = get_plugin(candidate.plugin_index);
    if (!plugin)
    {
        if (!optional)
        {
            // CUCIM_LOG_ERROR(
            //     "Failed to acquire interface: %s, by client: %s (plugin name: %s)", CSTR(desc), client,
            //     pluginName);
        }
        return nullptr;
    }
    if (!plugin->is_initialized())
    {
        // Don't hold the mutex during initialization
        // NOTE(review): `plugin` is reused after relocking; this assumes no
        // concurrent unregistration invalidates it — confirm.
        g.unlock();
        // Lazily initialize plugins only when requested (on demand)
        Plugin::InitResult result = plugin->ensure_initialized();
        g.lock();
        if (result != Plugin::InitResult::kAlreadyInitialized)
        {
            if (result == Plugin::InitResult::kFailedInitialize)
            {
                if (!optional)
                {
                    if (plugin->is_in_initialization())
                    {
                        // CUCIM_LOG_ERROR(
                        //     "Trying to acquire plugin during it's initialization: %s
                        //     (interfaces: %s) (impl: %s). Circular acquire calls.",
                        //     plugin->name_cstr(), CSTR(plugin->get_interfaces()),
                        //     CSTR(plugin->get_impl_desc()));
                    }
                    else
                    {
                        // CUCIM_LOG_ERROR("Plugin load failed: %s (interfaces: %s) (impl:
                        //     %s).", plugin->name_cstr(),
                        //     CSTR(plugin->get_interfaces()),
                        //     CSTR(plugin->get_impl_desc()));
                    }
                }
                return nullptr;
            }
            // Add to the load order since loading was successful
            // TODO: Replace load order with dependency graph
            if (std::find(plugin_load_order_.begin(), plugin_load_order_.end(), plugin->index_) ==
                plugin_load_order_.end())
            {
                plugin_load_order_.push_back(plugin->index_);
            }
        }
    }
    // Finish up now that the plugin is initialized
    CUCIM_ASSERT(g.owns_lock());
    void* iface = candidate.get_interface_desc(plugin_manager_).ptr;
    CUCIM_ASSERT(iface);
    // Store plugin in the interface->plugin map
    ptr_to_interface_[iface] = candidate;
    // // Saving callers/clients of a plugin.
    // plugin->addParent(candidate.interfaceIndex, client, acquireAsDefault);
    //
    // // Saving child for a parent
    // if (parent)
    //     parent->addChild(candidate);
    return iface;
}
void* CuCIMFramework::acquire_interface_from_library(const char* client,
                                                     const InterfaceDesc& desc,
                                                     const char* library_path,
                                                     bool optional)
{
    // Resolve an interface from the plugin backing `library_path`, registering
    // the plugin on first use. Returns nullptr when the library cannot be
    // registered or the interface cannot be acquired.
    ScopedLock guard(mutex_);
    const std::string canonical_library_path(library_path);
    Plugin* plugin = get_plugin_by_library_path(canonical_library_path);
    if (!plugin && register_plugin(canonical_library_path))
    {
        // Newly registered: look it up again to obtain the Plugin record.
        plugin = get_plugin_by_library_path(canonical_library_path);
    }
    return plugin ? acquire_interface(client, desc, plugin->name_cstr(), optional) : nullptr;
}
bool CuCIMFramework::register_plugin(const std::string& file_path, bool reloadable, bool unload)
{
std::shared_ptr<Plugin> plugin = std::make_shared<Plugin>(file_path);
// Try preload
if (!plugin->preload(reloadable, unload))
{
// CUCIM_LOG_WARN("Potential plugin preload failed: %s", plugin->library_path());
return false;
}
if (register_plugin(plugin))
{
library_path_to_plugin_index_[file_path] = plugin->index_;
return true;
}
return false;
}
// cuCIM-specific methods
void CuCIMFramework::load_plugin(const char* library_path)
{
    // Ensure the plugin backing `library_path` is registered; this is a no-op
    // when the path is already present in the registry.
    ScopedLock guard(mutex_);
    const std::string canonical_library_path(library_path);
    if (get_plugin_by_library_path(canonical_library_path) == nullptr)
    {
        register_plugin(canonical_library_path);
    }
}
// Mutable reference to the configured plugin root directory path.
std::string& CuCIMFramework::get_plugin_root()
{
    return plugin_root_path_;
}

// Replace the plugin root directory path with `path`.
void CuCIMFramework::set_plugin_root(const char* path)
{
    plugin_root_path_ = path;
}
} // namespace cucim
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/plugin_manager.cpp | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin_manager.h"
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/framework.cpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define CUCIM_EXPORTS
#include "cucim/core/framework.h"
#include "cucim_framework.h"
#include "cucim/macros/defines.h"
#include <memory>
#include <mutex>
CUCIM_FRAMEWORK_GLOBALS("cucim")
namespace cucim
{
static std::unique_ptr<CuCIMFramework> g_framework;
// Free-function trampolines exposed through the C-style Framework table below.
// Each asserts that the global CuCIMFramework exists and forwards to it.
static bool register_plugin(const char* client_name, const PluginRegistrationDesc& desc)
{
    CUCIM_ASSERT(g_framework);
    return g_framework->register_plugin(client_name, desc);
}
// TODO: need to update for better plugin support - https://github.com/rapidsai/cucim/issues/134
// static void load_plugins(const PluginLoadingDesc& desc)
//{
//    CUCIM_ASSERT(g_framework);
//    return g_framework->load_plugins(desc);
//}
static void* acquire_interface_from_library_with_client(const char* client_name,
                                                        InterfaceDesc desc,
                                                        const char* library_path)
{
    CUCIM_ASSERT(g_framework);
    // Non-optional acquire: failures are reported, nullptr returned.
    return g_framework->acquire_interface_from_library(client_name, desc, library_path, false);
}
static void unload_all_plugins()
{
    CUCIM_ASSERT(g_framework);
    g_framework->unload_all_plugins();
}
static void load_plugin(const char* library_path)
{
    CUCIM_ASSERT(g_framework);
    g_framework->load_plugin(library_path);
}
static const char* get_plugin_root()
{
    CUCIM_ASSERT(g_framework);
    // Pointer remains valid only while the framework's root string is unchanged.
    return g_framework->get_plugin_root().c_str();
}
static void set_plugin_root(const char* path)
{
    CUCIM_ASSERT(g_framework);
    g_framework->set_plugin_root(path);
}
// Builds the function-pointer table handed to framework clients. The entry
// order must match the Framework struct declaration in cucim/core/framework.h.
static Framework get_framework_impl()
{
    // clang-format off
    return
    {
        register_plugin,
        acquire_interface_from_library_with_client,
        unload_all_plugins,
        load_plugin,
        get_plugin_root,
        set_plugin_root,
    };
    // clang-format on
}
namespace
{
// Function-local static (Meyers singleton) for the mutex guarding creation and
// destruction of g_framework; avoids static-initialization-order issues.
std::mutex& acquire_framework_mutex()
{
    static std::mutex mutex;
    return mutex;
}
} // namespace
CUCIM_API Framework* acquire_framework(const char* app_name, Version framework_version)
{
    // Returns the process-wide Framework function table, creating the global
    // CuCIMFramework instance on the first call. `app_name` and
    // `framework_version` are currently unused (see the commented-out
    // compatibility check below).
    (void) app_name;
    (void) framework_version;
    // if (!is_version_semantically_compatible(kFrameworkVersion, frameworkVersion))
    // {
    //     // Using CARB_LOG here is pointless because logging hasn't been set up yet.
    //     fprintf(stderr,
    //             "[App: %s] Incompatible Framework API version. ...", ...);
    //     return nullptr;
    // }
    static Framework framework = get_framework_impl();
    // Fix: take the lock BEFORE reading g_framework. The previous
    // double-checked pattern read the unique_ptr outside the mutex, which is a
    // data race with the concurrent write inside the construction branch.
    std::lock_guard<std::mutex> g(acquire_framework_mutex());
    if (!g_framework)
    {
        g_framework = std::make_unique<CuCIMFramework>();
        g_cucim_framework = &framework;
        g_cucim_client_name = "cucim";
    }
    return &framework;
}
CUCIM_API void release_framework()
{
    // Tear down the global framework instance if it exists: unload every
    // plugin, clear the global table pointer, then destroy the instance.
    std::lock_guard<std::mutex> lock(acquire_framework_mutex());
    if (!g_framework)
    {
        return;
    }
    g_framework->unload_all_plugins();
    g_cucim_framework = nullptr;
    g_framework.reset();
}
} // namespace cucim
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/version.inl | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUCIM_VERSION_INL
#define CUCIM_VERSION_INL
#include "cucim/core/version.h"
namespace cucim
{
// Lexicographic ordering on (major, minor).
constexpr bool operator<(const InterfaceVersion& lhs, const InterfaceVersion& rhs)
{
    return (lhs.major != rhs.major) ? (lhs.major < rhs.major) : (lhs.minor < rhs.minor);
}
// Exact (major, minor) equality.
constexpr bool operator==(const InterfaceVersion& lhs, const InterfaceVersion& rhs)
{
    return lhs.major == rhs.major && lhs.minor == rhs.minor;
}
// Semantic-versioning compatibility check (see http://semver.org):
//  - majors must match exactly;
//  - when major == 0, minors must also match exactly (0.x APIs are unstable);
//  - otherwise the candidate's minor must be at least the required minor.
constexpr bool is_version_semantically_compatible(const InterfaceVersion& minimum, const InterfaceVersion& candidate)
{
    if (minimum.major != candidate.major)
    {
        return false;
    }
    if (minimum.major == 0 && minimum.minor != candidate.minor)
    {
        return false;
    }
    return minimum.minor <= candidate.minor;
}
} // namespace cucim
#endif // CUCIM_VERSION_INL
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/cucim_plugin.h | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CUCIM_CUCIM_PLUGIN_H
#define CUCIM_CUCIM_PLUGIN_H
#include <string>
#include <vector>
#include <mutex>
#include <sstream>
#include "cucim/core/plugin.h"
#include "cucim/core/interface.h"
#include "cucim/dynlib/helper.h"
#include "plugin_manager.h"
#include "version.inl"
namespace cucim
{
// A single dynamically loaded cuCIM plugin: owns the library handle, the
// registration data reported by the plugin, and its load/initialize/terminate
// life cycle. Instances are registered and indexed by PluginManager.
class Plugin
{
public:
    // Progress of dependency resolution for this plugin.
    enum class ResolveState
    {
        kUnused,
        kInprocess,
        kResolved,
        kFailed
    };
    // Result of ensure_initialized(): distinguishes "was already initialized",
    // "initialized by this call", and "failed", so callers can act atomically
    // on first initialization.
    enum class InitResult
    {
        kFailedInitialize = 0,
        kAlreadyInitialized,
        kDidInitialize
    };
    // Owned (std::string-backed) copy of an interface description plus the
    // implementation pointer and struct size reported at registration time.
    struct InterfaceData
    {
        std::string name;
        InterfaceVersion version = { 0, 0 };
        void* ptr = nullptr; // interface implementation; memory owned by the plugin library
        uint64_t size = 0; // size of the interface struct in bytes
        // View this record as the C-style descriptor used across the plugin ABI.
        InterfaceDesc to_interface_desc() const
        {
            return InterfaceDesc{ name.c_str(), { version.major, version.minor } };
        }
        // Copy a C-style descriptor into owned storage (defined in the .cpp).
        void store(const InterfaceDesc& desc);
    };
    // Owned copy of the plugin's implementation metadata (PluginImplDesc).
    struct ImplementationDesc
    {
        std::string name;
        Version version;
        std::string build;
        std::string author;
        std::string description;
        std::string long_description;
        std::string license;
        std::string url;
        std::string platforms;
        PluginHotReload hot_reload = PluginHotReload::kDisabled;
        // View as the C-style PluginImplDesc.
        // NOTE(review): always reports kDisabled instead of the stored
        // `hot_reload` member — confirm whether this is intentional (hot
        // reload is currently disabled framework-wide).
        PluginImplDesc to_plugin_impl() const
        {
            return PluginImplDesc{ name.c_str(), version,
                                   build.c_str(), author.c_str(),
                                   description.c_str(), long_description.c_str(),
                                   license.c_str(), url.c_str(),
                                   platforms.c_str(), PluginHotReload::kDisabled};
        }
        void store(const PluginImplDesc& desc);
    };
    // Lightweight handle to one interface of one plugin: a pair of indices
    // into the PluginManager registry. Default-constructed handles are invalid.
    struct Interface
    {
        Interface() : plugin_index(kInvalidPluginIndex), interface_index(0)
        {
        }
        Interface(size_t plugin_idx, size_t interface_idx) : plugin_index(plugin_idx), interface_index(interface_idx)
        {
        }
        size_t plugin_index;
        size_t interface_index;
        // Owning plugin, or nullptr when this handle is empty/invalid.
        Plugin* get_plugin(const PluginManager& registry) const
        {
            return plugin_index != kInvalidPluginIndex ? registry.get_plugin(plugin_index) : nullptr;
        }
        // Interface record; only valid when plugin_index refers to a live plugin.
        const Plugin::InterfaceData& get_interface_desc(const PluginManager& registry) const
        {
            return registry.get_plugin(plugin_index)->get_interfaces()[interface_index];
        }
        bool operator==(const Interface& other) const
        {
            return ((plugin_index == other.plugin_index) && (interface_index == other.interface_index));
        }
    };
    Plugin();
    explicit Plugin(const std::string& file_path);
    ~Plugin();
    const char* name_cstr() const
    {
        return name_.c_str();
    }
    std::string name_str() const
    {
        return name_;
    }
    const char* library_path() const
    {
        return library_path_.c_str();
    }
    bool is_initialized() const
    {
        return is_initialized_;
    }
    bool is_in_initialization() const
    {
        return is_in_initialization_;
    }
    // Load the library to collect registration data; optionally unload afterwards.
    bool preload(bool reloadable, bool unload);
    // Thread-safe lazy initialization (see InitResult).
    InitResult ensure_initialized();
    bool initialize();
    void terminate();
    void unload();
    size_t index_; // index of this plugin in the PluginManager registry
    ResolveState resolve_state_; // dependency-resolution progress
    const std::vector<Plugin::InterfaceData>& get_interfaces() const
    {
        return data_[kVersionStateCurrent].interfaces;
    }
    const ImplementationDesc& get_impl_desc() const
    {
        return data_[kVersionStateCurrent].desc;
    }
    const PluginDesc& get_plugin_desc() const
    {
        return plugin_desc_;
    }
private:
    // Two slots of versioned data (backup + current); a remnant of hot-reload support.
    static constexpr uint32_t kVersionStateBackup = 0;
    static constexpr uint32_t kVersionStateCurrent = 1;
    static constexpr uint32_t kVersionStateCount = 2;
    // Registration data captured from one load of the library.
    struct VersionedData
    {
        VersionedData() = default;
        int version = 0;
        ImplementationDesc desc;
        uint64_t interface_size = 0;
        std::vector<InterfaceData> interfaces;
        std::vector<InterfaceDesc> plugin_interfaces; // C-style views into `interfaces`
        std::vector<InterfaceData> dependencies;
        std::vector<InterfaceDesc> plugin_dependencies; // C-style views into `dependencies`
    };
    // Resolve a named symbol from the loaded library into `handle`.
    template <typename T>
    bool init_plugin_fn(T& handle, const char* name, bool optional = false) const;
    bool prepare_file_to_load(std::string& out_lib_file_path, int version);
    bool fill_registration_data(int version, bool full, const std::string& lib_file);
    bool check_framework_version();
    bool try_load(int version, bool full);
    bool load(int version = 0, bool full = true);
    VersionedData data_[kVersionStateCount];
    std::string library_path_;
    std::string name_;
    PluginDesc plugin_desc_;
    dynlib::LibraryHandle library_handle_;
    OnGetFrameworkVersionFn on_get_framework_version_;
    OnPluginRegisterFn on_register_;
    OnGetPluginDepsFn on_get_deps_;
    bool is_loaded_;
    bool is_initialized_;
    bool is_in_initialization_; // breaks circular initialization chains
    bool is_reloadable_;
    int next_version_; // version counter handed to load() on each initialization
    std::recursive_mutex init_lock_; // guards initialize()/terminate()
};
// Two interface records match when both name and (major, minor) version match.
inline bool operator==(const Plugin::InterfaceData& lhs, const Plugin::InterfaceData& rhs)
{
    return lhs.name == rhs.name && lhs.version == rhs.version;
}
// Stream an interface record as "[name vMAJOR.MINOR]".
inline std::ostream& operator<<(std::ostream& o, const Plugin::InterfaceData& info)
{
    o << "[" << info.name << " v" << info.version.major << "." << info.version.minor << "]";
    return o;
}
// Stream a comma-separated list of interface records.
inline std::ostream& operator<<(std::ostream& o, const std::vector<Plugin::InterfaceData>& interfaces)
{
    for (size_t i = 0; i < interfaces.size(); i++)
    {
        o << (i > 0 ? "," : "") << interfaces[i];
    }
    return o;
}
// Stream an implementation descriptor by its name only.
inline std::ostream& operator<<(std::ostream& o, const Plugin::ImplementationDesc& info)
{
    o << info.name;
    return o;
}
// Stream a C-style InterfaceDesc using the InterfaceData formatting above.
inline std::ostream& operator<<(std::ostream& o, const InterfaceDesc& info)
{
    o << Plugin::InterfaceData{ info.name, info.version };
    return o;
}
// Render any streamable value to a std::string via its operator<<.
template <class T>
std::string toString(const T& x)
{
    std::ostringstream stream;
    stream << x;
    return stream.str();
}
// Convenience: render to a temporary C string (valid only within the enclosing
// full expression).
#define CSTR(x) toString(x).c_str()
} // namespace cucim
#endif // CUCIM_CUCIM_PLUGIN_H
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/core/cucim_plugin.cpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim_plugin.h"
#include "cucim/core/framework.h"
#include "cucim/core/plugin_util.h"
#include <algorithm>
#include <cstring>
#include <filesystem>
namespace cucim
{
// Default-construct a plugin record: no library loaded, no symbols resolved,
// and all life-cycle flags cleared.
Plugin::Plugin()
    : index_(0),
      resolve_state_(ResolveState::kUnused),
      plugin_desc_(),
      library_handle_(nullptr),
      on_get_framework_version_(nullptr),
      on_register_(nullptr),
      on_get_deps_(nullptr),
      // m_carbOnPluginPreStartupFn(nullptr),
      // m_carbOnPluginStartupFn(nullptr),
      // m_carbOnPluginShutdownFn(nullptr),
      // m_carbOnPluginPostShutdownFn(nullptr),
      // m_carbOnReloadDependencyFn(nullptr),
      is_loaded_(false),
      is_initialized_(false),
      is_in_initialization_(false),
      is_reloadable_(false),
      next_version_(0)
      // m_fileSystem(fs)
{
}
Plugin::Plugin(const std::string& file_path) : Plugin()
{
    // Derive the plugin name from the library file name: everything before the
    // first '@' character. Files without '@' get the fallback "cucim.unknown".
    const std::string filename = std::filesystem::path(file_path).filename().string();
    const std::size_t at_pos = filename.find('@');
    name_ = (at_pos == std::string::npos) ? std::string("cucim.unknown") : filename.substr(0, at_pos);
    library_path_ = file_path;
}
// Ensures the dynamic library is closed when the plugin record is destroyed.
Plugin::~Plugin()
{
    unload();
}
// Looks up the symbol `name` in the loaded plugin library and stores it in
// `handle`. Returns false (with a warning) when a required symbol is missing;
// for optional symbols a null result is not an error.
template <typename T>
bool Plugin::init_plugin_fn(T& handle, const char* name, bool optional) const
{
    handle = dynlib::get_library_symbol<T>(library_handle_, name);
    if (!handle && !optional)
    {
        CUCIM_LOG_WARN("[Plugin: %s] Could not locate the function: %s", name_cstr(), name);
        return false;
    }
    return true;
}
// Determines the library file to dlopen for this plugin. Reloadable-plugin
// support (copying the library to a temp folder with a versioned name, PDB
// relinking on Windows) is currently disabled, so the original library path is
// always used and `version` is ignored.
bool Plugin::prepare_file_to_load(std::string& out_lib_file_path, int version)
{
    (void) version;
    // if (!is_reloadable_)
    // {
    out_lib_file_path = library_path_;
    return true;
    // }
    // NOTE: the remainder of the Carbonite-derived hot-reload implementation
    // (temp-folder copy, versioned file names, and PDB handling) lived here
    // and is intentionally kept out while reload support is disabled.
}
// Queries the plugin's registration entry point and caches the returned
// metadata (implementation descriptor + interface list) in the "current"
// versioned-data slot. When `full` is set, the interface implementation
// pointers/sizes are captured as well. `lib_file` is unused while hot-reload
// section support is disabled.
bool Plugin::fill_registration_data(int version, bool full, const std::string& lib_file)
{
    (void)lib_file;
    // Retrieve registration information
    PluginEntry entry;
    on_register_(get_framework(), &entry);
    // Versioned data to fill:
    VersionedData& d = data_[kVersionStateCurrent];
    // Sort interfaces by name to keep order always the same
    std::sort(entry.interfaces, entry.interfaces + entry.interface_count,
              [](const PluginEntry::Interface& a, const PluginEntry::Interface& b) -> bool {
                  return std::strcmp(a.desc.name, b.desc.name) < 0;
              });
    d.plugin_interfaces.resize(entry.interface_count);
    d.interfaces.resize(entry.interface_count);
    for (size_t i = 0; i < entry.interface_count; i++)
    {
        d.interfaces[i].store(entry.interfaces[i].desc);
        d.plugin_interfaces[i] = d.interfaces[i].to_interface_desc();
    }
    d.desc.store(entry.desc);
    // The plugin's self-reported name overrides the name parsed from the file name.
    name_ = d.desc.name;
    if (full)
    {
        // Capture the raw interface implementation pointers. (The original
        // Carbonite implementation copied each interface into internal buffers
        // to support hot reload; that code is kept out while reload support is
        // disabled, so the pointers below point directly into the plugin
        // library.)
        for (size_t i = 0; i < entry.interface_count; i++)
        {
            const void* iface_ptr = entry.interfaces[i].ptr;
            uint64_t iface_size = entry.interfaces[i].size;
            d.interfaces[i].ptr = const_cast<void*>(iface_ptr); // owned by the plugin library
            d.interfaces[i].size = iface_size;
        }
    }
    // (Commented-out hot-reload section loading and dependency collection were
    // here; d.dependencies / d.plugin_dependencies stay empty until restored.)
    // Fill PluginDesc
    plugin_desc_ = { get_impl_desc().to_plugin_impl(), library_path_.c_str(), d.plugin_interfaces.data(),
                     d.plugin_interfaces.size(), d.plugin_dependencies.data(), d.plugin_dependencies.size() };
    // Save version
    d.version = version;
    return true;
}
// Verifies the framework API version the plugin was built against is
// compatible with this framework build: majors must match exactly, and the
// plugin's minor must not be newer than the framework's.
bool Plugin::check_framework_version()
{
    const Version version = on_get_framework_version_();
    if (kFrameworkVersion.major != version.major)
    {
        CUCIM_LOG_ERROR(
            "[Plugin: %s] Incompatible Framework API major version: %" PRIu32 "", name_cstr(), kFrameworkVersion.major);
        return false;
    }
    if (kFrameworkVersion.minor < version.minor)
    {
        // Fix: this message previously logged the *major* version field.
        CUCIM_LOG_ERROR(
            "[Plugin: %s] Incompatible Framework API minor version: %" PRIu32 "", name_cstr(), kFrameworkVersion.minor);
        return false;
    }
    return true;
}
// Loads the plugin's dynamic library and collects its registration data.
// `full` also captures interface implementation pointers (a preload with
// unload=true passes full=false). Returns true if already loaded. On failure
// the caller (load()) is responsible for rolling back via unload().
bool Plugin::try_load(int version, bool full)
{
    if (is_loaded_)
    {
        return is_loaded_;
    }
    // CUCIM_LOG_VERBOSE("[Plugin: %s] %s", name_cstr(), full ? "Loading..." : "Preloading...");
    std::string lib_file;
    if (!prepare_file_to_load(lib_file, version))
    {
        return false;
    }
    // Load library
    library_handle_ = dynlib::load_library(lib_file.c_str());
    if (!library_handle_)
    {
        CUCIM_LOG_ERROR("[Plugin: %s] Could not load the dynamic library from %s. Error: %s", name_cstr(),
                        lib_file.c_str(), dynlib::get_last_load_library_error().c_str());
        return false;
    }
    // Resolve the mandatory plugin entry points; the version check runs before
    // registration so incompatible plugins never register. The deps lookup is
    // optional (last argument = true).
    if (!init_plugin_fn(on_get_framework_version_, kCuCIMOnGetFrameworkVersionFnName))
        return false;
    if (!check_framework_version())
        return false;
    if (!init_plugin_fn(on_register_, kCuCIMOnPluginRegisterFnName))
        return false;
    if (!init_plugin_fn(on_get_deps_, kCuCIMOnGetPluginDepsFnName, true))
        return false;
    // (Commented-out Carbonite-style startup/shutdown symbol lookups were here.)
    // Register
    if (!fill_registration_data(version, full, lib_file))
    {
        CUCIM_LOG_ERROR("[Plugin: %s] Could not load the dynamic library from %s. Error: fill_registration_data() failed!",
                        name_cstr(), lib_file.c_str());
        return false;
    }
    // Load was successful
    is_loaded_ = true;
    return is_loaded_;
}
// Wrapper around try_load() that rolls back (unloads the library) on failure.
bool Plugin::load(int version, bool full)
{
    const bool loaded = try_load(version, full);
    if (!loaded)
    {
        unload();
    }
    return loaded;
}
// Close the dynamic library if it is open; safe to call repeatedly.
void Plugin::unload()
{
    if (!library_handle_)
    {
        return;
    }
    dynlib::unload_library(library_handle_);
    library_handle_ = nullptr;
    is_loaded_ = false;
}
// Load the library to collect registration data. When `unload` is true the
// library is released again afterwards (metadata-only preload); a full load
// is performed otherwise. Returns false when loading fails.
bool Plugin::preload(bool reloadable, bool unload)
{
    is_reloadable_ = reloadable;
    if (!load(0, /*full=*/!unload))
    {
        return false;
    }
    if (unload)
    {
        this->unload();
    }
    return true;
}
// Thread-safe lazy initialization. Uses a check / lock / re-check sequence so
// the common already-initialized case avoids the mutex, and exactly one caller
// observes kDidInitialize.
Plugin::InitResult Plugin::ensure_initialized()
{
    // Fast path: already initialized
    if (is_initialized_)
    {
        return InitResult::kAlreadyInitialized;
    }
    // Check again after locking mutex
    std::lock_guard<std::recursive_mutex> lock(init_lock_);
    if (is_initialized_)
    {
        return InitResult::kAlreadyInitialized;
    }
    return initialize() ? InitResult::kDidInitialize : InitResult::kFailedInitialize;
}
// Loads the plugin library (with the next version number) and marks the
// plugin initialized. Re-entrant calls on the same thread are rejected via
// is_in_initialization_ to break circular initialization chains (the mutex is
// recursive, so it alone would not stop them).
bool Plugin::initialize()
{
    std::lock_guard<std::recursive_mutex> lock(init_lock_);
    // another thread could have beaten us into the locked region between when the 'initialized'
    // flag was originally checked (before this call) and when the lock was actually acquired.
    // If this flag is set, that means the other thread won and the plugin has already been
    // fully initialized. In this case there is nothing left for us to do here but succeed.
    if (is_initialized_)
    {
        return true;
    }
    if (is_in_initialization_)
    {
        // Don't recursively initialize
        return false;
    }
    is_in_initialization_ = true;
    // failed to load the plugin library itself => fail and allow the caller to try again later.
    if (load(next_version_++))
    {
        // (Commented-out Carbonite-style pre-startup/startup callbacks were here.)
        is_initialized_ = true;
    }
    is_in_initialization_ = false;
    return is_initialized_;
}
// Marks the plugin as uninitialized. Shutdown callbacks (Carbonite-style) are
// currently disabled, and the library itself stays loaded until unload().
// No-op when the plugin was never initialized or is not loaded.
void Plugin::terminate()
{
    std::lock_guard<std::recursive_mutex> lock(init_lock_);
    if (!is_initialized_ || !is_loaded_)
        return;
    // if (m_carbOnPluginShutdownFn)
    // {
    //     m_carbOnPluginShutdownFn();
    // }
    //
    // if (m_carbOnPluginPostShutdownFn)
    // {
    //     m_carbOnPluginPostShutdownFn();
    // }
    is_initialized_ = false;
}
// Assign `value` to `str` only when the contents differ, avoiding a needless
// reallocation for the (common) unchanged case.
static void update_if_changed(std::string& str, const char* value)
{
    if (str.compare(value) != 0)
        str = value;
}
// Copy a C-style InterfaceDesc into owned storage; ptr/size are left untouched.
void Plugin::InterfaceData::store(const InterfaceDesc& desc)
{
    update_if_changed(name, desc.name);
    version = desc.version;
}
// Copy a C-style PluginImplDesc into owned std::string storage, skipping
// string assignments whose contents are unchanged.
void Plugin::ImplementationDesc::store(const PluginImplDesc& desc)
{
    update_if_changed(name, desc.name);
    version = desc.version;
    update_if_changed(build, desc.build);
    update_if_changed(author, desc.author);
    update_if_changed(description, desc.description);
    update_if_changed(long_description, desc.long_description);
    update_if_changed(license, desc.license);
    update_if_changed(url, desc.url);
    update_if_changed(platforms, desc.platforms);
    hot_reload = desc.hot_reload;
}
} // namespace cucim
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/profiler/profiler.cpp | /*
* Apache License, Version 2.0
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/profiler/profiler.h"
#include "cucim/cuimage.h"
namespace cucim::profiler
{
// Construct a profiler from the given configuration. (NOTE(review): whether
// config_ copies or references `config` depends on the member declaration in
// the header — confirm.)
Profiler::Profiler(ProfilerConfig& config) : config_(config){};
// Mutable access to the active configuration.
ProfilerConfig& Profiler::config()
{
    return config_;
}
// Returns a copy of the active configuration.
ProfilerConfig Profiler::get_config() const
{
    return config_;
}
// Enable or disable tracing.
void Profiler::trace(bool value)
{
    config_.trace = value;
}
/**
 * @brief Return whether if trace is enabled or not
 *
 * @return true if profiler is enabled. false otherwise
 */
bool Profiler::trace() const
{
    return config_.trace;
}
} // namespace cucim::profiler
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/profiler/profiler_config.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/profiler/profiler_config.h"

#include <fmt/format.h>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

namespace cucim::profiler
{

// Populate this config from a parsed JSON document.
// `json_obj` must point to an nlohmann::json object; only a boolean "trace"
// key is honored, everything else is ignored.
void ProfilerConfig::load_config(const void* json_obj)
{
    const json& cfg = *(static_cast<const json*>(json_obj));

    const bool has_trace_flag = cfg.contains("trace") && cfg["trace"].is_boolean();
    if (has_trace_flag)
    {
        trace = cfg.value("trace", kDefaultProfilerTrace);
    }
}

} // namespace cucim::profiler
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/filesystem/file_handle.cpp | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/filesystem/file_handle.h"

#include <sys/stat.h>

#include "cucim/codec/hash_function.h"

// Default-construct an empty/invalid handle: no file descriptor (-1),
// no cuFile handle, unknown type, zeroed file identity, and no fd ownership.
CuCIMFileHandle::CuCIMFileHandle()
    : fd(-1),
      cufile(nullptr),
      type(FileHandleType::kUnknown),
      path(nullptr),
      client_data(nullptr),
      hash_value(0),
      dev(0),
      ino(0),
      mtime(0),
      own_fd(false)
{
}
// Construct a handle around an already-open descriptor. The file identity
// (device id, inode, mtime nanoseconds) is read via fstat() and folded into
// `hash_value` for use as a cache key.
//
// Fix: explicitly initialize `own_fd` to false — this constructor previously
// left it unset while both sibling constructors initialize it, so the flag
// could hold an indeterminate value and trigger a spurious close of `fd`.
CuCIMFileHandle::CuCIMFileHandle(int fd, CUfileHandle_t cufile, FileHandleType type, char* path, void* client_data)
    : fd(fd), cufile(cufile), type(type), path(path), client_data(client_data), own_fd(false)
{
    struct stat st;
    fstat(fd, &st);

    dev = static_cast<uint64_t>(st.st_dev);
    ino = static_cast<uint64_t>(st.st_ino);
    // Note: only the nanosecond component of st_mtim is used.
    mtime = static_cast<uint64_t>(st.st_mtim.tv_nsec);
    hash_value = cucim::codec::splitmix64_3(dev, ino, mtime);
}
// Construct a handle with caller-supplied file identity (dev/ino/mtime),
// avoiding an extra fstat() when the values are already known.
// `own_fd` tells whether this handle is responsible for closing `fd`.
CuCIMFileHandle::CuCIMFileHandle(int fd,
                                 CUfileHandle_t cufile,
                                 FileHandleType type,
                                 char* path,
                                 void* client_data,
                                 uint64_t dev,
                                 uint64_t ino,
                                 int64_t mtime,
                                 bool own_fd)
    : fd(fd), cufile(cufile), type(type), path(path), client_data(client_data), dev(dev), ino(ino), mtime(mtime), own_fd(own_fd)
{
    // Same hash recipe as the fstat-based constructor, for consistent cache keys.
    hash_value = cucim::codec::splitmix64_3(dev, ino, mtime);
}
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/filesystem/cufile_driver.cpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/filesystem/cufile_driver.h"

#include <fcntl.h>
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <unistd.h>

#include <chrono>

#include <cuda_runtime.h>
#include <fmt/format.h>

#include "cucim/util/cuda.h"
#include "cucim/util/platform.h"
#include "cufile_stub.h"

// Round `x` up/down to a multiple of `align_to` (`align_to` must be a power of two).
#define ALIGN_UP(x, align_to) (((uint64_t)(x) + ((uint64_t)(align_to)-1)) & ~((uint64_t)(align_to)-1))
#define ALIGN_DOWN(x, align_to) ((uint64_t)(x) & ~((uint64_t)(align_to)-1))

namespace cucim::filesystem
{

// Alignment unit used for O_DIRECT IO and the staging caches.
static constexpr unsigned int PAGE_SIZE = 4096;
// Staging cache size used when the cuFile driver cannot report its properties.
static constexpr uint64_t DEFAULT_MAX_CACHE_SIZE = 128 << 20; // 128MiB

// Process-wide cuFile stub and driver lifetime management.
static CuFileStub s_cufile_stub;
static CuFileDriverInitializer s_cufile_initializer;
// Per-thread staging cache (device + host bounce buffers).
thread_local static CuFileDriverCache s_cufile_cache;

Mutex CuFileDriver::driver_mutex_;
// Resolve the filesystem path behind an open descriptor by reading the
// /proc/<pid>/fd/<fd> symlink. Throws std::runtime_error when the link
// cannot be resolved.
static std::string get_fd_path(int fd)
{
    char resolved[PATH_MAX];
    const std::string link_path = fmt::format("/proc/{}/fd/{}", getpid(), fd);

    const ssize_t len = readlink(link_path.c_str(), resolved, PATH_MAX - 1);
    if (len <= 0)
    {
        throw std::runtime_error(fmt::format("Cannot get the real path from process entries ({})", strerror(errno)));
    }
    resolved[len] = '\0';
    return std::string(resolved);
}
// Translate an fopen-style mode string ("r", "r+", "w", "a", ...) into
// open(2) flags. Returns -1 for a null, empty, or unrecognized mode.
// O_CLOEXEC is always added so descriptors are not inherited across exec.
static int get_file_flags(const char* flags)
{
    if (flags == nullptr || flags[0] == '\0')
    {
        return -1;
    }

    int file_flags;
    switch (flags[0])
    {
    case 'r':
        file_flags = (flags[1] == '+') ? O_RDWR : O_RDONLY;
        break;
    case 'w':
        file_flags = O_RDWR | O_CREAT | O_TRUNC;
        break;
    case 'a':
        file_flags = O_RDWR | O_CREAT;
        break;
    default:
        return -1;
    }
    return file_flags | O_CLOEXEC;
}
// Return whether GPUDirect Storage can be used: the cuFile driver must have
// opened successfully and the process must not run under WSL.
bool is_gds_available()
{
    return static_cast<bool>(s_cufile_initializer) && !cucim::util::is_in_wsl();
}
/**
 * @brief Open `file_path` with an fopen-style mode string.
 *
 * Extra modifier characters may follow the base mode:
 *   'n' : do not use O_DIRECT
 *   'p' : do not use GPUDirect Storage (POSIX fallback)
 *   'm' : use memory-mapped IO (read-only)
 *
 * Fix: the mode string is now validated (get_file_flags) BEFORE the modifier
 * characters are scanned. Previously the scan loop dereferenced `flags` even
 * when it was null/invalid, which is undefined behavior for a null pointer.
 *
 * @return A driver for the opened file, or an empty shared_ptr for an
 *         invalid mode string. Throws std::invalid_argument on open failure.
 */
std::shared_ptr<CuFileDriver> open(const char* file_path, const char* flags, mode_t mode)
{
    int file_flags = get_file_flags(flags);
    if (file_flags < 0)
    {
        return std::shared_ptr<CuFileDriver>();
    }

    bool use_o_direct = true;
    bool no_gds = false;
    bool use_mmap = false;
    // Modifiers start after the base mode ("r", "r+", "w", "a").
    for (const char* ch = (flags[1] == '+' ? &flags[2] : &flags[1]); *ch; ch++)
    {
        switch (*ch)
        {
        case 'n':
            use_o_direct = false;
            break;
        case 'p':
            no_gds = true;
            break;
        case 'm':
            use_mmap = true;
            break;
        }
    }
    if (use_o_direct)
    {
        file_flags |= O_DIRECT;
    }

    FileHandleType file_type = (file_flags & O_DIRECT ? FileHandleType::kPosixODirect : FileHandleType::kPosix);
    int fd = ::open(file_path, file_flags, mode);
    if (fd < 0)
    {
        if (errno == ENOENT)
        {
            throw std::invalid_argument(fmt::format("File '{}' doesn't exist!", file_path));
        }
        if (file_type == FileHandleType::kPosix)
        {
            throw std::invalid_argument(fmt::format("File '{}' cannot be open!", file_path));
        }
        else // if kFileHandlePosixODirect
        {
            // The filesystem may not support O_DIRECT; retry without it.
            file_flags &= ~O_DIRECT;
            fd = ::open(file_path, file_flags, mode);
            fmt::print(
                stderr, "The file {} doesn't support O_DIRECT. Trying to open the file without O_DIRECT\n", file_path);
            if (fd < 0)
            {
                throw std::invalid_argument(fmt::format("File '{}' cannot be open!", file_path));
            }
            file_type = FileHandleType::kPosix; // POSIX
        }
    }

    const auto handle = std::make_shared<CuFileDriver>(fd, no_gds, use_mmap, file_path);
    // Set ownership to the file descriptor so close() releases it.
    if (handle->handle_)
    {
        handle->handle_->own_fd = true;
    }
    return handle;
}
// Wrap an already-open descriptor in a CuFileDriver.
// `no_gds` forces the POSIX path; `use_mmap` requests memory-mapped IO.
// The driver does not take ownership of `fd` through this overload.
std::shared_ptr<CuFileDriver> open(int fd, bool no_gds, bool use_mmap)
{
    return std::make_shared<CuFileDriver>(fd, no_gds, use_mmap, nullptr);
}
/**
 * @brief Construct a driver around an open descriptor.
 *
 * Resolves the file path (from /proc when not given), records size and open
 * flags, then selects the IO strategy: GPUDirect registration when possible,
 * memory-mapped read-only IO when requested, or plain POSIX (O_DIRECT) IO.
 *
 * Fix: the writable-descriptor check for mmap used `flags & (O_RDWR || O_WRONLY)`,
 * where `O_RDWR || O_WRONLY` is a LOGICAL or that evaluates to 1 (== O_WRONLY),
 * so descriptors opened with O_RDWR bypassed the guard. The access mode is now
 * tested explicitly against O_ACCMODE.
 */
CuFileDriver::CuFileDriver(int fd, bool no_gds, bool use_mmap, const char* file_path)
{
    if (file_path == nullptr || *file_path == '\0')
    {
        file_path_ = get_fd_path(fd);
    }
    else
    {
        file_path_ = file_path;
    }

    struct stat st;
    fstat(fd, &st);
    file_size_ = st.st_size;

    int flags;
    // Note: the following method cannot detect flags such as O_EXCL and O_TRUNC.
    flags = fcntl(fd, F_GETFL);
    if (flags < 0)
    {
        throw std::runtime_error(fmt::format("[Error] fcntl failed for fd {} ({})", fd, std::strerror(errno)));
    }
    file_flags_ = flags;

    FileHandleType file_type = (flags & O_DIRECT) ? FileHandleType::kPosixODirect : FileHandleType::kPosix;

    // Copy file path (Allocated memory would be freed at close() method.)
    char* file_path_cstr = static_cast<char*>(cucim_malloc(file_path_.size() + 1));
    memcpy(file_path_cstr, file_path_.c_str(), file_path_.size());
    file_path_cstr[file_path_.size()] = '\0';

    handle_ = std::make_shared<CuCIMFileHandle>(fd, nullptr, file_type, const_cast<char*>(file_path_cstr), this,
                                                static_cast<uint64_t>(st.st_dev), static_cast<uint64_t>(st.st_ino),
                                                static_cast<int64_t>(st.st_mtim.tv_nsec), false);

    CUfileError_t status;
    CUfileDescr_t cf_descr{}; // It is important to set zero!
    if ((file_type == FileHandleType::kPosixODirect || file_type == FileHandleType::kGPUDirect) && !no_gds &&
        !use_mmap && s_cufile_initializer)
    {
        cf_descr.handle.fd = fd;
        cf_descr.type = CU_FILE_HANDLE_TYPE_OPAQUE_FD;
        status = cuFileHandleRegister(&handle_->cufile, &cf_descr);
        if (status.err == CU_FILE_SUCCESS)
        {
            handle_->type = FileHandleType::kGPUDirect;
        }
        else
        {
            // Registration failure is not fatal: fall back to compatibility mode.
            fmt::print(
                stderr,
                "[Error] cuFileHandleRegister fd: {} ({}), status: {}. Would work with cuCIM's compatibility mode.\n",
                fd, file_path_, cufileop_status_error(status.err));
        }
    }
    else if (use_mmap)
    {
        // Reject any writable descriptor (read-only mapping only).
        if ((flags & O_ACCMODE) != O_RDONLY)
        {
            throw std::runtime_error(
                fmt::format("[Error] Memory-mapped IO for writable file descriptor is not supported!"));
        }
        mmap_ptr_ = mmap((void*)0, file_size_, PROT_READ, MAP_SHARED, fd, 0);
        if (mmap_ptr_ != MAP_FAILED)
        {
            handle_->type = FileHandleType::kMemoryMapped;
        }
        else
        {
            mmap_ptr_ = nullptr;
            throw std::runtime_error(fmt::format("[Error] failed to call mmap ({})", std::strerror(errno)));
        }
    }
}
bool close(const std::shared_ptr<CuFileDriver>& fd)
{
return fd->close();
}
ssize_t pread(const std::shared_ptr<CuFileDriver>& fd, void* buf, size_t count, off_t file_offset, off_t buf_offset)
{
if (fd != nullptr)
{
return fd->pread(buf, count, file_offset, buf_offset);
}
else
{
fmt::print(stderr, "fd (CuFileDriver) is null!\n");
return -1;
}
}
ssize_t pwrite(const std::shared_ptr<CuFileDriver>& fd, const void* buf, size_t count, off_t file_offset, off_t buf_offset)
{
if (fd != nullptr)
{
return fd->pwrite(buf, count, file_offset, buf_offset);
}
else
{
fmt::print(stderr, "fd (CuFileDriver) is null!\n");
return -1;
}
}
/**
 * @brief Drop the OS page-cache entries for `file_path`.
 *
 * Fix: the original leaked the file descriptor whenever fdatasync() or
 * posix_fadvise() failed (early `return false` without close()). The
 * descriptor is now released on every path.
 *
 * @return true when sync + fadvise + close all succeed, false otherwise.
 */
bool discard_page_cache(const char* file_path)
{
    int fd = ::open(file_path, O_RDONLY);
    if (fd < 0)
    {
        return false;
    }

    bool succeeded = true;
    if (::fdatasync(fd) < 0)
    {
        succeeded = false;
    }
    else if (::posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED) < 0)
    {
        succeeded = false;
    }

    // Always release the descriptor, even when a prior step failed.
    if (::close(fd) < 0)
    {
        succeeded = false;
    }
    return succeeded;
}
// Load the cuFile (GDS) library and open the driver once per process.
// On success, staging cache sizes come from the driver properties; otherwise
// fixed defaults are used and POSIX IO becomes the fallback path.
CuFileDriverInitializer::CuFileDriverInitializer()
{
    // Initialize libcufile library
    s_cufile_stub.load();

    CUfileError_t status = cuFileDriverOpen();
    if (status.err == CU_FILE_SUCCESS)
    {
        is_available_ = true;
        CUfileDrvProps_t props;
        status = cuFileDriverGetProperties(&props);
        if (status.err == CU_FILE_SUCCESS)
        {
            // kb -> bytes
            max_device_cache_size_ = static_cast<uint64_t>(props.max_device_cache_size) << 10;
            // NOTE(review): the host cache size is also derived from
            // `max_device_cache_size` — confirm this is intentional.
            max_host_cache_size_ = static_cast<uint64_t>(props.max_device_cache_size) << 10;
        }
        else
        {
            fmt::print(stderr, "cuFileDriverGetProperties() failed!\n");
        }
        // fmt::print(stderr, "CuFileDriver opened!\n");
    }
    else
    {
        is_available_ = false;
        max_device_cache_size_ = DEFAULT_MAX_CACHE_SIZE;
        max_host_cache_size_ = DEFAULT_MAX_CACHE_SIZE;
        // fmt::print(stderr, "[Warning] CuFileDriver cannot be open. Falling back to use POSIX file IO APIs.\n");
    }
}
// Close the cuFile driver (if it was opened) and unload the stub library.
CuFileDriverInitializer::~CuFileDriverInitializer()
{
    if (is_available_)
    {
        CUfileError_t status = cuFileDriverClose();
        if (status.err != CU_FILE_SUCCESS)
        {
            fmt::print(stderr, "Unable to close cuFileDriver ({})\n", cufileop_status_error(status.err));
        }
        else
        {
            // fmt::print(stderr, "CuFileDriver closed!\n");
        }
        is_available_ = false;
    }

    // Close cufile stub
    s_cufile_stub.unload();
}
// The staging buffers are allocated lazily by device_cache()/host_cache().
CuFileDriverCache::CuFileDriverCache()
{
}
// Lazily allocate a page-aligned device staging buffer and register it with
// cuFile. Throws std::bad_alloc on allocation failure and std::runtime_error
// when cuFileBufRegister() fails (after rolling back the allocation).
void* CuFileDriverCache::device_cache()
{
    if (device_cache_)
    {
        return device_cache_aligned_;
    }
    else
    {
        cudaError_t cuda_status;
        unsigned int cache_size = s_cufile_initializer.max_device_cache_size();
        // Over-allocate by one page so the usable region can be page-aligned.
        CUDA_TRY(cudaMalloc(&device_cache_, PAGE_SIZE + cache_size));
        if (cuda_status)
        {
            throw std::bad_alloc();
        }
        device_cache_aligned_ = reinterpret_cast<void*>(ALIGN_UP(device_cache_, PAGE_SIZE));

        CUfileError_t status = cuFileBufRegister(device_cache_aligned_, cache_size, 0);
        if (status.err != CU_FILE_SUCCESS)
        {
            // Registration failed: free the device memory before reporting.
            CUDA_TRY(cudaFree(device_cache_));
            device_cache_ = nullptr;
            device_cache_aligned_ = nullptr;
            if (cuda_status)
            {
                throw std::bad_alloc();
            }
            throw std::runtime_error("Failed to call cuFileBufRegister()!");
        }
        return device_cache_aligned_;
    }
}
// Lazily allocate a page-aligned host staging buffer on first use.
// Throws std::bad_alloc when the allocation fails.
void* CuFileDriverCache::host_cache()
{
    if (!host_cache_)
    {
        if (posix_memalign(&host_cache_, PAGE_SIZE, s_cufile_initializer.max_host_cache_size()))
        {
            throw std::bad_alloc();
        }
        // posix_memalign() already returns PAGE_SIZE-aligned memory.
        host_cache_aligned_ = host_cache_;
    }
    return host_cache_aligned_;
}
// Deregister and free the device staging buffer, then free the host buffer.
CuFileDriverCache::~CuFileDriverCache()
{
    if (device_cache_)
    {
        cudaError_t cuda_status;
        CUfileError_t status = cuFileBufDeregister(device_cache_aligned_);
        if (status.err != CU_FILE_SUCCESS)
        {
            fmt::print(stderr, "Failed on cuFileBufDeregister()! (status: {})\n", cufileop_status_error(status.err));
        }
        CUDA_TRY(cudaFree(device_cache_));
        if (cuda_status)
        {
            fmt::print(stderr, "Failed on cudaFree()!\n");
        }
        device_cache_ = nullptr;
        device_cache_aligned_ = nullptr;
    }
    if (host_cache_)
    {
        free(host_cache_);
        host_cache_ = nullptr;
        host_cache_aligned_ = nullptr;
    }
}
/**
 * @brief Read `count` bytes from the file at `file_offset` into `buf + buf_offset`.
 *
 * `buf` may point to host or device memory; the memory type is detected with
 * cudaPointerGetAttributes(). Depending on the handle type (POSIX,
 * POSIX+O_DIRECT, memory-mapped, or GPUDirect) data is read directly, staged
 * through a page-aligned host bounce buffer, or read with cuFileRead().
 *
 * @return The number of bytes read, or -1 on error.
 */
ssize_t CuFileDriver::pread(void* buf, size_t count, off_t file_offset, off_t buf_offset) const
{
    if (file_flags_ == -1)
    {
        fmt::print(stderr, "File is not open yet.\n");
        return -1;
    }
    if ((file_flags_ & O_ACCMODE) == O_WRONLY)
    {
        fmt::print(stderr, "The file is open with write-only mode!\n");
        return -1;
    }

    cudaError_t cuda_status;
    ssize_t total_read_cnt = 0;

    cudaPointerAttributes attributes;
    cudaMemoryType memory_type;
    FileHandleType file_type = handle_->type;

    // Determine whether `buf` is host (cudaMemoryTypeUnregistered) or device memory.
    CUDA_TRY(cudaPointerGetAttributes(&attributes, buf));
    if (cuda_status)
    {
        // if (cuda_status == cudaErrorInvalidValue)
        // {
        //     attributes.type = cudaMemoryTypeDevice;
        // }
        // else
        // {
        return -1;
        // }
    }
    memory_type = attributes.type;

    if (file_type == FileHandleType::kPosix)
    {
        if (memory_type != cudaMemoryTypeUnregistered)
        {
            // Device destination: stage chunks through the host cache (pread + H2D copy).
            uint64_t cache_size = s_cufile_initializer.max_host_cache_size();
            uint64_t remaining_size = count;
            ssize_t read_cnt;
            uint8_t* cache_buf = static_cast<uint8_t*>(s_cufile_cache.host_cache());
            uint8_t* output_buf = static_cast<uint8_t*>(buf) + buf_offset;
            off_t read_offset = file_offset;
            while (true)
            {
                size_t bytes_to_copy = std::min(cache_size, remaining_size);
                if (bytes_to_copy == 0)
                {
                    break;
                }
                read_cnt = ::pread(handle_->fd, cache_buf, bytes_to_copy, read_offset);
                CUDA_TRY(cudaMemcpy(output_buf, cache_buf, bytes_to_copy, cudaMemcpyHostToDevice));
                if (cuda_status)
                {
                    return -1;
                }
                read_offset += read_cnt;
                output_buf += read_cnt;
                remaining_size -= read_cnt;
                total_read_cnt += bytes_to_copy;
            }
        }
        else
        {
            // Host destination: single positional read.
            total_read_cnt = ::pread(handle_->fd, reinterpret_cast<char*>(buf) + buf_offset, count, file_offset);
        }
    }
    else if (file_type == FileHandleType::kMemoryMapped)
    {
        // Copy straight out of the mapping (host memcpy or H2D cudaMemcpy).
        if (memory_type != cudaMemoryTypeUnregistered)
        {
            CUDA_TRY(cudaMemcpy(reinterpret_cast<char*>(buf) + buf_offset,
                                reinterpret_cast<char*>(mmap_ptr_) + file_offset, count, cudaMemcpyHostToDevice));
            if (cuda_status)
            {
                return -1;
            }
        }
        else
        {
            memcpy(reinterpret_cast<char*>(buf) + buf_offset, reinterpret_cast<char*>(mmap_ptr_) + file_offset, count);
        }
        total_read_cnt = count;
    }
    else if (memory_type == cudaMemoryTypeUnregistered || handle_->type == FileHandleType::kPosixODirect)
    {
        // O_DIRECT path: reads must be PAGE_SIZE-aligned in both the buffer and
        // the file offset, so unaligned head/tail pieces go through bounce buffers.
        uint64_t buf_align = (reinterpret_cast<uint64_t>(buf) + buf_offset) % PAGE_SIZE;
        bool is_aligned = (buf_align == 0) && ((file_offset % PAGE_SIZE) == 0);
        if (is_aligned)
        {
            ssize_t read_cnt;
            size_t block_read_size = ALIGN_DOWN(count, PAGE_SIZE);
            if (block_read_size > 0)
            {
                if (memory_type == cudaMemoryTypeUnregistered)
                {
                    read_cnt =
                        ::pread(handle_->fd, reinterpret_cast<char*>(buf) + buf_offset, block_read_size, file_offset);
                    total_read_cnt += read_cnt;
                }
                else
                {
                    // Device destination: stage page-multiple chunks through the host cache.
                    uint64_t cache_size = s_cufile_initializer.max_host_cache_size();
                    uint64_t remaining_size = block_read_size;
                    ssize_t read_cnt;
                    uint8_t* cache_buf = static_cast<uint8_t*>(s_cufile_cache.host_cache());
                    uint8_t* input_buf = static_cast<uint8_t*>(buf) + buf_offset;
                    off_t read_offset = file_offset;
                    while (true)
                    {
                        size_t bytes_to_copy = std::min(cache_size, remaining_size);
                        if (bytes_to_copy == 0)
                        {
                            break;
                        }
                        read_cnt = ::pread(handle_->fd, cache_buf, bytes_to_copy, read_offset);
                        CUDA_TRY(cudaMemcpy(input_buf, cache_buf, bytes_to_copy, cudaMemcpyHostToDevice));
                        if (cuda_status)
                        {
                            return -1;
                        }
                        read_offset += read_cnt;
                        input_buf += read_cnt;
                        remaining_size -= read_cnt;
                        total_read_cnt += bytes_to_copy;
                    }
                }
            }
            size_t remaining = count - block_read_size;
            if (remaining)
            {
                uint8_t internal_buf[PAGE_SIZE * 2]; // no need to initialize for pread()
                uint8_t* buf_pos = reinterpret_cast<uint8_t*>(ALIGN_UP(static_cast<uint8_t*>(internal_buf), PAGE_SIZE));
                // Read the remaining block (size of PAGE_SIZE)
                // NOTE(review): the offset used here is `block_read_size` rather
                // than `file_offset + block_read_size` — verify this is intentional.
                ssize_t read_cnt;
                read_cnt = ::pread(handle_->fd, buf_pos, PAGE_SIZE, block_read_size);
                if (read_cnt < 0)
                {
                    fmt::print(stderr, "Cannot read the remaining file content block! ({})\n", std::strerror(errno));
                    return -1;
                }

                // Copy a buffer to read, from the intermediate remaining block (buf_pos)
                if (memory_type == cudaMemoryTypeUnregistered)
                {
                    memcpy(reinterpret_cast<uint8_t*>(buf) + buf_offset + block_read_size, buf_pos, remaining);
                }
                else
                {
                    CUDA_TRY(cudaMemcpy(reinterpret_cast<uint8_t*>(buf) + buf_offset + block_read_size, buf_pos,
                                        remaining, cudaMemcpyHostToDevice));
                    if (cuda_status)
                    {
                        return -1;
                    }
                }
                total_read_cnt += remaining;
            }
        }
        else
        {
            // Unaligned request: split into head page / page-multiple body / tail page.
            uint64_t cache_size = s_cufile_initializer.max_host_cache_size();
            uint8_t* cache_buf = static_cast<uint8_t*>(s_cufile_cache.host_cache());

            off_t file_start_offset = ALIGN_DOWN(file_offset, PAGE_SIZE);
            off_t end_offset = count + file_offset;
            off_t end_boundary_offset = ALIGN_UP(end_offset, PAGE_SIZE);
            size_t large_block_size = end_boundary_offset - file_start_offset;
            off_t page_offset = file_offset - file_start_offset;
            uint8_t* output_buf = static_cast<uint8_t*>(buf) + buf_offset;

            if (large_block_size <= cache_size) // Optimize if bytes to load is less than cache_size
            {
                ssize_t read_cnt = ::pread(handle_->fd, cache_buf, large_block_size, file_start_offset);
                if (read_cnt < 0)
                {
                    fmt::print(stderr, "Cannot read the file content block! ({})\n", std::strerror(errno));
                    return -1;
                }
                if (memory_type == cudaMemoryTypeUnregistered)
                {
                    memcpy(output_buf, cache_buf + page_offset, count);
                }
                else
                {
                    CUDA_TRY(cudaMemcpy(output_buf, cache_buf + page_offset, count, cudaMemcpyHostToDevice));
                    if (cuda_status)
                    {
                        return -1;
                    }
                }
                total_read_cnt += std::min(static_cast<size_t>(read_cnt - page_offset), count);
            }
            else
            {
                off_t overflow_offset = page_offset + count;
                size_t header_size = (overflow_offset > PAGE_SIZE) ? PAGE_SIZE - page_offset : count;
                size_t tail_size = (overflow_offset > PAGE_SIZE) ? end_offset - ALIGN_DOWN(end_offset, PAGE_SIZE) : 0;
                uint64_t body_remaining_size = count - header_size - tail_size;

                off_t read_offset = file_start_offset;
                size_t bytes_to_copy;
                ssize_t read_cnt;
                uint8_t internal_buf[PAGE_SIZE * 2]; // no need to initialize for pread()
                uint8_t* internal_buf_pos =
                    reinterpret_cast<uint8_t*>(ALIGN_UP(static_cast<uint8_t*>(internal_buf), PAGE_SIZE));

                // Handle the head part of the file content
                if (header_size)
                {
                    read_cnt = ::pread(handle_->fd, internal_buf_pos, PAGE_SIZE, read_offset);
                    if (read_cnt < 0)
                    {
                        fmt::print(stderr, "Cannot read the head part of the file content block! ({})\n",
                                   std::strerror(errno));
                        return -1;
                    }
                    bytes_to_copy = header_size;
                    if (memory_type == cudaMemoryTypeUnregistered)
                    {
                        memcpy(output_buf, internal_buf_pos + page_offset, bytes_to_copy);
                    }
                    else
                    {
                        CUDA_TRY(cudaMemcpy(
                            output_buf, internal_buf_pos + page_offset, bytes_to_copy, cudaMemcpyHostToDevice));
                        if (cuda_status)
                        {
                            return -1;
                        }
                    }
                    output_buf += bytes_to_copy;
                    read_offset += read_cnt;
                    total_read_cnt += bytes_to_copy;
                }

                // Copy n * PAGE_SIZE bytes
                while (true)
                {
                    size_t bytes_to_copy = std::min(cache_size, body_remaining_size);
                    if (bytes_to_copy == 0)
                    {
                        break;
                    }
                    read_cnt = ::pread(handle_->fd, cache_buf, bytes_to_copy, read_offset);
                    if (memory_type == cudaMemoryTypeUnregistered)
                    {
                        memcpy(output_buf, cache_buf, bytes_to_copy);
                    }
                    else
                    {
                        CUDA_TRY(cudaMemcpy(output_buf, cache_buf, bytes_to_copy, cudaMemcpyHostToDevice));
                        if (cuda_status)
                        {
                            return -1;
                        }
                    }
                    read_offset += read_cnt;
                    output_buf += read_cnt;
                    body_remaining_size -= read_cnt;
                    total_read_cnt += bytes_to_copy;
                }

                // Handle the tail part of the file content
                if (tail_size)
                {
                    // memset(internal_buf_pos, 0, PAGE_SIZE); // no need to initialize for pread()
                    read_cnt = ::pread(handle_->fd, internal_buf_pos, PAGE_SIZE, read_offset);
                    if (read_cnt < 0)
                    {
                        fmt::print(stderr, "Cannot read the tail part of the file content block! ({})\n",
                                   std::strerror(errno));
                        return -1;
                    }
                    // Copy the region
                    bytes_to_copy = tail_size;
                    if (memory_type == cudaMemoryTypeUnregistered)
                    {
                        memcpy(output_buf, internal_buf_pos, bytes_to_copy);
                    }
                    else
                    {
                        CUDA_TRY(cudaMemcpy(output_buf, internal_buf_pos, bytes_to_copy, cudaMemcpyHostToDevice));
                        if (cuda_status)
                        {
                            return -1;
                        }
                    }
                    total_read_cnt += tail_size;
                }
            }
        }
    }
    else if (file_type == FileHandleType::kGPUDirect)
    {
        (void*)s_cufile_cache.device_cache(); // Lazy initialization
        ssize_t read_cnt = cuFileRead(handle_->cufile, reinterpret_cast<char*>(buf) + buf_offset, count, file_offset, 0);
        total_read_cnt += read_cnt;
        if (read_cnt < 0)
        {
            fmt::print(stderr, "Failed to read file with cuFileRead().\n");
            return -1;
        }
    }
    return total_read_cnt;
}
/**
 * @brief Write `count` bytes from `buf + buf_offset` to the file at `file_offset`.
 *
 * Mirrors pread(): host or device source buffers are supported, and the write
 * strategy depends on the handle type (POSIX, POSIX+O_DIRECT with page-aligned
 * read-modify-write for partial pages, or GPUDirect). Memory-mapped handles do
 * not support writes. Updates the cached logical file size on success.
 *
 * @return The number of bytes written, or -1 on error.
 */
ssize_t CuFileDriver::pwrite(const void* buf, size_t count, off_t file_offset, off_t buf_offset)
{
    if (file_flags_ == -1)
    {
        fmt::print(stderr, "File is not open yet.\n");
        return -1;
    }
    if ((file_flags_ & O_ACCMODE) == O_RDONLY)
    {
        fmt::print(stderr, "The file is open with read-only mode!\n");
        return -1;
    }

    cudaError_t cuda_status;
    ssize_t total_write_cnt = 0;

    cudaPointerAttributes attributes;
    cudaMemoryType memory_type;
    FileHandleType file_type = handle_->type;

    // Determine whether `buf` is host (cudaMemoryTypeUnregistered) or device memory.
    CUDA_TRY(cudaPointerGetAttributes(&attributes, buf));
    if (cuda_status)
    {
        return -1;
    }
    memory_type = attributes.type;

    if (file_type == FileHandleType::kPosix)
    {
        if (memory_type != cudaMemoryTypeUnregistered)
        {
            // Device source: stage chunks through the host cache (D2H copy + pwrite).
            uint64_t cache_size = s_cufile_initializer.max_host_cache_size();
            uint64_t remaining_size = count;
            ssize_t write_cnt;
            uint8_t* cache_buf = static_cast<uint8_t*>(s_cufile_cache.host_cache());
            const uint8_t* input_buf = static_cast<const uint8_t*>(buf) + buf_offset;
            off_t write_offset = file_offset;
            while (true)
            {
                size_t bytes_to_copy = std::min(cache_size, remaining_size);
                if (bytes_to_copy == 0)
                {
                    break;
                }
                CUDA_TRY(cudaMemcpy(cache_buf, input_buf, bytes_to_copy, cudaMemcpyDeviceToHost));
                if (cuda_status)
                {
                    return -1;
                }
                write_cnt = ::pwrite(handle_->fd, cache_buf, bytes_to_copy, write_offset);
                write_offset += write_cnt;
                input_buf += write_cnt;
                remaining_size -= write_cnt;
                total_write_cnt += bytes_to_copy;
            }
        }
        else
        {
            // Host source: single positional write.
            total_write_cnt = ::pwrite(handle_->fd, reinterpret_cast<const char*>(buf) + buf_offset, count, file_offset);
        }
    }
    else if (file_type == FileHandleType::kMemoryMapped)
    {
        fmt::print(stderr, "[Error] pwrite() is not supported for Memory-mapped IO file type!\n");
        return -1;
    }
    else if (memory_type == cudaMemoryTypeUnregistered || handle_->type == FileHandleType::kPosixODirect)
    {
        // O_DIRECT path: partial pages require a read-modify-write through
        // page-aligned bounce buffers so neighboring bytes are preserved.
        uint64_t buf_align = (reinterpret_cast<uint64_t>(buf) + buf_offset) % PAGE_SIZE;
        bool is_aligned = (buf_align == 0) && ((file_offset % PAGE_SIZE) == 0);
        if (is_aligned)
        {
            ssize_t write_cnt;
            size_t block_write_size = ALIGN_DOWN(count, PAGE_SIZE);
            if (block_write_size > 0)
            {
                if (memory_type == cudaMemoryTypeUnregistered)
                {
                    write_cnt = ::pwrite(
                        handle_->fd, reinterpret_cast<const char*>(buf) + buf_offset, block_write_size, file_offset);
                    total_write_cnt += write_cnt;
                }
                else
                {
                    // Device source: stage page-multiple chunks through the host cache.
                    uint64_t cache_size = s_cufile_initializer.max_host_cache_size();
                    uint64_t remaining_size = block_write_size;
                    ssize_t write_cnt;
                    uint8_t* cache_buf = static_cast<uint8_t*>(s_cufile_cache.host_cache());
                    const uint8_t* input_buf = static_cast<const uint8_t*>(buf) + buf_offset;
                    off_t write_offset = file_offset;
                    while (true)
                    {
                        size_t bytes_to_copy = std::min(cache_size, remaining_size);
                        if (bytes_to_copy == 0)
                        {
                            break;
                        }
                        CUDA_TRY(cudaMemcpy(cache_buf, input_buf, bytes_to_copy, cudaMemcpyDeviceToHost));
                        if (cuda_status)
                        {
                            return -1;
                        }
                        write_cnt = ::pwrite(handle_->fd, cache_buf, bytes_to_copy, write_offset);
                        write_offset += write_cnt;
                        input_buf += write_cnt;
                        remaining_size -= write_cnt;
                        total_write_cnt += bytes_to_copy;
                    }
                }
            }
            size_t remaining = count - block_write_size;
            if (remaining)
            {
                uint8_t internal_buf[PAGE_SIZE * 2]{};
                uint8_t* internal_buf_pos =
                    reinterpret_cast<uint8_t*>(ALIGN_UP(static_cast<uint8_t*>(internal_buf), PAGE_SIZE));
                // Read the remaining block (size of PAGE_SIZE)
                // NOTE(review): the offset used here is `block_write_size` rather
                // than `file_offset + block_write_size` — verify this is intentional.
                ssize_t read_cnt;
                read_cnt = ::pread(handle_->fd, internal_buf_pos, PAGE_SIZE, block_write_size);
                if (read_cnt < 0)
                {
                    fmt::print(stderr, "Cannot read the remaining file content block! ({})\n", std::strerror(errno));
                    return -1;
                }

                // Overwrite a buffer to write, to the intermediate remaining block (internal_buf_pos)
                if (memory_type == cudaMemoryTypeUnregistered)
                {
                    memcpy(internal_buf_pos, reinterpret_cast<const uint8_t*>(buf) + buf_offset + block_write_size,
                           remaining);
                }
                else
                {
                    CUDA_TRY(cudaMemcpy(internal_buf_pos,
                                        reinterpret_cast<const uint8_t*>(buf) + buf_offset + block_write_size,
                                        remaining, cudaMemcpyDeviceToHost));
                    if (cuda_status)
                    {
                        return -1;
                    }
                }
                // Write the constructed block
                write_cnt = ::pwrite(handle_->fd, internal_buf_pos, PAGE_SIZE, block_write_size);
                if (write_cnt < 0)
                {
                    fmt::print(stderr, "Cannot write the remaining file content! ({})\n", std::strerror(errno));
                    return -1;
                }
                total_write_cnt += remaining;
            }
        }
        else
        {
            // Unaligned request: split into head page / page-multiple body / tail page.
            uint64_t cache_size = s_cufile_initializer.max_host_cache_size();
            uint8_t* cache_buf = static_cast<uint8_t*>(s_cufile_cache.host_cache());

            off_t file_start_offset = ALIGN_DOWN(file_offset, PAGE_SIZE);
            off_t end_offset = count + file_offset;
            off_t end_boundary_offset = ALIGN_UP(end_offset, PAGE_SIZE);
            size_t large_block_size = end_boundary_offset - file_start_offset;
            off_t page_offset = file_offset - file_start_offset;
            const uint8_t* input_buf = static_cast<const uint8_t*>(buf) + buf_offset;

            if (large_block_size <= cache_size) // Optimize if bytes to write is less than cache_size
            {
                // Pre-read the first (and, if needed, last) page so untouched
                // bytes surrounding the written range are preserved.
                memset(cache_buf, 0, PAGE_SIZE);
                ssize_t read_cnt = ::pread(handle_->fd, cache_buf, PAGE_SIZE, file_start_offset);
                if (read_cnt < 0)
                {
                    fmt::print(
                        stderr, "Cannot read the head part of the file content block! ({})\n", std::strerror(errno));
                    return -1;
                }
                if (large_block_size > PAGE_SIZE)
                {
                    read_cnt = ::pread(handle_->fd, cache_buf + large_block_size - PAGE_SIZE, PAGE_SIZE,
                                       end_boundary_offset - PAGE_SIZE);
                    if (read_cnt < 0)
                    {
                        fmt::print(stderr, "Cannot read the tail part of the file content block! ({})\n",
                                   std::strerror(errno));
                        return -1;
                    }
                }
                if (memory_type == cudaMemoryTypeUnregistered)
                {
                    memcpy(cache_buf + page_offset, input_buf, count);
                }
                else
                {
                    CUDA_TRY(cudaMemcpy(cache_buf + page_offset, input_buf, count, cudaMemcpyDeviceToHost));
                    if (cuda_status)
                    {
                        return -1;
                    }
                }
                // Write the constructed block
                ssize_t write_cnt = ::pwrite(handle_->fd, cache_buf, large_block_size, file_start_offset);
                if (write_cnt < 0)
                {
                    fmt::print(stderr, "Cannot write the file content block! ({})\n", std::strerror(errno));
                    return -1;
                }
                total_write_cnt += std::min(static_cast<size_t>(write_cnt - page_offset), count);
            }
            else
            {
                off_t overflow_offset = page_offset + count;
                size_t header_size = (overflow_offset > PAGE_SIZE) ? PAGE_SIZE - page_offset : count;
                size_t tail_size = (overflow_offset > PAGE_SIZE) ? end_offset - ALIGN_DOWN(end_offset, PAGE_SIZE) : 0;
                uint64_t body_remaining_size = count - header_size - tail_size;

                off_t write_offset = file_start_offset;
                size_t bytes_to_copy;
                ssize_t read_cnt;
                ssize_t write_cnt;
                uint8_t internal_buf[PAGE_SIZE * 2]{};
                uint8_t* internal_buf_pos =
                    reinterpret_cast<uint8_t*>(ALIGN_UP(static_cast<uint8_t*>(internal_buf), PAGE_SIZE));

                // Handle the head part of the file content
                if (header_size)
                {
                    read_cnt = ::pread(handle_->fd, internal_buf_pos, PAGE_SIZE, write_offset);
                    if (read_cnt < 0)
                    {
                        fmt::print(stderr, "Cannot read the head part of the file content block! ({})\n",
                                   std::strerror(errno));
                        return -1;
                    }
                    // Overwrite the region to write
                    bytes_to_copy = header_size;
                    if (memory_type == cudaMemoryTypeUnregistered)
                    {
                        memcpy(internal_buf_pos + page_offset, input_buf, bytes_to_copy);
                    }
                    else
                    {
                        CUDA_TRY(cudaMemcpy(
                            internal_buf_pos + page_offset, input_buf, bytes_to_copy, cudaMemcpyDeviceToHost));
                        if (cuda_status)
                        {
                            return -1;
                        }
                    }
                    // Write the constructed block
                    write_cnt = ::pwrite(handle_->fd, internal_buf_pos, PAGE_SIZE, write_offset);
                    if (write_cnt < 0)
                    {
                        fmt::print(stderr, "Cannot write the head part of the file content block! ({})\n",
                                   std::strerror(errno));
                        return -1;
                    }
                    input_buf += bytes_to_copy;
                    write_offset += write_cnt;
                    total_write_cnt += bytes_to_copy;
                }

                // Copy n * PAGE_SIZE bytes
                while (true)
                {
                    size_t bytes_to_copy = std::min(cache_size, body_remaining_size);
                    if (bytes_to_copy == 0)
                    {
                        break;
                    }
                    if (memory_type == cudaMemoryTypeUnregistered)
                    {
                        memcpy(cache_buf, input_buf, bytes_to_copy);
                    }
                    else
                    {
                        CUDA_TRY(cudaMemcpy(cache_buf, input_buf, bytes_to_copy, cudaMemcpyDeviceToHost));
                        if (cuda_status)
                        {
                            return -1;
                        }
                    }
                    write_cnt = ::pwrite(handle_->fd, cache_buf, bytes_to_copy, write_offset);
                    write_offset += write_cnt;
                    input_buf += write_cnt;
                    body_remaining_size -= write_cnt;
                    total_write_cnt += bytes_to_copy;
                }

                // Handle the tail part of the file content
                if (tail_size)
                {
                    memset(internal_buf_pos, 0, PAGE_SIZE);
                    read_cnt = ::pread(handle_->fd, internal_buf_pos, PAGE_SIZE, write_offset);
                    if (read_cnt < 0)
                    {
                        fmt::print(stderr, "Cannot read the tail part of the file content block! ({})\n",
                                   std::strerror(errno));
                        return -1;
                    }
                    // Overwrite the region to write
                    bytes_to_copy = tail_size;
                    if (memory_type == cudaMemoryTypeUnregistered)
                    {
                        memcpy(internal_buf_pos, input_buf, bytes_to_copy);
                    }
                    else
                    {
                        CUDA_TRY(cudaMemcpy(internal_buf_pos, input_buf, bytes_to_copy, cudaMemcpyDeviceToHost));
                        if (cuda_status)
                        {
                            return -1;
                        }
                    }
                    // Write the constructed block
                    write_cnt = ::pwrite(handle_->fd, internal_buf_pos, PAGE_SIZE, write_offset);
                    if (write_cnt < 0)
                    {
                        fmt::print(stderr, "Cannot write the tail part of the file content block! ({})\n",
                                   std::strerror(errno));
                        return -1;
                    }
                    total_write_cnt += tail_size;
                }
            }
        }
    }
    else if (file_type == FileHandleType::kGPUDirect)
    {
        (void*)s_cufile_cache.device_cache(); // Lazy initialization
        ssize_t write_cnt =
            cuFileWrite(handle_->cufile, reinterpret_cast<const char*>(buf) + buf_offset, count, file_offset, 0);
        if (write_cnt < 0)
        {
            fmt::print(stderr, "[cuFile Error] {}\n", CUFILE_ERRSTR(write_cnt));
            return -1;
        }
        total_write_cnt += write_cnt;
    }

    // Update file size
    if (total_write_cnt > 0)
    {
        file_size_ = std::max(file_size_, file_offset + static_cast<size_t>(total_write_cnt));
    }
    return total_write_cnt;
}
// Release all resources held by the driver: deregister the cuFile handle,
// unmap any memory mapping, truncate block-written files down to the logical
// size, and reset internal state. Always returns true.
bool CuFileDriver::close()
{
    if (handle_ && handle_->cufile)
    {
        cuFileHandleDeregister(handle_->cufile);
    }
    if (mmap_ptr_)
    {
        int err = munmap(mmap_ptr_, file_size_);
        if (err < 0)
        {
            fmt::print(stderr, "[Error] Cannot call munmap() ({})\n", std::strerror(errno));
        }
        mmap_ptr_ = nullptr;
    }

    if (handle_ && handle_->fd != -1)
    {
        // If block write was used
        if ((file_flags_ & O_RDWR) &&
            (handle_->type == FileHandleType::kGPUDirect || handle_->type == FileHandleType::kPosixODirect))
        {
            // Truncate file assuming that `file_size_` is up to date during pwrite() calls
            int err = ::ftruncate(handle_->fd, file_size_);
            if (err < 0)
            {
                fmt::print(stderr, "[Error] Cannot resize the file {} to {} ({})\n", handle_->path, file_size_,
                           std::strerror(errno));
            }
        }
        handle_ = nullptr;
    }
    file_path_.clear();
    file_size_ = 0;
    file_flags_ = -1;
    return true;
}
// Return the (resolved) path of the underlying file.
filesystem::Path CuFileDriver::path() const
{
    return file_path_;
}
// Ensure all resources are released when the driver goes out of scope.
CuFileDriver::~CuFileDriver()
{
    close();
}
} // namespace cucim::filesystem
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/concurrent/threadpool.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/concurrent/threadpool.h"

#include <fmt/format.h>
#include <taskflow/taskflow.hpp>

#include "cucim/profiler/nvtx3.h"

namespace cucim::concurrent
{

// Thin wrapper so the Taskflow executor type stays out of the public header.
struct ThreadPool::Executor : public tf::Executor
{
    // inherits Constructor
    using tf::Executor::Executor;
};
// Create a pool with `num_workers` threads. A non-positive count yields a
// disabled pool: no executor is created and operator bool() returns false.
ThreadPool::ThreadPool(int32_t num_workers)
{
    num_workers_ = num_workers;
    executor_ = (num_workers > 0) ? std::make_unique<Executor>(num_workers) : nullptr;
}
// Drain all outstanding tasks before the executor is destroyed.
ThreadPool::~ThreadPool()
{
    if (executor_)
    {
        executor_->wait_for_all();
    }
}
// A pool is "truthy" when it was created with at least one worker.
ThreadPool::operator bool() const
{
    return num_workers_ > 0;
}
/**
 * @brief Submit a task for asynchronous execution on the pool.
 *
 * NOTE(review): assumes the pool was constructed with num_workers > 0;
 * `executor_` is null otherwise — confirm callers guard with operator bool().
 *
 * @return A future that becomes ready once the task has run.
 */
std::future<void> ThreadPool::enqueue(std::function<void()> task)
{
    // Fix: pass the callable straight through (the original wrapped it in a
    // copy-capturing lambda) and return the future directly —
    // `return std::move(local)` is a pessimizing move that blocks copy elision.
    return executor_->async(std::move(task));
}
// Block until every task submitted so far has finished. No-op for a pool that
// was created without workers.
void ThreadPool::wait()
{
    if (!executor_)
    {
        return;
    }
    executor_->wait_for_all();
}
} // namespace cucim::concurrent
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/memory/memory_manager.cpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define CUCIM_EXPORTS // For exporting functions globally
#include "cucim/memory/memory_manager.h"
#include <memory_resource>
#include <cuda_runtime.h>
#include <fmt/format.h>
#include "cucim/io/device_type.h"
#include "cucim/profiler/nvtx3.h"
#include "cucim/util/cuda.h"
// Host allocation entry point for cuCIM; currently a profiled wrapper around
// malloc(). Pair every allocation with cucim_free().
CUCIM_API void* cucim_malloc(size_t size)
{
    PROF_SCOPED_RANGE(PROF_EVENT_P(cucim_malloc, size));
    return malloc(size);
}
// Release memory obtained from cucim_malloc().
CUCIM_API void cucim_free(void* ptr)
{
    PROF_SCOPED_RANGE(PROF_EVENT(cucim_free));
    free(ptr);
}
namespace cucim::memory
{
// Classify `ptr` with the CUDA runtime and fill `attr` with the matching cuCIM
// device type plus a pointer usable from that device.
// On CUDA API failure `attr` is left unmodified (CUDA_TRY records the error in
// cuda_status and the function returns early).
void get_pointer_attributes(PointerAttributes& attr, const void* ptr)
{
    cudaError_t cuda_status;
    cudaPointerAttributes attributes;
    CUDA_TRY(cudaPointerGetAttributes(&attributes, ptr));
    if (cuda_status)
    {
        return;
    }
    cudaMemoryType& memory_type = attributes.type;
    // All four cudaMemoryType enumerators are handled, so no default case.
    switch (memory_type)
    {
    case cudaMemoryTypeUnregistered:
        // Plain host memory unknown to CUDA: device index is -1.
        attr.device = cucim::io::Device(cucim::io::DeviceType::kCPU, -1);
        attr.ptr = const_cast<void*>(ptr);
        break;
    case cudaMemoryTypeHost:
        // Pinned (page-locked) host memory registered with a device.
        attr.device = cucim::io::Device(cucim::io::DeviceType::kCUDAHost, attributes.device);
        attr.ptr = attributes.hostPointer;
        break;
    case cudaMemoryTypeDevice:
        attr.device = cucim::io::Device(cucim::io::DeviceType::kCUDA, attributes.device);
        attr.ptr = attributes.devicePointer;
        break;
    case cudaMemoryTypeManaged:
        attr.device = cucim::io::Device(cucim::io::DeviceType::kCUDAManaged, attributes.device);
        attr.ptr = attributes.devicePointer;
        break;
    }
}
// Move `size` bytes of host memory at *target to `dst_device`.
// On success *target points to the destination buffer and the original host
// buffer has been released with cucim_free(). Throws std::bad_alloc on CUDA
// allocation or copy failure and std::runtime_error for unsupported device
// types. Always returns true (kept for interface compatibility).
CUCIM_API bool move_raster_from_host(void** target, size_t size, const cucim::io::Device& dst_device)
{
    switch (dst_device.type())
    {
    case cucim::io::DeviceType::kCPU:
        // Already on the host; nothing to do.
        break;
    case cucim::io::DeviceType::kCUDA: {
        cudaError_t cuda_status;
        void* host_mem = *target;
        void* cuda_mem;
        CUDA_TRY(cudaMalloc(&cuda_mem, size));
        if (cuda_status)
        {
            throw std::bad_alloc();
        }
        CUDA_TRY(cudaMemcpy(cuda_mem, host_mem, size, cudaMemcpyHostToDevice));
        if (cuda_status)
        {
            // Fixed: release the freshly-allocated device buffer before
            // throwing; the original leaked it on copy failure.
            cudaFree(cuda_mem);
            throw std::bad_alloc();
        }
        cucim_free(host_mem);
        *target = cuda_mem;
        break;
    }
    default:
        throw std::runtime_error("Unsupported device type");
    }
    return true;
}
// Move `size` bytes of device memory at *target to `dst_device`.
// On success *target points to the destination buffer and the original device
// buffer has been released with cudaFree(). Throws std::bad_alloc on copy
// failure and std::runtime_error for unsupported device types. Always returns
// true (kept for interface compatibility).
CUCIM_API bool move_raster_from_device(void** target, size_t size, const cucim::io::Device& dst_device)
{
    switch (dst_device.type())
    {
    case cucim::io::DeviceType::kCPU: {
        cudaError_t cuda_status;
        void* cuda_mem = *target;
        void* host_mem = cucim_malloc(size);
        CUDA_TRY(cudaMemcpy(host_mem, cuda_mem, size, cudaMemcpyDeviceToHost));
        if (cuda_status)
        {
            // Fixed: release the freshly-allocated host buffer before
            // throwing; the original leaked it on copy failure.
            cucim_free(host_mem);
            throw std::bad_alloc();
        }
        cudaFree(cuda_mem);
        *target = host_mem;
        break;
    }
    case cucim::io::DeviceType::kCUDA:
        // Already on the device; nothing to do.
        break;
    default:
        throw std::runtime_error("Unsupported device type");
    }
    return true;
}
} // namespace cucim::memory
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/loader/thread_batch_data_loader.cpp | /*
* Apache License, Version 2.0
* Copyright 2021-2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/loader/thread_batch_data_loader.h"
#include <cassert>
#include <fmt/format.h>
#include "cucim/profiler/nvtx3.h"
#include "cucim/util/cuda.h"
namespace cucim::loader
{
// Preallocate `1 + prefetch_factor` batch buffers (capped at the number of
// requested locations) on the requested output device, and set up the worker
// pool that will fill them.
ThreadBatchDataLoader::ThreadBatchDataLoader(LoadFunc load_func,
                                             std::unique_ptr<BatchDataProcessor> batch_data_processor,
                                             const cucim::io::Device out_device,
                                             std::unique_ptr<std::vector<int64_t>> location,
                                             std::unique_ptr<std::vector<int64_t>> image_size,
                                             const uint64_t location_len,
                                             const size_t one_raster_size,
                                             const uint32_t batch_size,
                                             const uint32_t prefetch_factor,
                                             const uint32_t num_workers)
    : load_func_(load_func),
      out_device_(out_device),
      location_(std::move(location)),
      image_size_(std::move(image_size)),
      location_len_(location_len),
      one_rester_size_(one_raster_size),
      batch_size_(batch_size),
      prefetch_factor_(prefetch_factor),
      num_workers_(num_workers),
      batch_data_processor_(std::move(batch_data_processor)),
      buffer_size_(one_raster_size * batch_size),
      thread_pool_(num_workers),
      queued_item_count_(0),
      buffer_item_head_index_(0),
      buffer_item_tail_index_(0),
      processed_batch_count_(0),
      current_data_(nullptr),
      current_data_batch_size_(0)
{
    // Fixed: the original joined these two statements with a comma operator
    // (a trailing "," typo instead of ";"), which happened to behave the same
    // but was misleading; make them explicit statements.
    buffer_item_len_ = std::min(static_cast<uint64_t>(location_len_), static_cast<uint64_t>(1 + prefetch_factor_));
    raster_data_.reserve(buffer_item_len_);

    // Allocate one raster buffer per ring-buffer slot on the output device.
    cucim::io::DeviceType device_type = out_device_.type();
    for (size_t i = 0; i < buffer_item_len_; ++i)
    {
        switch (device_type)
        {
        case io::DeviceType::kCPU:
            raster_data_.emplace_back(static_cast<uint8_t*>(cucim_malloc(buffer_size_)));
            break;
        case io::DeviceType::kCUDA: {
            cudaError_t cuda_status;
            void* image_data_ptr = nullptr;
            CUDA_ERROR(cudaMalloc(&image_data_ptr, buffer_size_));
            raster_data_.emplace_back(static_cast<uint8_t*>(image_data_ptr));
            break;
        }
        case io::DeviceType::kCUDAHost:
        case io::DeviceType::kCUDAManaged:
        case io::DeviceType::kCPUShared:
        case io::DeviceType::kCUDAShared:
            fmt::print(stderr, "Device type {} is not supported!\n", device_type);
            break;
        }
    }
}
// Tear down in strict order: drain outstanding tasks first (so no worker can
// touch a freed buffer), then release the ring buffers, then stop the
// processor.
ThreadBatchDataLoader::~ThreadBatchDataLoader()
{
    // Wait until all tasks are done.
    while (wait_batch() > 0);
    cucim::io::DeviceType device_type = out_device_.type();
    for (auto& raster_ptr : raster_data_)
    {
        // Entries can be nullptr when ownership was handed to the caller
        // (see next_data() for the single-threaded case).
        switch (device_type)
        {
        case io::DeviceType::kCPU:
            if (raster_ptr)
            {
                cucim_free(raster_ptr);
            }
            break;
        case io::DeviceType::kCUDA:
            cudaError_t cuda_status;
            if (raster_ptr)
            {
                cuda_status = cudaSuccess;
                CUDA_TRY(cudaFree(raster_ptr));
            }
            break;
        case io::DeviceType::kCUDAHost:
        case io::DeviceType::kCUDAManaged:
        case io::DeviceType::kCPUShared:
        case io::DeviceType::kCUDAShared:
            fmt::print(stderr, "Device type {} is not supported!", device_type);
            break;
        }
        raster_ptr = nullptr;
    }
    // Stop the processor last so wait_for_processing() observes stopped_.
    if (batch_data_processor_)
    {
        stopped_ = true;
        batch_data_processor_->shutdown();
    }
}
// True when the loader runs with worker threads (multi-threaded mode).
ThreadBatchDataLoader::operator bool() const
{
    return num_workers_ > 0;
}
uint8_t* ThreadBatchDataLoader::raster_pointer(const uint64_t location_index) const
{
uint64_t buffer_item_index = (location_index / batch_size_) % buffer_item_len_;
uint32_t raster_data_index = location_index % batch_size_;
assert(buffer_item_index < buffer_item_len_);
uint8_t* batch_raster_ptr = raster_data_[buffer_item_index];
return &batch_raster_ptr[raster_data_index * one_rester_size_];
}
// Queue up to `load_size` locations (default: one batch) for loading via the
// worker pool. Returns the number of locations actually queued; 0 when the
// loader has no workers. For each location, load_func_ may enqueue several
// tile-loading tasks; the per-location task count is recorded in
// batch_item_counts_ so wait_batch() knows how many futures belong to each
// location.
uint32_t ThreadBatchDataLoader::request(uint32_t load_size)
{
    if (num_workers_ == 0)
    {
        return 0;
    }
    if (load_size == 0)
    {
        load_size = batch_size_;
    }
    // Never queue past the end of the location list.
    uint32_t num_items_to_request = std::min(load_size, static_cast<uint32_t>(location_len_ - queued_item_count_));
    for (uint32_t i = 0; i < num_items_to_request; ++i)
    {
        // Snapshot the task count so we can tell how many tasks this one
        // location added.
        uint32_t last_item_count = 0;
        if (!tasks_.empty())
        {
            last_item_count = tasks_.size();
        }
        load_func_(this, queued_item_count_);
        ++queued_item_count_;
        buffer_item_tail_index_ = queued_item_count_ % buffer_item_len_;
        // Append the number of added tasks to the batch count list.
        batch_item_counts_.emplace_back(tasks_.size() - last_item_count);
    }
    // Let the processor (e.g., GPU decoder) know about the new work.
    if (batch_data_processor_)
    {
        uint32_t num_remaining_patches = static_cast<uint32_t>(location_len_ - queued_item_count_);
        batch_data_processor_->request(batch_item_counts_, num_remaining_patches);
    }
    return num_items_to_request;
}
// Wait for the tasks belonging to up to one batch worth of queued locations to
// finish, in FIFO order. Returns the total number of tasks waited on; 0 when
// the loader has no workers or nothing is queued.
uint32_t ThreadBatchDataLoader::wait_batch()
{
    if (num_workers_ == 0)
    {
        return 0;
    }
    uint32_t num_items_waited = 0;
    // One iteration per queued location, at most batch_size_ locations.
    for (uint32_t batch_item_index = 0; batch_item_index < batch_size_ && !batch_item_counts_.empty(); ++batch_item_index)
    {
        uint32_t batch_item_count = batch_item_counts_.front();
        for (uint32_t i = 0; i < batch_item_count; ++i)
        {
            // Futures are consumed in the same order tasks were enqueued.
            auto& future = tasks_.front();
            future.wait();
            tasks_.pop_front();
            if (batch_data_processor_)
            {
                batch_data_processor_->remove_front_tile();
                uint32_t num_remaining_patches = static_cast<uint32_t>(location_len_ - queued_item_count_);
                batch_data_processor_->wait_batch(i, batch_item_counts_, num_remaining_patches);
            }
        }
        batch_item_counts_.pop_front();
        num_items_waited += batch_item_count;
    }
    return num_items_waited;
}
// Hand the next completed batch buffer to the caller (transferring ownership)
// and replace the ring slot with a freshly-allocated buffer for future
// batches. Returns nullptr once all batches have been consumed.
uint8_t* ThreadBatchDataLoader::next_data()
{
    if (num_workers_ == 0) // (location_len == 1 && batch_size == 1)
    {
        // If it reads entire image with multi threads (using loader), release raster memory from batch data loader
        // by setting it to nullptr so that it will not be freed by ~ThreadBatchDataLoader (destructor).
        uint8_t* batch_raster_ptr = raster_data_[0];
        raster_data_[0] = nullptr;
        return batch_raster_ptr;
    }
    if (processed_batch_count_ * batch_size_ >= location_len_)
    {
        // If all batches are processed, return nullptr.
        return nullptr;
    }
    // Wait until the batch is ready.
    wait_batch();
    // Detach the head buffer (ownership goes to the caller) and allocate a
    // replacement so the ring keeps its full set of slots.
    uint8_t* batch_raster_ptr = raster_data_[buffer_item_head_index_];
    cucim::io::DeviceType device_type = out_device_.type();
    switch (device_type)
    {
    case io::DeviceType::kCPU:
        raster_data_[buffer_item_head_index_] = static_cast<uint8_t*>(cucim_malloc(buffer_size_));
        break;
    case io::DeviceType::kCUDA: {
        cudaError_t cuda_status;
        CUDA_ERROR(cudaMalloc(&raster_data_[buffer_item_head_index_], buffer_size_));
        break;
    }
    case io::DeviceType::kCUDAHost:
    case io::DeviceType::kCUDAManaged:
    case io::DeviceType::kCPUShared:
    case io::DeviceType::kCUDAShared:
        fmt::print(stderr, "Device type {} is not supported!\n", device_type);
        break;
    }
    buffer_item_head_index_ = (buffer_item_head_index_ + 1) % buffer_item_len_;
    current_data_ = batch_raster_ptr;
    // The final batch may contain fewer rasters than batch_size_.
    current_data_batch_size_ =
        std::min(location_len_ - (processed_batch_count_ * batch_size_), static_cast<uint64_t>(batch_size_));
    ++processed_batch_count_;
    // Prepare the next batch
    request(batch_size_);
    return batch_raster_ptr;
}
// Non-owning accessor for the optional batch data processor (may be nullptr).
BatchDataProcessor* ThreadBatchDataLoader::batch_data_processor()
{
    return batch_data_processor_.get();
}
// Forward to the processor's wait_for_processing(); returns an empty
// shared_ptr when there is no processor or the loader is shutting down.
std::shared_ptr<cucim::cache::ImageCacheValue> ThreadBatchDataLoader::wait_for_processing(uint32_t index)
{
    if (batch_data_processor_ == nullptr || stopped_)
    {
        return std::shared_ptr<cucim::cache::ImageCacheValue>();
    }
    return batch_data_processor_->wait_for_processing(index);
}
// Total number of requested locations.
uint64_t ThreadBatchDataLoader::size() const
{
    return location_len_;
}
// Number of rasters per batch.
uint32_t ThreadBatchDataLoader::batch_size() const
{
    return batch_size_;
}
// Total number of batches (ceiling division of locations by batch size).
uint64_t ThreadBatchDataLoader::total_batch_count() const
{
    return (location_len_ + batch_size_ - 1) / batch_size_;
}
// Number of batches already handed out via next_data().
uint64_t ThreadBatchDataLoader::processed_batch_count() const
{
    return processed_batch_count_;
}
// Buffer returned by the most recent next_data() call (nullptr before the
// first call).
uint8_t* ThreadBatchDataLoader::data() const
{
    return current_data_;
}
// Number of valid rasters in the buffer returned by the most recent
// next_data() call (can be < batch_size() for the final batch).
uint32_t ThreadBatchDataLoader::data_batch_size() const
{
    return current_data_batch_size_;
}
// Submit one tile-loading task to the pool and register the tile with the
// processor (when present). Returns false when the loader has no workers, in
// which case nothing is enqueued.
bool ThreadBatchDataLoader::enqueue(std::function<void()> task, const TileInfo& tile)
{
    if (num_workers_ == 0)
    {
        return false;
    }
    tasks_.emplace_back(thread_pool_.enqueue(task));
    if (batch_data_processor_)
    {
        batch_data_processor_->add_tile(tile);
    }
    return true;
}
} // namespace cucim::loader
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/loader/batch_data_processor.cpp | /*
* Apache License, Version 2.0
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/loader/batch_data_processor.h"
#include <cuda_runtime.h>
#include <fmt/format.h>
#include "cucim/cache/image_cache_manager.h"
namespace cucim::loader
{
// Base-class bookkeeping: remembers the batch size and counts tiles as they
// are queued and consumed.
BatchDataProcessor::BatchDataProcessor(const uint32_t batch_size) : batch_size_(batch_size), processed_index_count_(0)
{
}
BatchDataProcessor::~BatchDataProcessor()
{
}
// Append a tile to the pending queue and bump the total tile counter.
void BatchDataProcessor::add_tile(const TileInfo& tile)
{
    tiles_.emplace_back(tile);
    ++total_index_count_;
}
// Pop and return the oldest pending tile.
// NOTE(review): calling this on an empty queue is undefined behavior
// (deque front()/pop_front() on empty) -- callers must guarantee there is a
// pending tile; confirm with ThreadBatchDataLoader::wait_batch().
TileInfo BatchDataProcessor::remove_front_tile()
{
    const TileInfo tile = tiles_.front();
    tiles_.pop_front();
    ++processed_index_count_;
    return tile;
}
// Default implementations below are intentional no-ops; subclasses (e.g., a
// GPU decoder) override them to schedule and synchronize per-batch work.
uint32_t BatchDataProcessor::request(std::deque<uint32_t>& batch_item_counts, const uint32_t num_remaining_patches)
{
    (void)batch_item_counts;
    (void)num_remaining_patches;
    return 0;
}
uint32_t BatchDataProcessor::wait_batch(const uint32_t index_in_task,
                                        std::deque<uint32_t>& batch_item_counts,
                                        const uint32_t num_remaining_patches)
{
    (void)index_in_task;
    (void)batch_item_counts;
    (void)num_remaining_patches;
    return 0;
}
// Base class has no processed data to hand out: returns an empty shared_ptr.
std::shared_ptr<cucim::cache::ImageCacheValue> BatchDataProcessor::wait_for_processing(const uint32_t)
{
    return std::shared_ptr<cucim::cache::ImageCacheValue>();
}
// Nothing to release in the base class.
void BatchDataProcessor::shutdown()
{
}
} // namespace cucim::loader
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/plugin/plugin_config.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/plugin/plugin_config.h"
#include <fmt/format.h>
#include <nlohmann/json.hpp>
using json = nlohmann::json;
namespace cucim::plugin
{
void PluginConfig::load_config(const void* json_obj)
{
const json& plugin_config = *(static_cast<const json*>(json_obj));
if (plugin_config.contains("names") && plugin_config["names"].is_array())
{
std::vector<std::string> names;
names.reserve(16);
for (const auto& name : plugin_config["names"])
{
names.push_back(name);
}
plugin_names = std::move(names);
}
}
} // namespace cucim::plugin
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/plugin/image_format.cpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/io/format/image_format.h"
#include "cucim/plugin/image_format.h"
#include "cucim/profiler/nvtx3.h"
#include <fmt/format.h>
namespace cucim::plugin
{
// Register all format descriptors exposed by a plugin.
// Returns false (and registers nothing) when `image_formats` is null or empty.
bool ImageFormat::add_interfaces(const cucim::io::format::IImageFormat* image_formats)
{
    if (!image_formats || image_formats->format_count == 0)
    {
        return false;
    }
    for (size_t i = 0; i < image_formats->format_count; ++i)
    {
        image_formats_.push_back(&(image_formats->formats[i]));
    }
    return true;
}
// Return the first registered format whose checker accepts the file at `path`
// (registration order decides ties). Throws std::invalid_argument when no
// registered plugin recognizes the file.
cucim::io::format::ImageFormatDesc* ImageFormat::detect_image_format(const cucim::filesystem::Path& path)
{
    PROF_SCOPED_RANGE(PROF_EVENT(cucim_plugin_detect_image_format));
    for (auto& format : image_formats_)
    {
        // No pre-read header buffer is supplied (nullptr, 0); each plugin
        // decides validity from the path itself.
        if (format->image_checker.is_valid(path.c_str(), nullptr, 0))
        {
            return format;
        }
    }
    throw std::invalid_argument(fmt::format("Cannot find a plugin to handle '{}'!", path));
}
} // namespace cucim::plugin
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/codec/base64.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/codec/base64.h"
#include "cucim/memory/memory_manager.h"
#include <absl/strings/escaping.h>
namespace cucim::codec::base64
{
// Base64-encode `src_count` bytes from `src` into *out_dst (NUL-terminated).
// When *out_dst is null a buffer of the required size is allocated with
// cucim_malloc() (caller frees with cucim_free()); otherwise the caller's
// buffer is used and must be large enough for the output plus the NUL.
// Returns 0 on success, nonzero on error (C-style convention kept for
// interface compatibility despite the bool return type).
bool encode(const char* src, int src_count, char** out_dst, int* out_count)
{
    // Fixed: the original tested `out_dst == nullptr` and then dereferenced it
    // (null-pointer UB); a null out_dst is an error, and allocation should
    // happen when *out_dst is null.
    if (src == nullptr || out_dst == nullptr)
    {
        return 1;
    }
    absl::string_view sv(src, src_count);
    std::string output;
    absl::Base64Escape(sv, &output);
    int count = static_cast<int>(output.size());
    if (*out_dst == nullptr)
    {
        *out_dst = static_cast<char*>(cucim_malloc(count + 1));
    }
    memcpy(*out_dst, output.c_str(), count);
    // Fixed: `*out_dst[count]` indexed the pointer-to-pointer (out_dst[count])
    // before dereferencing; the terminator belongs in the output buffer.
    (*out_dst)[count] = '\0';
    if (out_count != nullptr)
    {
        *out_count = count;
    }
    return 0;
}
// Base64-decode `src_count` characters from `src` into *out_dst
// (NUL-terminated). Buffer-allocation semantics match encode(): a null
// *out_dst is allocated with cucim_malloc() (caller frees with cucim_free()).
// Returns 0 on success, nonzero on error -- including malformed base64 input,
// which the original silently reported as success while leaving the outputs
// untouched.
bool decode(const char* src, int src_count, char** out_dst, int* out_count)
{
    // Fixed: same inverted null check as encode(); a null out_dst is an error.
    if (src == nullptr || out_dst == nullptr)
    {
        return 1;
    }
    absl::string_view sv(src, src_count);
    std::string output;
    if (!absl::Base64Unescape(sv, &output))
    {
        return 1; // invalid base64 input
    }
    int count = static_cast<int>(output.size());
    if (*out_dst == nullptr)
    {
        *out_dst = static_cast<char*>(cucim_malloc(count + 1));
    }
    memcpy(*out_dst, output.c_str(), count);
    // Fixed: `(*out_dst)[count]`, not `*out_dst[count]` (precedence bug).
    (*out_dst)[count] = '\0';
    if (out_count != nullptr)
    {
        *out_count = count;
    }
    return 0;
}
} // namespace cucim::codec::base64
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/io/device_type.cpp | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/io/device_type.h"
#include "cucim/cpp20/find_if.h"
namespace cucim::io
{
using namespace std::literals::string_view_literals;
// Linear lookup of a device-type name in the constexpr table; unknown names
// fall back to DeviceType::kCPU.
constexpr DeviceType DeviceTypeMap::at(const std::string_view& key) const
{
    for (const auto& entry : data)
    {
        if (entry.first == key)
        {
            return entry.second;
        }
    }
    return DeviceType::kCPU;
}
// Reverse lookup: device type to its canonical name; unknown values fall back
// to "cpu".
constexpr std::string_view DeviceTypeStrMap::at(const DeviceType& key) const
{
    for (const auto& entry : data)
    {
        if (entry.first == key)
        {
            return entry.second;
        }
    }
    return "cpu"sv;
}
// Name -> DeviceType table; must stay in sync with device_type_str_values
// below and contain exactly kDeviceTypeCount entries.
static constexpr std::array<std::pair<std::string_view, DeviceType>, kDeviceTypeCount> device_type_values{
    { { "cpu"sv, DeviceType::kCPU },
      { "cuda"sv, DeviceType::kCUDA },
      { "cuda_host"sv, DeviceType::kCUDAHost },
      { "cuda_managed"sv, DeviceType::kCUDAManaged },
      { "cpu_shared"sv, DeviceType::kCPUShared },
      { "cuda_shared"sv, DeviceType::kCUDAShared } }
};
// Parse a device-type name; unknown names map to DeviceType::kCPU (see
// DeviceTypeMap::at).
DeviceType lookup_device_type(const std::string_view sv)
{
    static constexpr auto map = DeviceTypeMap{ { device_type_values } };
    return map.at(sv);
}
// DeviceType -> name table; the mirror of device_type_values above.
static constexpr std::array<std::pair<DeviceType, std::string_view>, kDeviceTypeCount> device_type_str_values{
    { { DeviceType::kCPU, "cpu"sv },
      { DeviceType::kCUDA, "cuda"sv },
      { DeviceType::kCUDAHost, "cuda_host"sv },
      { DeviceType::kCUDAManaged, "cuda_managed"sv },
      { DeviceType::kCPUShared, "cpu_shared"sv },
      { DeviceType::kCUDAShared, "cuda_shared"sv } }
};
// Render a device type as its canonical name; unknown values map to "cpu"
// (see DeviceTypeStrMap::at).
std::string_view lookup_device_type_str(const DeviceType key)
{
    static constexpr auto map = DeviceTypeStrMap{ { device_type_str_values } };
    return map.at(key);
}
} // namespace cucim::io
| 0 |
rapidsai_public_repos/cucim/cpp/src | rapidsai_public_repos/cucim/cpp/src/io/device.cpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/io/device.h"
#include <regex>
#include <string>
#include <string_view>
#include <fmt/format.h>
#include "cucim/macros/defines.h"
namespace cucim::io
{
// Default device: members keep their in-class defaults from the header.
Device::Device()
{
    // TODO: consider default case (how to handle -1 index?)
}
// Copy constructor; copies the three members visible here (type, index,
// shared-memory name).
Device::Device(const Device& device) : type_(device.type_), index_(device.index_), shm_name_(device.shm_name_)
{
}
// Parse a device specification string of the form
//   "<type>[:<index>][\[<shm_name>\]]"
// e.g. 'cuda', 'cuda:0', 'cpu[shm0]', 'cuda:0[cuda_shm0]'.
// Group 1: type name; group 2: optional non-negative index (no leading zeros);
// group 3: optional shared-memory name. Raises via CUCIM_ERROR when the string
// does not match.
Device::Device(const std::string& device_name)
{
    // 'cuda', 'cuda:0', 'cpu[shm0]', 'cuda:0[cuda_shm0]'
    static const std::regex name_regex("([a-z]+)(?::(0|[1-9]\\d*))?(?:\\[([a-zA-Z0-9_\\-][a-zA-Z0-9_\\-\\.]*)\\])?");
    std::smatch match;
    if (std::regex_match(device_name, match, name_regex))
    {
        type_ = parse_type(match[1].str());
        if (match[2].matched)
        {
            index_ = std::stoi(match[2].str());
        }
        if (match[3].matched)
        {
            shm_name_ = match[3].str();
        }
    }
    else
    {
        CUCIM_ERROR("Device name doesn't match!");
    }
    validate_device();
}
// Convenience overload: delegate to the std::string parsing constructor.
Device::Device(const char* device_name) : Device::Device(std::string(device_name))
{
}
// Construct from explicit type and index (no shared-memory name).
Device::Device(DeviceType type, DeviceIndex index)
{
    type_ = type;
    index_ = index;
    validate_device();
}
// Construct from explicit type, index, and shared-memory name.
Device::Device(DeviceType type, DeviceIndex index, const std::string& param)
{
    type_ = type;
    index_ = index;
    shm_name_ = param;
    validate_device();
}
// Map a type name ("cpu", "cuda", ...) to its enum; unknown names fall back to
// DeviceType::kCPU (see lookup_device_type).
DeviceType Device::parse_type(const std::string& device_name)
{
    return lookup_device_type(device_name);
}
// Render the device in the same format the string constructor parses:
// "type", "type:index", or "type:index[shm_name]".
Device::operator std::string() const
{
    std::string_view device_type_str = lookup_device_type_str(type_);
    if (index_ == -1 && shm_name_.empty())
    {
        return fmt::format("{}", device_type_str);
    }
    else if (index_ != -1 && shm_name_.empty())
    {
        return fmt::format("{}:{}", device_type_str, index_);
    }
    else
    {
        // NOTE(review): when index_ == -1 but shm_name_ is set this prints
        // "type:-1[shm]" -- confirm that is the intended representation.
        return fmt::format("{}:{}[{}]", device_type_str, index_, shm_name_);
    }
}
// Device type (cpu/cuda/...).
DeviceType Device::type() const
{
    return type_;
}; // note: stray semicolon kept from the original; harmless
// Device index; -1 means "unspecified".
DeviceIndex Device::index() const
{
    return index_;
}
// Shared-memory name; empty when not specified.
const std::string& Device::shm_name() const
{
    return shm_name_;
}
// Overwrite all three fields at once. Note: unlike the constructors, this does
// not call validate_device().
void Device::set_values(DeviceType type, DeviceIndex index, const std::string& param)
{
    type_ = type;
    index_ = index;
    shm_name_ = param;
}
// Placeholder: always succeeds until validation is implemented.
bool Device::validate_device()
{
    // TODO: implement this
    return true;
}
} // namespace cucim::io
| 0 |
rapidsai_public_repos/cucim/cpp/src/io | rapidsai_public_repos/cucim/cpp/src/io/format/image_format.cpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cucim/macros/defines.h"
#include "cucim/io/format/image_format.h"
#include "cucim/memory/memory_manager.h"
#include <fmt/format.h>
namespace cucim::io::format
{
// Wire the C-compatible descriptor back to its owning C++ object so handle
// consumers can recover `this`.
ImageMetadata::ImageMetadata()
{
    desc_.handle = this;
}
// Allocate `size` bytes from the metadata's monotonic buffer resource; all
// such allocations are released together when this object is destroyed.
void* ImageMetadata::allocate(size_t size)
{
    return res_.allocate(size);
}
// Expose the underlying pmr resource so callers can build pmr containers that
// share this object's lifetime.
std::pmr::monotonic_buffer_resource& ImageMetadata::get_resource()
{
    return res_;
}
// The C-compatible descriptor filled in by the builder methods below.
ImageMetadataDesc& ImageMetadata::desc()
{
    return desc_;
}
// --- Builder-style setters ---
// Each setter stores the value in a member (keeping ownership inside this
// object so the raw pointers written into desc_ stay valid) and returns *this
// for chaining.
ImageMetadata& ImageMetadata::ndim(uint16_t ndim)
{
    desc_.ndim = ndim;
    return *this;
}
ImageMetadata& ImageMetadata::dims(std::string_view&& dims)
{
    dims_ = dims;
    desc_.dims = dims_.data();
    return *this;
}
ImageMetadata& ImageMetadata::shape(std::pmr::vector<int64_t>&& shape)
{
    shape_ = shape;
    desc_.shape = const_cast<int64_t*>(shape_.data());
    return *this;
}
ImageMetadata& ImageMetadata::dtype(const DLDataType& dtype)
{
    desc_.dtype = dtype;
    return *this;
}
// Store channel names and expose them as a char* array allocated from the
// monotonic resource (one pointer per channel, each pointing into the stored
// string_views).
ImageMetadata& ImageMetadata::channel_names(std::pmr::vector<std::string_view>&& channel_names)
{
    const int channel_len = channel_names.size();
    channel_names_ = channel_names;
    desc_.channel_names = static_cast<char**>(allocate(channel_len * sizeof(char*)));
    for (int i = 0; i < channel_len; ++i)
    {
        desc_.channel_names[i] = const_cast<char*>(channel_names_[i].data());
    }
    return *this;
}
// Per-dimension pixel spacing.
ImageMetadata& ImageMetadata::spacing(std::pmr::vector<float>&& spacing)
{
    spacing_ = spacing;
    desc_.spacing = const_cast<float*>(spacing_.data());
    return *this;
}
// Per-dimension spacing units, exposed as a char* array like channel_names().
ImageMetadata& ImageMetadata::spacing_units(std::pmr::vector<std::string_view>&& spacing_units)
{
    const int ndim = spacing_units.size();
    spacing_units_ = spacing_units;
    desc_.spacing_units = static_cast<char**>(allocate(ndim * sizeof(char*)));
    for (int i = 0; i < ndim; ++i)
    {
        desc_.spacing_units[i] = const_cast<char*>(spacing_units_[i].data());
    }
    return *this;
}
// Physical origin of the image.
ImageMetadata& ImageMetadata::origin(std::pmr::vector<float>&& origin)
{
    origin_ = origin;
    desc_.origin = const_cast<float*>(origin_.data());
    return *this;
}
// Orientation/direction cosines.
ImageMetadata& ImageMetadata::direction(std::pmr::vector<float>&& direction)
{
    direction_ = direction;
    desc_.direction = const_cast<float*>(direction_.data());
    return *this;
}
// Name of the coordinate system the origin/direction refer to.
ImageMetadata& ImageMetadata::coord_sys(std::string_view&& coord_sys)
{
    coord_sys_ = coord_sys;
    desc_.coord_sys = coord_sys_.data();
    return *this;
}
// --- Multi-resolution (pyramid) metadata ---
ImageMetadata& ImageMetadata::level_count(uint16_t level_count)
{
    desc_.resolution_info.level_count = level_count;
    return *this;
}
ImageMetadata& ImageMetadata::level_ndim(uint16_t level_ndim)
{
    desc_.resolution_info.level_ndim = level_ndim;
    return *this;
}
// Flattened dimensions for all levels (level_count * level_ndim entries).
ImageMetadata& ImageMetadata::level_dimensions(std::pmr::vector<int64_t>&& level_dimensions)
{
    level_dimensions_ = level_dimensions;
    desc_.resolution_info.level_dimensions = const_cast<int64_t*>(level_dimensions_.data());
    return *this;
}
// Downsample factor per level relative to level 0.
ImageMetadata& ImageMetadata::level_downsamples(std::pmr::vector<float>&& level_downsamples)
{
    level_downsamples_ = level_downsamples;
    desc_.resolution_info.level_downsamples = const_cast<float*>(level_downsamples_.data());
    return *this;
}
// Tile size per level.
ImageMetadata& ImageMetadata::level_tile_sizes(std::pmr::vector<uint32_t>&& level_tile_sizes)
{
    level_tile_sizes_ = level_tile_sizes;
    desc_.resolution_info.level_tile_sizes = const_cast<uint32_t*>(level_tile_sizes_.data());
    return *this;
}
// --- Associated images (label/macro/thumbnail etc.) ---
ImageMetadata& ImageMetadata::image_count(uint16_t image_count)
{
    desc_.associated_image_info.image_count = image_count;
    return *this;
}
// Associated-image names, exposed as a char* array allocated from the
// monotonic resource.
ImageMetadata& ImageMetadata::image_names(std::pmr::vector<std::string_view>&& image_names)
{
    const int image_size = image_names.size();
    image_names_ = image_names;
    desc_.associated_image_info.image_names = static_cast<char**>(allocate(image_size * sizeof(char*)));
    for (int i = 0; i < image_size; ++i)
    {
        desc_.associated_image_info.image_names[i] = const_cast<char*>(image_names_[i].data());
    }
    return *this;
}
// Raw (format-specific) metadata blob; this object does not take ownership.
ImageMetadata& ImageMetadata::raw_data(const std::string_view& raw_data)
{
    desc_.raw_data = raw_data.data();
    return *this;
}
// JSON metadata string. NOTE: unlike the other setters, ownership of a
// non-empty string transfers to this object (freed with cucim_free() in the
// destructor), so it must come from cucim_malloc().
ImageMetadata& ImageMetadata::json_data(const std::string_view& json_data)
{
    desc_.json_data = const_cast<char*>(json_data.data());
    return *this;
}
ImageMetadata::~ImageMetadata()
{
    // Memory for json_data needs to be manually released if image_metadata_->json_data is not ""
    // This logic may be already executed(@CuImage::~CuImage()) if this object is part of CuImage object.
    if (desc_.json_data && *desc_.json_data != '\0')
    {
        cucim_free(desc_.json_data);
        desc_.json_data = nullptr;
    }
    desc_.handle = nullptr;
}
} // namespace cucim::io::format
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/vesselness_example.ipynb | from time import time
import numpy as np
from skimage import data
from skimage import color
from skimage.filters import meijering, sato, frangi, hessian
import matplotlib.pyplot as plt
def identity(image, **kwargs):
    """Pass ``image`` through unchanged, ignoring any keyword arguments.

    Exists so the original image can be plotted with the same call signature
    as the ridge filters (meijering/sato/frangi/hessian).
    """
    del kwargs  # accepted only for signature compatibility
    return image
retina = data.retina()[200:-200, 200:-200]
image = color.rgb2gray(retina)
image = image.astype(np.float32)
# image = np.tile(image, (4, 4)) # tile to increase size to roughly (4000, 4000)
print(f"image.shape = {image.shape}")
kwargs = {'sigmas': [2], 'mode': 'reflect'}
fig, axes = plt.subplots(2, 5, figsize=[16, 8])
cmap = plt.cm.gray
tstart = time()
for i, black_ridges in enumerate([1, 0]):
for j, func in enumerate([identity, meijering, sato, frangi, hessian]):
kwargs['black_ridges'] = black_ridges
result = func(image, **kwargs)
vmin, vmax = np.percentile(result, q=[1, 99.5])
axes[i, j].imshow(result, cmap=cmap, vmin=vmin, vmax=vmax, aspect='auto')
if i == 0:
axes[i, j].set_title(['Original\nimage', 'Meijering\nneuriteness',
'Sato\ntubeness', 'Frangi\nvesselness',
'Hessian\nvesselness'][j])
if j == 0:
axes[i, j].set_ylabel('black_ridges = ' + str(bool(black_ridges)))
axes[i, j].set_xticks([])
axes[i, j].set_yticks([])
print(f"duration = {time() - tstart} s")
plt.tight_layout()
plt.show()from time import time
import cupy as cp
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
durations = {}
for use_gpu in (False, True):
if use_gpu:
from cucim.skimage import color
from cucim.skimage.filters import meijering, sato, frangi, hessian
xp = cp
asnumpy = cp.asnumpy
device_name = "gpu"
else:
from skimage import color
from skimage.filters import meijering, sato, frangi, hessian
xp = np
asnumpy = np.asarray
device_name = "cpu"
def identity(image, **kwargs):
"""Return the original image, ignoring any kwargs."""
return image
retina = data.retina()[200:-200, 200:-200]
# transfer image to the GPU
retina = xp.asarray(retina)
image = color.rgb2gray(retina)
image = image.astype(np.float32)
# image = cp.tile(image, (4, 4)) # tile to increase size to roughly (4000, 4000)
print(f"image.shape = {image.shape}")
cmap = plt.cm.gray
kwargs = {'sigmas': [2], 'mode': 'reflect'}
fig, axes = plt.subplots(2, 5, figsize=[16, 8])
tstart = time()
for i, black_ridges in enumerate([1, 0]):
for j, func in enumerate([identity, meijering, sato, frangi, hessian]):
kwargs['black_ridges'] = black_ridges
result = func(image, **kwargs)
# transfer back to host for visualization with Matplotlib
result_cpu = asnumpy(result)
vmin, vmax = map(float, xp.percentile(result, q=[1, 99.5]))
axes[i, j].imshow(result_cpu, cmap=cmap, vmin=vmin, vmax=vmax, aspect='auto')
if i == 0:
axes[i, j].set_title(['Original\nimage', 'Meijering\nneuriteness',
'Sato\ntubeness', 'Frangi\nvesselness',
'Hessian\nvesselness'][j])
if j == 0:
axes[i, j].set_ylabel('black_ridges = ' + str(bool(black_ridges)))
axes[i, j].set_xticks([])
axes[i, j].set_yticks([])
dur = time() - tstart
print(f"duration = {dur} s")
durations[device_name] = dur
plt.tight_layout()
plt.show()
print(f"GPU Acceleration = {durations['cpu']/durations['gpu']:0.4f}") | 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/Working_with_Albumentation.ipynb | #!conda install -c conda-forge matplotlib albumentations
# or
#!pip install matplotlib albumentationsINPUT_PATH = 'input/image2.tif'from cucim import CuImage
img = CuImage(INPUT_PATH)try:
import matplotlib
except ImportError:
raise ImportError("This example requires 'matplotlib' library")from matplotlib import pyplot as plt
def visualize(image):
dpi = 80.0
height, width, _ = image.shape
plt.figure(figsize=(width / dpi, height / dpi))
plt.axis('off')
plt.imshow(image)from PIL import Image
import numpy as np
# Read whole slide at the lowest resolution
resolutions = img.resolutions
level_count = resolutions["level_count"]
print(img.size('XY'))
region = img.read_region(location=[50000, 40000], size=(512, 512), level=0)
#Image.fromarray(np.asarray(region))
visualize(region)import cupy as cp
img = CuImage(INPUT_PATH)
region = np.asarray(img.read_region((50000, 40000), (512, 512)))
visualize(cp.asarray(region).get())# The following code is modified from https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_ihc_color_separation.html#sphx-glr-auto-examples-color-exposure-plot-ihc-color-separation-py
#
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from cucim.skimage import color
#transfer our array to the device
ihc_rgb = cp.asarray(region)
# transform to colorspace where the stains are separated
ihc_hed = color.rgb2hed(ihc_rgb)
# Create an RGB image for visualizing each of the stains
null = cp.zeros_like(ihc_hed[:, :, 0])
ihc_h = color.hed2rgb(cp.stack((ihc_hed[:, :, 0], null, null), axis=-1))
ihc_e = color.hed2rgb(cp.stack((null, ihc_hed[:, :, 1], null), axis=-1))
ihc_d = color.hed2rgb(cp.stack((null, null, ihc_hed[:, :, 2]), axis=-1))
# Transfer each color image back to the CPU prior to visualization
ihc_h, ihc_e, ihc_d = map(cp.asnumpy, [ihc_h, ihc_e, ihc_d])
fig, axes = plt.subplots(2, 2, figsize=(14, 14), sharex=True, sharey=True)
fontdict = dict(fontsize=18, fontweight='bold')
ax = axes.ravel()
ax[0].imshow(cp.asnumpy(ihc_rgb))
ax[0].set_title("Original image", fontdict=fontdict)
ax[1].imshow(ihc_h)
ax[1].set_title("Hematoxylin", fontdict=fontdict)
ax[2].imshow(ihc_e)
ax[2].set_title("Eosin", fontdict=fontdict)
ax[3].imshow(ihc_d)
ax[3].set_title("DAB", fontdict=fontdict)
for a in ax.ravel():
a.axis('off')
fig.tight_layout()
plt.show()try:
import albumentations
except ImportError:
raise ImportError("This example requires albumentations.")from cucim.skimage.transform import resize
from cucim.skimage.util import img_as_ubyte
import cupy as cp
resized_image = resize(cp.asarray(region),(128, 128))
# PIL.Image accepts only 8-bit image and resized_image has float64 data
# See https://scikit-image.org/docs/dev/user_guide/data_types.html#input-types
print(resized_image.dtype)
# Convert to 8-bit image
resized_image = img_as_ubyte(resized_image)
print(resized_image.dtype)
#Image.fromarray(resized_image.get())
visualize(resized_image.get())import random
import cv2
from matplotlib import pyplot as plt
import albumentations as A
from albumentations import Compose
from albumentations.core.transforms_interface import ImageOnlyTransform
from cucim import CuImage
from cucim.skimage.transform import resize
from cucim.skimage.util import img_as_ubyte
import numpy as np
import cupy as cp
def visualize(image):
    """Display an image at roughly 1:1 pixel scale with matplotlib.

    The figure size is derived from the image's pixel dimensions at 80 dpi.
    Expects a 3-dimensional (height, width, channels) array; the shape
    unpacking below requires exactly three dimensions. The array should be
    on the host (NumPy-compatible) for ``plt.imshow``.
    """
    dpi = 80.0
    height, width, _ = image.shape
    plt.figure(figsize=(width / dpi, height / dpi))
    plt.axis('off')
    plt.imshow(image)
class Resize(ImageOnlyTransform):
    """Albumentations image-only transform that resizes on the GPU via cuCIM.

    Accepts the same sizing keywords as ``skimage.transform.resize``; only
    ``height`` and ``width`` are currently forwarded to the resize call.
    """

    def __init__(self, height, width, order=None, mode='reflect', cval=0, clip=True, preserve_range=False, anti_aliasing=None,
                 anti_aliasing_sigma=None, **params):
        # Bug fix: the original called super().__init__(self), which passed the
        # instance itself as albumentations' `always_apply` flag. The instance
        # is truthy, so the transform always applied; keep that behavior but
        # state it explicitly.
        super().__init__(always_apply=True)
        self.height = height
        self.width = width
        # The remaining skimage.transform.resize options are accepted but not
        # yet forwarded (future work): order, mode, cval, clip, preserve_range,
        # anti_aliasing, anti_aliasing_sigma.

    def apply(self, img, **params):
        # Note: output_shape argument is (height, width)
        # Bug fix: resize the image handed in by albumentations (`img`) to the
        # size requested at construction time. The original resized the global
        # `region` to a hard-coded (256, 256), ignoring both arguments.
        resized_image = img_as_ubyte(resize(cp.asarray(img), (self.height, self.width)))
        # Return a host (NumPy) array so downstream albumentations ops work.
        return resized_image.get()
img = CuImage(INPUT_PATH)
image = np.asarray(img.read_region((50000, 40000), (512, 512)))
visualize(image)
transform = Compose([
# Note: input argument is (height, width), not (width, height)
Resize(256, 256),
])
augmented_image = transform(image=image)['image']
visualize(augmented_image)
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/Working_with_DALI.ipynb | #!conda install -c rapidsai -c conda-forge cucim cudatoolkit=11.0 matplotlib
#!pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110
# or
#!pip install cucim scipy scikit-image cupy-cuda110 matplotlib
# Assume that CUDA Toolkit 11.0 is available on the system.
#!pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110from cucim import CuImage
import nvidia.dali as dali
import nvidia.dali.fn as fn
import numpy as np
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
%matplotlib inline
def show_images(image_batch):
    """Render a batch of images in a 4-column matplotlib grid.

    `image_batch` is indexed with ``.at(j)`` (DALI TensorList-style access —
    presumably host-resident; verify against the pipeline output). Relies on
    the module-level ``batch_size`` to size the grid.
    """
    columns = 4
    rows = (batch_size + 1) // (columns)
    fig = plt.figure(figsize = (32,(32 // columns) * rows))
    gs = gridspec.GridSpec(rows, columns)
    for j in range(rows*columns):
        plt.subplot(gs[j])
        plt.axis("off")
        plt.imshow(image_batch.at(j))
def filter_images(image):
    """Pass-through placeholder for a future cuCIM filtering stage.

    Intended to be replaced with cuCIM's operation methods, e.g.::

        from cucim.core import filters
        filtered_image = filters.sobel(image)
    """
    return image
def gen_image(file_name, location_list, size_list, level_list, batch_size):
    """Yield batches of NumPy patches read from a whole-slide image.

    Opens `file_name` with cuCIM, reads one region per (location, size,
    level) triple, and yields a 1-tuple containing a list of `batch_size`
    arrays (the 1-tuple matches DALI external_source with num_outputs=1).
    Note: a trailing partial batch is dropped, not yielded.
    """
    image = CuImage(file_name)
    batch = []
    for location, size, level in zip(location_list, size_list, level_list):
        region = image.read_region(location=location, size=size, level=level)
        batch.append(np.asarray(region))
        if (len(batch) == batch_size):
            yield (batch,)
            batch = []
file_name = "input/image.tif"
location_list = [ (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000)]
size_list = [ (64, 64), (128, 128), (512, 512), (800, 800), (800, 800), (800, 800), (800, 800), (800, 800)]
level_list = [ 0, 0, 0, 0, 1, 2, 3, 4]
batch_size = 4
num_threads = 1
pipe = dali.pipeline.Pipeline(batch_size = batch_size, num_threads = num_threads, device_id = 0,
exec_async=False, exec_pipelined=False, seed=99)
with pipe:
input_image = fn.external_source(source=gen_image(file_name, location_list, size_list, level_list, batch_size),
num_outputs=1)
filtered_image = fn.python_function(input_image[0], function=filter_images, num_outputs=1)
resize = fn.resize(filtered_image, resize_x=128, resize_y=128)
pipe.set_outputs(resize)
pipe.build()
while True:
try:
output = pipe.run()
show_images(output[0])
except StopIteration:
break from cucim import CuImage
from cucim.skimage import color
from cucim.skimage.util import img_as_ubyte
import nvidia.dali as dali
import nvidia.dali.fn as fn
import cupy as cp
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
%matplotlib inline
def show_images(image_batch):
    """Render a batch of images in a 4-column matplotlib grid.

    `image_batch` is indexed with ``.at(j)`` (DALI TensorList-style access —
    the caller converts GPU output with ``.as_cpu()`` first). Relies on the
    module-level ``batch_size`` to size the grid.
    """
    columns = 4
    rows = (batch_size + 1) // (columns)
    fig = plt.figure(figsize = (32,(32 // columns) * rows))
    gs = gridspec.GridSpec(rows, columns)
    for j in range(rows*columns):
        plt.subplot(gs[j])
        plt.axis("off")
        plt.imshow(image_batch.at(j))
def filter_images(image):
    """Extract the hematoxylin stain from an RGB image as an RGB uint8 image.

    Runs on the GPU: `image` is converted to HED color space, the eosin and
    DAB channels are zeroed, and the result is mapped back to RGB.
    """
    ihc_hed = color.rgb2hed(image)
    # Create an RGB image for visualizing hematoxylin stain.
    # (https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_ihc_color_separation.html#sphx-glr-auto-examples-color-exposure-plot-ihc-color-separation-py)
    null = cp.zeros_like(ihc_hed[:, :, 0])
    ihc_h = color.hed2rgb(cp.stack((ihc_hed[:, :, 0], null, null), axis=-1))
    return img_as_ubyte(ihc_h)
def gen_image(file_name, location_list, size_list, level_list, batch_size):
    """Yield batches of CuPy (GPU) patches read from a whole-slide image.

    Same contract as the CPU variant, but each region is transferred to the
    GPU with ``cp.asarray`` so the DALI pipeline can consume device data.
    Note: a trailing partial batch is dropped, not yielded.
    """
    image = CuImage(file_name)
    batch = []
    for location, size, level in zip(location_list, size_list, level_list):
        region = image.read_region(location=location, size=size, level=level)
        batch.append(cp.asarray(region))
        if (len(batch) == batch_size):
            yield (batch,)
            batch = []
file_name = "input/image.tif"
location_list = [ (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000), (5000, 5000)]
size_list = [ (64, 64), (128, 128), (512, 512), (800, 800), (800, 800), (800, 800), (800, 800), (800, 800)]
level_list = [ 0, 0, 0, 0, 1, 2, 3, 4]
batch_size = 4
num_threads = 1
pipe = dali.pipeline.Pipeline(batch_size = batch_size, num_threads = num_threads, device_id = 0,
exec_async=False, exec_pipelined=False, seed=99)
with pipe:
input_image = fn.external_source(source=gen_image(file_name, location_list, size_list, level_list, batch_size),
device='gpu', num_outputs=1)
filtered_image = fn.python_function(input_image[0], function=filter_images, device='gpu', num_outputs=1)
resize = fn.resize(filtered_image, device='gpu', resize_x=128, resize_y=128)
pipe.set_outputs(resize)
pipe.build()
while True:
try:
output = pipe.run()
show_images(output[0].as_cpu())
except StopIteration:
break | 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/gabor_example.ipynb | import time
import cupy as cp
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
durations = {}
for use_gpu in (False, True):
if use_gpu:
from cupyx.scipy import ndimage as ndi
from cucim.skimage.util import img_as_float32
from cucim.skimage.filters import gabor_kernel
xp = cp
asnumpy = cp.asnumpy
device_name = "gpu"
else:
from scipy import ndimage as ndi
from skimage.util import img_as_float32
from skimage.filters import gabor_kernel
xp = np
asnumpy = np.asarray
device_name = "cpu"
    def compute_feats(image, kernels):
        """Compute a (len(kernels), 2) feature matrix for `image`.

        Row k holds the mean and variance of the image convolved with
        kernels[k] (wrap-around boundary). Uses the loop-selected `xp`/`ndi`
        (NumPy+SciPy on CPU, CuPy on GPU).
        """
        feats = xp.zeros((len(kernels), 2), dtype=np.double)
        for k, kernel in enumerate(kernels):
            filtered = ndi.convolve(image, kernel, mode='wrap')
            feats[k, 0] = filtered.mean()
            feats[k, 1] = filtered.var()
        return feats
    def match(feats, ref_feats):
        """Return the index of the reference feature set closest to `feats`.

        Distance is the sum of squared differences; on ties the first
        (lowest-index) reference wins. Returns None only if ref_feats is
        empty.
        """
        min_error = np.inf
        min_i = None
        for i in range(ref_feats.shape[0]):
            error = xp.sum((feats - ref_feats[i, :])**2)
            if error < min_error:
                min_error = error
                min_i = i
        return min_i
tstart = time.time()
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma)
kernels.append(kernel.real)
#shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float32(xp.asarray(data.brick())) # [shrink]
grass = img_as_float32(xp.asarray(data.grass())) # [shrink]
gravel = img_as_float32(xp.asarray(data.gravel())) # [shrink]
image_names = ('brick', 'grass', 'gravel')
images = (brick, grass, gravel)
# prepare reference features
ref_feats = xp.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(gravel, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
    def power(image, kernel):
        """Return the Gabor power response: magnitude of the complex filter output.

        The image is first standardized (zero mean, unit std) so responses
        are comparable across images; the real and imaginary kernel parts are
        convolved separately and combined as sqrt(re^2 + im^2).
        """
        # Normalize images for better comparison.
        image = (image - image.mean()) / image.std()
        return xp.sqrt(ndi.convolve(image, kernel.real, mode='wrap')**2 +
                       ndi.convolve(image, kernel.imag, mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, xp.stack([power(img, kernel) for img in images])))
dur = time.time() - tstart
print(f"Duration {device_name} = {dur} s")
durations[device_name] = dur
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(12, 14))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(asnumpy(img))
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(asnumpy(kernel.real))
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = float(powers.min())
vmax = float(powers.max())
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(asnumpy(patch), vmin=vmin, vmax=vmax)
ax.axis('off')
plt.tight_layout()
plt.show()
print(f"GPU Acceleration = {durations['cpu']/durations['gpu']:0.4f}") | 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/Supporting_Aperio_SVS_Format.ipynb | DATA_PATH_LIST = []
import json
import wget
from pathlib import Path
data_url = "https://openslide.cs.cmu.edu/download/openslide-testdata"
if not Path("index.json").exists():
wget.download(f"{data_url}/index.json")
with open("index.json") as fp:
data_index = json.load(fp)
Path("Aperio").mkdir(parents=True, exist_ok=True)
for file_path in data_index:
if file_path.startswith("Aperio/"):
DATA_PATH_LIST.append(file_path)
if not Path(file_path).exists():
file_info = data_index[file_path]
print(f"# Downloading {file_path} ({file_info['description']}) ...")
wget.download(f"{data_url}/{file_path}", out=file_path)
print()from cucim import CuImage
import pprint
pp = pprint.PrettyPrinter(indent=2, compact=True)
for file_name in DATA_PATH_LIST:
img = CuImage(file_name)
print("#", file_name)
metadata = img.metadata
pp.pprint(img.metadata)
img.associated_imagesfrom matplotlib import pyplot as plt
def visualize(image, downsample=1):
    """Display `image` with matplotlib, shrunk by `downsample`.

    The effective dpi is scaled by `downsample`, so a larger value yields a
    smaller on-screen figure. Expects a 3-dimensional (H, W, C) array.
    """
    dpi = 80.0 * downsample
    height, width, _ = image.shape
    plt.figure(figsize=(width / dpi, height / dpi))
    plt.axis('off')
    plt.imshow(image)
    # NOTE(review): closes all figures right after imshow — presumably to cap
    # memory when looping over many slides; the inline notebook backend still
    # renders the figure. Confirm intent before reusing outside a notebook.
    plt.close('all')
import numpy as np
from cucim import CuImage
for file_name in DATA_PATH_LIST:
img = CuImage(file_name)
metadata = img.metadata
level_count = metadata["cucim"]["resolutions"]["level_count"]
small_img = img.read_region(level=level_count - 1) # read whole image at the lowest resolution level
label_img = img.associated_image("label")
macro_img = img.associated_image("macro")
thumbnail_img = img.associated_image("thumbnail")
visualize(np.asarray(small_img), 5)
visualize(np.asarray(label_img), 2)
visualize(np.asarray(macro_img), 3)
visualize(np.asarray(thumbnail_img), 4)
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/Multi-thread_and_Multi-process_Tests.ipynb | #!conda install -c conda-forge openslide
# or
#!apt-get install --yes --fix-missing --no-install-recommends libopenslide0
#!pip install openslide-pythonfrom contextlib import ContextDecorator
from time import perf_counter
class Timer(ContextDecorator):
def __init__(self, message):
self.message = message
self.end = None
def elapsed_time(self):
self.end = perf_counter()
return self.end - self.start
def __enter__(self):
self.start = perf_counter()
return self
def __exit__(self, exc_type, exc, exc_tb):
if not self.end:
self.elapsed_time()
print("{} : {}".format(self.message, self.end - self.start))import numpy as np
from openslide import OpenSlide
import concurrent.futures
from cucim import CuImage
import os
num_threads = os.cpu_count()
input_file = "input/image2.tif"
start_location = 0
patch_size = 256
def load_tile_openslide(slide, start_loc, patch_size):
    # OpenSlide signature: read_region(location, level, size). The result is
    # discarded — this function exists only to time the read in the benchmark.
    region = slide.read_region(start_loc, 0, [patch_size, patch_size])
def load_tile_cucim(slide, start_loc, patch_size):
    # cuCIM signature: read_region(location, size, level) — note the argument
    # order differs from OpenSlide. Result discarded; benchmark timing only.
    region = slide.read_region(start_loc, [patch_size, patch_size], 0)
openslide_tot_time = 0
cucim_tot_time = 0
for num_workers in range(1, num_threads + 1):
print("# of thread : {}".format(num_workers))
openslide_time = 0
# (92344 x 81017)
with OpenSlide(input_file) as slide:
width, height = slide.dimensions
count = 0
for h in range(start_location, height, patch_size):
for w in range(start_location, width, patch_size):
count += 1
start_loc_iter = ((sx, sy)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size))
with Timer(" Thread elapsed time (OpenSlide)") as timer:
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
lambda start_loc: load_tile_openslide(slide, start_loc, patch_size),
start_loc_iter,
)
openslide_time = timer.elapsed_time()
openslide_tot_time += openslide_time
cucim_time = 0
slide = CuImage(input_file)
start_loc_iter = ((sx, sy)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size))
with Timer(" Thread elapsed time (cuCIM)") as timer:
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
lambda start_loc: load_tile_cucim(slide, start_loc, patch_size),
start_loc_iter,
)
cucim_time = timer.elapsed_time()
cucim_tot_time += cucim_time
print(" Performance gain (OpenSlide/cuCIM): {}".format(openslide_time / cucim_time))
print("Total time (OpenSlide):", openslide_tot_time)
print("Total time (cuCIM):", cucim_tot_time)
print("Average performance gain (OpenSlide/cuCIM): {}".format(openslide_tot_time / cucim_tot_time))
import concurrent.futures
from itertools import repeat
import numpy as np
from openslide import OpenSlide
from cucim import CuImage
import os
num_processes = os.cpu_count()
input_file = "input/image2.tif"
start_location = 0
patch_size = 256
def load_tile_openslide_mp(inp_file, start_loc, patch_size):
    # Multiprocessing variant: opens the slide inside the worker (file handles
    # can't be shared across processes) and reads one patch. Result discarded;
    # benchmark timing only.
    with OpenSlide(inp_file) as slide:
        region = slide.read_region(start_loc, 0, [patch_size, patch_size])
def load_tile_cucim_mp(inp_file, start_loc, patch_size):
    # Multiprocessing variant: constructs a CuImage inside the worker and
    # reads one patch (cuCIM argument order: location, size, level). Result
    # discarded; benchmark timing only.
    slide = CuImage(inp_file)
    region = slide.read_region(start_loc, [patch_size, patch_size], 0)
openslide_tot_time = 0
cucim_tot_time = 0
for num_workers in range(1, num_processes + 1):
print("# of processes : {}".format(num_workers))
openslide_time = 0
# (92344 x 81017)
with OpenSlide(input_file) as slide:
width, height = slide.dimensions
start_loc_iter = ((sy, sx)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size))
with Timer(" Process elapsed time (OpenSlide)") as timer:
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
load_tile_openslide_mp,
repeat(input_file),
start_loc_iter,
repeat(patch_size)
)
openslide_time = timer.elapsed_time()
openslide_tot_time += openslide_time
cucim_time = 0
slide = CuImage(input_file)
start_loc_iter = ((sy, sx)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size))
with Timer(" Process elapsed time (cuCIM)") as timer:
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
load_tile_cucim_mp,
repeat(input_file),
start_loc_iter,
repeat(patch_size)
)
cucim_time = timer.elapsed_time()
cucim_tot_time += cucim_time
print(" Performance gain (OpenSlide/cuCIM): {}".format(openslide_time / cucim_time))
print("Total time (OpenSlide):", openslide_tot_time)
print("Total time (cuCIM):", cucim_tot_time)
print("Average performance gain (OpenSlide/cuCIM): {}".format(openslide_tot_time / cucim_tot_time))
import concurrent.futures
from itertools import repeat
from functools import partial
import numpy as np
from openslide import OpenSlide
from cucim import CuImage
import os
num_processes = os.cpu_count()
input_file = "input/image2.tif"
start_location = 0
patch_size = 256
is_process_initialized = False
openslide_obj = None
cucim_obj = None
def load_tile_openslide_mp(slide, start_loc, patch_size):
    # Reads one patch from an already-open OpenSlide handle (supplied by
    # proc_init_openslide below). Result discarded; benchmark timing only.
    region = slide.read_region(start_loc, 0, [patch_size, patch_size])
def proc_init_openslide(inp_file, f, *iters):
    """Open the slide once per worker process, then delegate to `f`.

    Uses module-level globals as a per-process cache: the first call in a
    process creates the OpenSlide handle; later calls reuse it. Returns
    f(handle, *iters).
    """
    global is_process_initialized, openslide_obj
    if not is_process_initialized:
        is_process_initialized = True
        openslide_obj = OpenSlide(inp_file)
    return f(openslide_obj, *iters)
def load_tile_cucim_mp(slide, start_loc, patch_size):
    # Reads one patch from an already-constructed CuImage (supplied by
    # proc_init_cucim below). Result discarded; benchmark timing only.
    region = slide.read_region(start_loc, [patch_size, patch_size], 0)
def proc_init_cucim(inp_file, f, *iters):
    """Construct the CuImage once per worker process, then delegate to `f`.

    Mirror of proc_init_openslide: module-level globals cache the CuImage so
    each worker pays the open cost only once. Returns f(cuimage, *iters).
    """
    global is_process_initialized, cucim_obj
    if not is_process_initialized:
        is_process_initialized = True
        cucim_obj = CuImage(inp_file)
    return f(cucim_obj, *iters)
openslide_tot_time = 0
cucim_tot_time = 0
for num_workers in range(1, num_processes + 1):
print("# of processes : {}".format(num_workers))
openslide_time = 0
# (92344 x 81017)
with OpenSlide(input_file) as slide:
width, height = slide.dimensions
start_loc_iter = ((sx, sy)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size))
with Timer(" Process elapsed time (OpenSlide)") as timer:
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
partial(proc_init_openslide, input_file, load_tile_openslide_mp),
start_loc_iter,
repeat(patch_size)
)
openslide_time = timer.elapsed_time()
openslide_tot_time += openslide_time
cucim_time = 0
slide = CuImage(input_file)
start_loc_iter = ((sx, sy)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size))
with Timer(" Process elapsed time (cuCIM)") as timer:
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
partial(proc_init_cucim, input_file, load_tile_cucim_mp),
start_loc_iter,
repeat(patch_size)
)
cucim_time = timer.elapsed_time()
cucim_tot_time += cucim_time
print(" Performance gain (OpenSlide/cuCIM): {}".format(openslide_time / cucim_time))
print("Total time (OpenSlide):", openslide_tot_time)
print("Total time (cuCIM):", cucim_tot_time)
print("Average performance gain (OpenSlide/cuCIM): {}".format(openslide_tot_time / cucim_tot_time))
import concurrent.futures
from itertools import repeat
import numpy as np
from openslide import OpenSlide
from cucim import CuImage
import os
num_processes = os.cpu_count()
input_file = "input/image2.tif"
start_location = 0
patch_size = 256
def load_tile_openslide_chunk_mp(inp_file, start_loc_list, patch_size):
    # Chunked multiprocessing variant: the worker opens the slide once and
    # reads every patch in its assigned chunk, amortizing the open cost.
    # Results discarded; benchmark timing only.
    with OpenSlide(inp_file) as slide:
        for start_loc in start_loc_list:
            region = slide.read_region(start_loc, 0, [patch_size, patch_size])
def load_tile_cucim_chunk_mp(inp_file, start_loc_list, patch_size):
    # Chunked multiprocessing variant for cuCIM: one CuImage per worker,
    # reading every patch in the assigned chunk (argument order: location,
    # size, level). Results discarded; benchmark timing only.
    slide = CuImage(inp_file)
    for start_loc in start_loc_list:
        region = slide.read_region(start_loc, [patch_size, patch_size], 0)
openslide_tot_time = 0
cucim_tot_time = 0
print("Total # of processes : {}".format(num_processes))
for num_workers in range(1, num_processes + 1):
print("# of processes : {}".format(num_workers))
openslide_time = 0
# (92344 x 81017)
with OpenSlide(input_file) as slide:
width, height = slide.dimensions
start_loc_data = [(sx, sy)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size)]
chunk_size = len(start_loc_data) // num_workers
start_loc_list_iter = [start_loc_data[i:i+chunk_size] for i in range(0, len(start_loc_data), chunk_size)]
with Timer(" Process elapsed time (OpenSlide)") as timer:
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
load_tile_openslide_chunk_mp,
repeat(input_file),
start_loc_list_iter,
repeat(patch_size)
)
openslide_time = timer.elapsed_time()
openslide_tot_time += openslide_time
cucim_time = 0
slide = CuImage(input_file)
start_loc_data = [(sx, sy)
for sy in range(start_location, height, patch_size)
for sx in range(start_location, width, patch_size)]
chunk_size = len(start_loc_data) // num_workers
start_loc_list_iter = [start_loc_data[i:i+chunk_size] for i in range(0, len(start_loc_data), chunk_size)]
with Timer(" Process elapsed time (cuCIM)") as timer:
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_workers
) as executor:
executor.map(
load_tile_cucim_chunk_mp,
repeat(input_file),
start_loc_list_iter,
repeat(patch_size)
)
cucim_time = timer.elapsed_time()
cucim_tot_time += cucim_time
print(" Performance gain (OpenSlide/cuCIM): {}".format(openslide_time / cucim_time))
print("Total time (OpenSlide):", openslide_tot_time)
print("Total time (cuCIM):", cucim_tot_time)
print("Average performance gain (OpenSlide/cuCIM): {}".format(openslide_tot_time / cucim_tot_time))
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/random_walker_example.ipynb | import time
import cupy as cp
import numpy as np
import matplotlib.pyplot as plt
durations = {}
for use_gpu in (False, True):
length = 1500
blob_size_fraction = 0.025
if use_gpu:
from cucim import skimage
from cucim.skimage.exposure import rescale_intensity
from cucim.skimage.segmentation import random_walker
try:
from cucim.skimage.data import binary_blobs
blobs = binary_blobs(length=length, seed=1, blob_size_fraction=blob_size_fraction)
except ImportError:
from skimage.data import binary_blobs
blobs = cp.asarray(binary_blobs(length=length, seed=1, blob_size_fraction=blob_size_fraction))
asnumpy = cp.asnumpy
xp = cp
device_name = 'gpu'
else:
import skimage
from skimage.data import binary_blobs
from skimage.exposure import rescale_intensity
from skimage.segmentation import random_walker
blobs = binary_blobs(length=length, seed=1, blob_size_fraction=blob_size_fraction)
asnumpy = np.asarray
xp = np
device_name = 'cpu'
# Generate noisy synthetic data
data = skimage.img_as_float(blobs)
print(f"data.shape = {data.shape}")
sigma = .3
data += xp.random.normal(loc=0, scale=sigma, size=data.shape)
data = rescale_intensity(data, in_range=(-sigma, 1 + sigma),
out_range=(-1, 1))
data = data.astype(np.float32, copy=False)
print(f"data.dtype={data.dtype}")
# The range of the binary image spans over (-1, 1).
# We choose the hottest and the coldest pixels as markers.
markers = xp.zeros(data.shape, dtype=np.uint)
markers[data < -0.95] = 1
markers[data > 0.95] = 2
tstart = time.time()
# Run random walker algorithm
labels = random_walker(data, markers, beta=5, mode='cg', tol=1e-5)
dur = time.time() - tstart
durations[device_name] = dur
print(f"Duration {device_name} = {dur} s")
# Plot results
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4.8),
sharex=True, sharey=True)
ax1.imshow(asnumpy(data[:200, :200]), vmin=-.5, vmax=1.5, cmap='gray')
ax1.axis('off')
ax1.set_title('Noisy data')
ax2.imshow(asnumpy(markers[:200, :200]), cmap='magma')
ax2.axis('off')
ax2.set_title('Markers')
ax3.imshow(asnumpy(labels[:200, :200]), cmap='gray')
ax3.axis('off')
ax3.set_title('Segmentation')
fig.tight_layout()
plt.show()
print(f"GPU Acceleration = {durations['cpu']/durations['gpu']:0.4f}") | 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/Accessing_File_with_GDS.ipynb | #!conda install -c pytorch -c conda-forge pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=11.0
# or
#!pip install cupy-cuda110
#!pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.htmlimport os
from cucim.clara.filesystem import CuFileDriver
fno = os.open( "input/image.tif", os.O_RDONLY | os.O_DIRECT)
fno2 = os.dup(fno)
fd = CuFileDriver(fno, False)
fd.close()
os.close(fno)
# Do not use GDS even when GDS can be supported for the file.
fd2 = CuFileDriver(fno2, True)
fd2.close()
os.close(fno2)
help(CuFileDriver.__init__)import cucim.clara.filesystem as fs
fd = fs.open("input/image.tif", "r")
fs.close(fd) # same with fd.close()
# Open file without using GDS
fd2 = fs.open("input/image.tif", "rp")
fs.close(fd2) # same with fd2.close()
from cucim.clara.filesystem import CuFileDriver
import cucim.clara.filesystem as fs
import os, numpy as np, torch
# Write a file with size 10 (in bytes)
with open("input.raw", "wb") as input_file:
input_file.write(bytearray([101, 102, 103, 104, 105, 106, 107, 108, 109, 110]))
# Create an array with size 10 (in bytes)
np_arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=np.uint8)
torch_arr = torch.from_numpy(np_arr) # Note: np_arr shares internal data with torch_arr
# Using CuFileDriver
fno = os.open( "input.raw", os.O_RDONLY)
fd = CuFileDriver(fno)
read_count = fd.pread(np_arr, 8, 0, 2) # read 8 bytes starting from file offset 0 into buffer offset 2
print("{:10} cnt: {} content: {}".format("np_arr", read_count, np_arr))
read_count = fd.pread(np_arr, 10, 0) # read 10 bytes starting from file offset 0
print("{:10} cnt: {} content: {}".format("np_arr", read_count, np_arr))
read_count = fd.pread(torch_arr.data_ptr(), 10, 3) # read 10 bytes starting from file offset 3
print("{:10} cnt: {} content: {}".format("torch_arr", read_count, torch_arr))
fd.close()
os.close(fno)
fno = os.open("output.raw", os.O_RDWR | os.O_CREAT | os.O_TRUNC)
fd = CuFileDriver(fno)
write_count = fd.pwrite(np_arr, 10, 5) # write 10 bytes from np_array to file starting from offset 5
fd.close()
os.close(fno)
print("{:10} cnt: {} content: {}".format("output.raw", write_count, list(open("output.raw", "rb").read())))
print()
# Using filesystem package
fd = fs.open("output.raw", "r")
read_count = fs.pread(fd, np_arr, 10, 0) # read 10 bytes starting from offset 0
print("{:10} cnt: {} content: {}".format("np_arr", read_count, np_arr))
fs.close(fd) # same with fd.close()
# Using 'with' statement
with fs.open("output.raw", "r") as fd:
read_count = fd.pread(np_arr, 10, 0) # read 10 bytes starting from offset 0
print("{:10} cnt: {} content: {}".format("np_arr", read_count, np_arr))
from cucim.clara.filesystem import CuFileDriver
import cucim.clara.filesystem as fs
import os
import cupy as cp
import torch
# Write a file with size 10 (in bytes)
with open("input.raw", "wb") as input_file:
input_file.write(bytearray([101, 102, 103, 104, 105, 106, 107, 108, 109, 110]))
# Create an array with size 10 (in bytes)
cp_arr = cp.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=cp.uint8)
cuda0 = torch.device('cuda:0')
torch_arr = torch.zeros(10, dtype=torch.uint8, device=cuda0)
# Using CuFileDriver
fno = os.open( "input.raw", os.O_RDONLY | os.O_DIRECT)
fd = CuFileDriver(fno)
# Demonstrate GDS reads into GPU buffers (CuPy array and PyTorch CUDA tensor).
read_count = fd.pread(cp_arr, 8, 0, 2) # read 8 bytes starting from file offset 0 into buffer offset 2
# Bug fix: the label said "np_arr" but the buffer printed here is cp_arr.
print("{:20} cnt: {} content: {}".format("cp_arr", read_count, cp_arr))
read_count = fd.pread(cp_arr, 10, 0) # read 10 bytes starting from offset 0
# Bug fix: this GPU example defines no np_arr (NameError); print the cp_arr
# that was just filled by the read.
print("{:20} cnt: {} content: {}".format("cp_arr", read_count, cp_arr))
read_count = fd.pread(torch_arr, 10, 3) # read 10 bytes starting from offset 3
print("{:20} cnt: {} content: {}".format("torch_arr", read_count, torch_arr))
fd.close()
os.close(fno)
fno = os.open("output.raw", os.O_RDWR | os.O_CREAT | os.O_TRUNC)
fd = CuFileDriver(fno)
write_count = fd.pwrite(cp_arr, 10, 5) # write 10 bytes from np_array to file starting from offset 5
fd.close()
os.close(fno)
print("{:20} cnt: {} content: {}".format("output.raw", write_count, list(open("output.raw", "rb").read())))
print()
# Using filesystem package
fd = fs.open("output.raw", "r")
read_count = fs.pread(fd, cp_arr, 10, 0) # read 10 bytes starting from offset 0
print("{:20} cnt: {} content: {}".format("cp_arr", read_count, cp_arr))
fs.close(fd) # same with fd.close()
# Using 'with' statement
with fs.open("output.raw", "r") as fd:
    read_count = fd.pread(cp_arr, 10, 0) # read 10 bytes starting from offset 0
    # Bug fix: the label said "np_arr" but the buffer printed is cp_arr.
    print("{:10} cnt: {} content: {}".format("cp_arr", read_count, cp_arr))
cp_arr.__cuda_array_interface__torch_arr.__cuda_array_interface__ | 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/Basic_Usage.ipynb | #!conda install -c conda-forge pillow
# or
# !pip install pillow
# !pip install numpy scipy scikit-image cupy-cuda110 # for cucim dependency (assuming that CUDA 11.0 is used for CuPy)
from cucim import CuImage

img = CuImage("input/image.tif")

import json

# Basic image properties / metadata.
print(img.is_loaded)          # True if image data is loaded & available.
print(img.device)             # A device type.
print(img.ndim)               # The number of dimensions.
print(img.dims)               # A string containing a list of dimensions being requested.
print(img.shape)              # A tuple of dimension sizes (in the order of `dims`).
print(img.size('XYC'))        # Returns size as a tuple for the given dimension order.
print(img.dtype)              # The data type of the image.
print(img.channel_names)      # A channel name list.
print(img.spacing())          # Returns physical size in tuple.
print(img.spacing_units())    # Units for each spacing element (size is same with `ndim`).
print(img.origin)             # Physical location of (0, 0, 0) (size is always 3).
print(img.direction)          # Direction cosines (size is always 3x3).
print(img.coord_sys)          # Coordinate frame in which the direction cosines are measured. Available Coordinate frame is not finalized yet.
print(img.associated_images)  # Returns a set of associated image names.
print(json.dumps(img.resolutions, indent=2))  # Returns a dict that includes resolution information.
print(json.dumps(img.metadata, indent=2))     # A metadata object as `dict`
print(img.raw_metadata)       # A raw metadata string.

# Read whole slide at the lowest resolution
resolutions = img.resolutions
level_dimensions = resolutions["level_dimensions"]
level_count = resolutions["level_count"]
region = img.read_region(location=[0,0], size=level_dimensions[level_count - 1], level=level_count - 1)
region.save("thumbnail.ppm")  # currently, cuCIM can save image with .ppm format.

from PIL import Image
Image.open("thumbnail.ppm")

# Read a 512x512 patch at the highest resolution (level 0).
region = img.read_region([10000,10000], [512,512], 0)
region.save("test.ppm")

from PIL import Image
Image.open("test.ppm")

# NumPy array interface demonstration.
import numpy as np
np_arr = np.array([1,2,3])
print(np_arr.__array_interface__)

# A read region implements __array_interface__, so NumPy can wrap it without a copy.
np_img_arr = np.asarray(region)
Image.fromarray(np_img_arr)

# CUDA array interface demonstration.
import cupy as cp
cp_arr = cp.array([1,2,3])
print(cp_arr.__cuda_array_interface__)

import cupy as cp
from cucim import CuImage

# Read a region directly into GPU memory and wrap it with CuPy (zero-copy via
# __cuda_array_interface__); .get() copies back to host for PIL display.
img = CuImage("input/image.tif")
resolutions = img.resolutions
level_dimensions = resolutions["level_dimensions"]
level_count = resolutions["level_count"]
region = img.read_region([0,0], level_dimensions[level_count - 1], level_count - 1, device="cuda")
print(region.device)
print(region.__cuda_array_interface__)
cupy_arr = cp.asarray(region)
Image.fromarray(cupy_arr.get())
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/notebooks/Using_Cache.ipynb | from cucim import CuImage
from cucim import CuImage

# Inspect the current (default) image cache and its statistics.
cache = CuImage.cache()
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f'free_memory: {cache.free_memory}')
print(f' size: {cache.size}/{cache.capacity}')
print(f' hit_count: {cache.hit_count}')
print(f' miss_count: {cache.miss_count}')
print(f' config: {cache.config}')

from cucim import CuImage

# Switch to a per-process cache with 2048 MiB of memory capacity.
cache = CuImage.cache('per_process', memory_capacity=2048)
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f'free_memory: {cache.free_memory}')
print(f' size: {cache.size}/{cache.capacity}')
print(f' hit_count: {cache.hit_count}')
print(f' miss_count: {cache.miss_count}')
print(f' config: {cache.config}')

from cucim import CuImage
from cucim.clara.cache import preferred_memory_capacity

img = CuImage('input/image.tif')

image_size = img.size('XY') # same with `img.resolutions["level_dimensions"][0]`
tile_size = img.resolutions['level_tile_sizes'][0] # default: (256, 256)
patch_size = (1024, 1024) # default: (256, 256)
bytes_per_pixel = 3 # default: 3
print(f'image size: {image_size}')
print(f'tile size: {tile_size}')

# Below three statements are the same.
memory_capacity = preferred_memory_capacity(img, patch_size=patch_size)
memory_capacity2 = preferred_memory_capacity(None, image_size, tile_size, patch_size, bytes_per_pixel)
memory_capacity3 = preferred_memory_capacity(None, image_size, patch_size=patch_size)
print(f'memory_capacity : {memory_capacity} MiB')
print(f'memory_capacity2: {memory_capacity2} MiB')
print(f'memory_capacity3: {memory_capacity3} MiB')

cache = CuImage.cache('per_process', memory_capacity=memory_capacity) # You can also manually set capacity` (e.g., `capacity=500`)
print('= Cache Info =')
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f' size: {cache.size}/{cache.capacity}')

from cucim import CuImage
from cucim.clara.cache import preferred_memory_capacity

img = CuImage('input/image.tif')
memory_capacity = preferred_memory_capacity(img, patch_size=(256, 256))
new_memory_capacity = preferred_memory_capacity(img, patch_size=(512, 512))
print(f'memory_capacity : {memory_capacity} MiB')
print(f'new_memory_capacity: {new_memory_capacity} MiB')
print()

cache = CuImage.cache('per_process', memory_capacity=memory_capacity)
print('= Cache Info =')
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f' size: {cache.size}/{cache.capacity}')
print()

# Enlarge the existing cache in place.
cache.reserve(new_memory_capacity)
print('= Cache Info (update memory capacity) =')
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f' size: {cache.size}/{cache.capacity}')
print()

cache.reserve(memory_capacity, capacity=500)
print('= Cache Info (update memory capacity & capacity) =')
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity} # smaller `memory_capacity` value does not change this')
print(f' size: {cache.size}/{cache.capacity}')
print()

cache = CuImage.cache('no_cache')
print('= Cache Info (no cache) =')
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f' size: {cache.size}/{cache.capacity}')

from cucim import CuImage
from cucim.clara.cache import preferred_memory_capacity

# Enable per-process caching with hit/miss statistics recording.
img = CuImage('input/image.tif')
memory_capacity = preferred_memory_capacity(img, patch_size=(256, 256))
cache = CuImage.cache('per_process', memory_capacity=memory_capacity, record_stat=True)

img.read_region((0,0), (100,100))
print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}')
region = img.read_region((0,0), (100,100))
print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}')
region = img.read_region((0,0), (100,100))
print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}')

# record() with no argument reports whether statistics are being recorded;
# record(False) turns recording off.
print(f'Is recorded: {cache.record()}')
cache.record(False)
print(f'Is recorded: {cache.record()}')

region = img.read_region((0,0), (100,100))
print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}')
print()
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f'free_memory: {cache.free_memory}')
print(f' size: {cache.size}/{cache.capacity}')
print()

cache = CuImage.cache('no_cache')
print(f' type: {cache.type}({int(cache.type)})')
print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}')
print(f'free_memory: {cache.free_memory}')
print(f' size: {cache.size}/{cache.capacity}')

import json
from cucim import CuImage

# Export the current cache configuration as JSON text.
cache = CuImage.cache()
config_data = {'cache': cache.config}
json_text = json.dumps(config_data, indent=4)
print(json_text)

# Save into the configuration file.
with open('.cucim.json', 'w') as fp:
    fp.write(json_text)
rapidsai_public_repos/cucim/notebooks | rapidsai_public_repos/cucim/notebooks/input/README.md |
# Test Dataset
TUPAC-TR-488.svs and TUPAC-TR-467.svs are breast cancer cases from the dataset
of the Tumor Proliferation Assessment Challenge 2016 (TUPAC16 | MICCAI Grand Challenge), and they are publicly
available through [The Cancer Genome Atlas (TCGA)](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga).
- Website: https://tupac.grand-challenge.org
- Data link: https://tupac.grand-challenge.org/Dataset/
- TUPAC-TR-467.svs : https://portal.gdc.cancer.gov/files/575c0465-c4bc-4ea7-ab63-ba48aa5e374b
- TUPAC-TR-488.svs : https://portal.gdc.cancer.gov/files/e27c87c9-e163-4d55-8f27-4cc7dfca08d8
- License: CC BY 3.0 (https://wiki.cancerimagingarchive.net/display/Public/TCGA-BRCA#3539225f58e64731d8e47d588cedd99d300d5d6)
- See LICENSE-3rdparty file
## Converted files
- image.tif : 256x256 multi-resolution/tiled TIF conversion of TUPAC-TR-467.svs
- image2.tif : 256x256 multi-resolution/tiled TIF conversion of TUPAC-TR-488.svs
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/Dockerfile-cmake | #
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM nvidia/cuda:11.0-devel-ubuntu18.04
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
# Download and install Python3 PIP.
RUN apt-get update --yes \
&& apt-get upgrade --yes \
&& apt-get install --yes --fix-missing --no-install-recommends \
software-properties-common \
ca-certificates \
python3-minimal \
python3-pip \
&& add-apt-repository ppa:ubuntu-toolchain-r/test \
&& rm -rf /var/lib/apt/lists/*
RUN python3 --version
# Set additional environment values that make usage more pleasant.
ENV TERM=xterm-256color
# Make /usr/bin/python point to the ${VERSION_PYTHON3} version of python
RUN VERSION_PYTHON3=$(python3 --version | cut -c8-) && VERSION_PYTHON3=${VERSION_PYTHON3%.*} \
&& rm -f /usr/bin/python \
&& rm -f /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1` \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1`
# Make /usr/bin/pip point to the ${VERSION_PIP3} version of python
RUN rm -f /usr/bin/pip \
&& ln -s /usr/bin/pip3 /usr/bin/pip
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
python3-dev \
gcc-9 \
g++-9 \
libopenslide-dev \
wget \
git \
curl \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/cache/apt/archives/partial/*
WORKDIR /workspace
ENV HOME=/workspace
# Use `python -m pip` to avoid using an old script wrapper.
RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel \
&& python -m pip install cmake
# Register gcc/g++ alternatives: gcc-7 (base image default) at priority 10 and gcc-9 at priority 20, so gcc-9 becomes the default
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 10 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 20 \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 10 \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-9 20
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/nvvm/lib64
ENTRYPOINT ["/bin/bash"]
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/Dockerfile-claratrain | #
# Copyright (c) 2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Base image: Clara Train SDK from NVIDIA's internal registry (nvcr.io/nvidian).
FROM nvcr.io/nvidian/dlmed/clara-train-sdk:v3.1-ga-qa-5
# Install the OpenSlide runtime library (openslide-python in
# requirements-claratrain.txt needs it); remove the apt lists afterwards
# to keep the image small.
RUN apt-get update \
&& apt-get install --yes --fix-missing --no-install-recommends \
libopenslide0 \
&& rm -rf /var/lib/apt/lists/*
# Copy the Python requirements file and the locally-built cuCIM wheel(s)
# (expected next to the build context root) into the image.
COPY ./docker/requirements-claratrain.txt ./
COPY ./*.whl ./
# Use `python -m pip` to avoid using an old script wrapper.
# `cu*.whl` is expanded by the shell at build time to the copied wheel(s).
RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel \
&& python -m pip install --no-cache-dir -r requirements-claratrain.txt \
&& python -m pip install cu*.whl
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/requirements-jupyter.txt | openslide-python==1.1.2
tifffile>=2022.7.28
itk==5.1.2
dask[array,delayed,distributed]==2021.2.0
dask-cuda==0.17.0
zarr==2.6.1
fsspec==0.8.5
numpy==1.19.5
opencv-contrib-python==4.5.1.48
imagecodecs>=2021.6.8
cupy-cuda110==8.4.0
jupyterlab==3.0.7
dask_labextension==5.0.0
cmake>=3.18
--extra-index-url https://developer.download.nvidia.com/compute/redist
nvidia-dali-cuda110
--find-links https://download.pytorch.org/whl/torch_stable.html
torch==1.7.1+cu110
torchvision==0.8.2+cu110
torchaudio===0.7.2
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/Dockerfile-jupyter-gds-dev | #
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
ARG UBUNTU_VER=18.04
FROM nvidia/cuda:11.0-devel-ubuntu${UBUNTU_VER}
ARG UBUNTU_VER=18.04
ARG NODE_VERSION=v14.13.1
ARG NODE_DISTRO=linux-x64
ENV UBUNTU_VER=${UBUNTU_VER}
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
# Download and install Python3 PIP.
RUN apt-get update --yes \
&& apt-get upgrade --yes \
&& apt-get install --yes --fix-missing --no-install-recommends \
ca-certificates \
python3-minimal \
python3-pip \
&& rm -rf /var/lib/apt/lists/*
RUN python3 --version
# Set additional environment values that make usage more pleasant.
ENV TERM=xterm-256color
# Make /usr/bin/python point to the ${VERSION_PYTHON3} version of python
RUN VERSION_PYTHON3=$(python3 --version | cut -c8-) && VERSION_PYTHON3=${VERSION_PYTHON3%.*} \
&& rm -f /usr/bin/python \
&& rm -f /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1` \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1`
# Make /usr/bin/pip point to the ${VERSION_PIP3} version of python
RUN rm -f /usr/bin/pip \
&& ln -s /usr/bin/pip3 /usr/bin/pip
# libgl1 is needed for opencv at `cucim convert` CLI command.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
python3-dev \
gcc \
g++ \
libopenslide-dev \
libsm6 \
libxext6 \
libxrender-dev \
libglib2.0-0 \
libgl1 \
wget \
git \
xz-utils \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/cache/apt/archives/partial/*
WORKDIR /workspace
ENV HOME=/workspace
# Install nodejs
RUN mkdir -p /usr/local/lib/nodejs \
&& wget https://nodejs.org/dist/$NODE_VERSION/node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz \
&& tar -xJvf node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz -C /usr/local/lib/nodejs \
&& rm node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz
ENV PATH=/usr/local/lib/nodejs/node-$NODE_VERSION-$NODE_DISTRO/bin:$PATH
COPY docker/requirements-jupyter-dev.txt ./
# Use `python -m pip` to avoid using an old script wrapper.
RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel \
&& python -m pip install --no-cache-dir -r requirements-jupyter-dev.txt
# Install Jupyter Extensions
RUN jupyter labextension install dask-labextension \
&& jupyter serverextension enable dask_labextension
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
curl \
# build-essential \
# cmake \
# git \
# zlib1g-dev \
# libssl-dev \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/*
# Download TRITON Client
ARG TRITON_CLIENTS_URL=https://github.com/triton-inference-server/server/releases/download/v2.5.0/v2.5.0_ubuntu1804.clients.tar.gz
RUN mkdir -p /opt/nvidia/triton-clients \
&& curl -L ${TRITON_CLIENTS_URL} | tar xvz -C /opt/nvidia/triton-clients
RUN pip install --no-cache-dir \
/opt/nvidia/triton-clients/python/*manylinux1_x86_64.whl
# Supporting GDS
ARG GDS_VER=0.9.0
ARG MLNX_OFED_VER=5.1-2.5.8.0
COPY ./temp/gds/tools/README /usr/local/cuda/gds/
COPY ./temp/gds/samples/ /usr/local/cuda/gds/samples/
COPY ./temp/gds/tools/ /usr/local/cuda/gds/tools/
COPY ./temp/gds/lib64/cufile.h /usr/local/cuda/lib64/cufile.h
COPY ./temp/gds/lib64/libcufile.so.${GDS_VER} /usr/local/cuda/lib64/libcufile.so.${GDS_VER}
COPY ./temp/gds/lib64/libcufile_rdma.so.${GDS_VER} /usr/local/cuda/lib64/libcufile_rdma.so.${GDS_VER}
# Somehow libcufile.so.0 and libcufile_rdma.so.0 are auto-generated during the copy
#&& ln -s libcufile.so.${GDS_VER} /usr/local/cuda/lib64/libcufile.so.0 \
#&& ln -s libcufile_rdma.so.${GDS_VER} /usr/local/cuda/lib64/libcufile_rdma.so.0
RUN ln -sfn /usr/local/cuda/gds /usr/local/gds \
&& ln -s libcufile.so.${GDS_VER} /usr/local/cuda/lib64/libcufile.so \
&& ln -s libcufile_rdma.so.${GDS_VER} /usr/local/cuda/lib64/libcufile_rdma.so
# dpkg: dependency problems prevent configuration of mlnx-iproute2:
# mlnx-iproute2 depends on libcap2 (>= 1:2.10); however:
# Package libcap2 is not installed.
#
# liburcu-bp.so.6 => not found
# liburcu-cds.so.6 => not found
# libjsoncpp.so.1 => not found
RUN apt-get update \
&& apt-get install --yes --fix-missing --no-install-recommends \
libcap2 \
liburcu-dev \
libjsoncpp-dev \
&& wget http://content.mellanox.com/ofed/MLNX_OFED-${MLNX_OFED_VER}/MLNX_OFED_LINUX-${MLNX_OFED_VER}-ubuntu${UBUNTU_VER}-x86_64.tgz \
&& tar -xzvf MLNX_OFED_LINUX-${MLNX_OFED_VER}-ubuntu${UBUNTU_VER}-x86_64.tgz \
&& MLNX_OFED_LINUX-${MLNX_OFED_VER}-ubuntu${UBUNTU_VER}-x86_64/mlnxofedinstall --user-space-only --without-fw-update --all -q --force \
&& rm -rf MLNX_OFED_LINUX* \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/cache/apt/archives/partial/*
# Installation of MLNX_OFED would install python2, overwriting /usr/bin/python
RUN ln -sf python3 /usr/bin/python \
&& ln -sf pip3 /usr/bin/pip
COPY ./docker/cufile.json /etc/cufile.json
RUN sed -i 's/"allow_compat_mode": false,/"allow_compat_mode": true,/' /etc/cufile.json \
&& echo "/usr/local/gds/lib/" > /etc/ld.so.conf.d/cufile.conf \
&& ldconfig
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/nvvm/lib64
ENTRYPOINT ["/bin/bash"]
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/Dockerfile-jupyter-dev | #
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM nvidia/cuda:11.0-devel-ubuntu18.04
ARG NODE_VERSION=v14.13.1
ARG NODE_DISTRO=linux-x64
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
&& apt-get install --yes --fix-missing --no-install-recommends \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# Download and install Python3 PIP.
RUN apt-get update \
&& apt-get install --yes --fix-missing --no-install-recommends \
python3-minimal \
python3-pip \
&& rm -rf /var/lib/apt/lists/*
RUN python3 --version
# Set additional environment values that make usage more pleasant.
ENV TERM=xterm-256color
# Make /usr/bin/python point to the ${VERSION_PYTHON3} version of python
RUN VERSION_PYTHON3=$(python3 --version | cut -c8-) && VERSION_PYTHON3=${VERSION_PYTHON3%.*} \
&& rm -f /usr/bin/python \
&& rm -f /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1` \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1`
# Make /usr/bin/pip point to the ${VERSION_PIP3} version of python
RUN rm -f /usr/bin/pip \
&& ln -s /usr/bin/pip3 /usr/bin/pip
# libgl1 is needed for opencv at `cucim convert` CLI command.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
python3-dev \
gcc \
g++ \
libopenslide-dev \
libsm6 \
libxext6 \
libxrender-dev \
libglib2.0-0 \
libgl1 \
wget \
git \
xz-utils \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/cache/apt/archives/partial/*
WORKDIR /workspace
ENV HOME=/workspace
# Install nodejs
RUN mkdir -p /usr/local/lib/nodejs \
&& wget https://nodejs.org/dist/$NODE_VERSION/node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz \
&& tar -xJvf node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz -C /usr/local/lib/nodejs \
&& rm node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz
ENV PATH=/usr/local/lib/nodejs/node-$NODE_VERSION-$NODE_DISTRO/bin:$PATH
COPY ./docker/requirements-jupyter-dev.txt ./
# Use `python -m pip` to avoid using an old script wrapper.
RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel \
&& python -m pip install --no-cache-dir -r requirements-jupyter-dev.txt
# Install Jupyter Extensions
RUN jupyter labextension install dask-labextension \
&& jupyter serverextension enable dask_labextension
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
curl \
# build-essential \
# cmake \
# git \
# zlib1g-dev \
# libssl-dev \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/*
# Download TRITON Client
ARG TRITON_CLIENTS_URL=https://github.com/triton-inference-server/server/releases/download/v2.5.0/v2.5.0_ubuntu1804.clients.tar.gz
RUN mkdir -p /opt/nvidia/triton-clients \
&& curl -L ${TRITON_CLIENTS_URL} | tar xvz -C /opt/nvidia/triton-clients
RUN pip install --no-cache-dir \
/opt/nvidia/triton-clients/python/*manylinux1_x86_64.whl
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/nvvm/lib64
ENTRYPOINT ["/bin/bash"]
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/cufile.json | {
// NOTE : Application can override custom configuration via export CUFILE_ENV_PATH_JSON=<filepath>
// e.g : export CUFILE_ENV_PATH_JSON="/home/<xxx>/cufile.json"
"logging": {
// log directory, if not enabled will create log file under current working directory
//"dir": "/home/<xxxx>",
// ERROR|WARN|INFO|DEBUG|TRACE (in decreasing order of priority)
"level": "ERROR"
},
"profile": {
// nvtx profiling on/off
"nvtx": false,
// cufile stats level(0-3)
"cufile_stats": 0
},
"properties": {
// max IO chunk size (parameter should be 4K aligned) used by cuFileRead/Write internally per IO request
"max_direct_io_size_kb" : 16384,
// device memory size (parameter should be 4K aligned) for reserving bounce buffers for the entire GPU
"max_device_cache_size_kb" : 131072,
// limit on maximum device memory size (parameter should be 4K aligned) that can be pinned for a given process
"max_device_pinned_mem_size_kb" : 33554432,
// true or false (true will enable asynchronous io submission to nvidia-fs driver)
// Note : currently the overall IO will still be synchronous
"use_poll_mode" : false,
// maximum IO request size (parameter should be 4K aligned) within or equal to which library will use polling for IO completion
"poll_mode_max_size_kb": 4,
// allow compat mode, this will enable use of cuFile posix read/writes
"allow_compat_mode": false,
// client-side rdma addr list for user-space file-systems(e.g ["10.0.1.0", "10.0.2.0"])
"rdma_dev_addr_list": [ ],
// load balancing policy for RDMA memory registration (MR): RoundRobin or RoundRobinMaxMin
// In RoundRobin, MRs will be distributed uniformly across NICs closest to a GPU
// In RoundRobinMaxMin, MRs will be distributed across NICs closest to a GPU
// with minimal sharing of NICs across GPUs
"rdma_load_balancing_policy": "RoundRobin"
},
"fs": {
"generic": {
// for unaligned writes, setting this to true makes cuFileWrite use posix write internally instead of a regular GDS write
"posix_unaligned_writes" : false
},
"lustre": {
// IO threshold for read/write (param should be 4K aligned)) equal to or below which cuFile will use posix read/write
"posix_gds_min_kb" : 0
},
"weka": {
// enable/disable RDMA write
"rdma_write_support" : false
}
},
"denylist": {
// specify list of vendor driver modules to deny for nvidia-fs (e.g. ["nvme" , "nvme_rdma"])
"drivers": [ ],
// specify list of block devices to prevent IO using cuFile (e.g. [ "/dev/nvme0n1" ])
"devices": [ ],
// specify list of mount points to prevent IO using cuFile (e.g. ["/mnt/test"])
"mounts": [ ],
// specify list of file-systems to prevent IO using cuFile (e.g ["lustre", "wekafs"])
"filesystems": [ ]
},
"miscellaneous": {
// enable only for enforcing strict checks at API level for debugging
"api_check_aggressive": false
}
}
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/Dockerfile-jupyter | #
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM nvidia/cuda:11.0-devel-ubuntu18.04
ARG NODE_VERSION=v14.13.1
ARG NODE_DISTRO=linux-x64
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
# Download and install Python3 PIP.
RUN apt-get update --yes \
&& apt-get upgrade --yes \
&& apt-get install --yes --fix-missing --no-install-recommends \
ca-certificates \
python3-minimal \
python3-pip \
&& rm -rf /var/lib/apt/lists/*
RUN python3 --version
# Set additional environment values that make usage more pleasant.
ENV TERM=xterm-256color
# Make /usr/bin/python point to the ${VERSION_PYTHON3} version of python
RUN VERSION_PYTHON3=$(python3 --version | cut -c8-) && VERSION_PYTHON3=${VERSION_PYTHON3%.*} \
&& rm -f /usr/bin/python \
&& rm -f /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1` \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1`
# Make /usr/bin/pip point to the ${VERSION_PIP3} version of python
RUN rm -f /usr/bin/pip \
&& ln -s /usr/bin/pip3 /usr/bin/pip
# libgl1 is needed for opencv at `cucim convert` CLI command.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
python3-dev \
gcc \
g++ \
libopenslide-dev \
libsm6 \
libxext6 \
libxrender-dev \
libglib2.0-0 \
libgl1 \
wget \
git \
xz-utils \
curl \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/cache/apt/archives/partial/*
WORKDIR /workspace
ENV HOME=/workspace
# Install nodejs
RUN mkdir -p /usr/local/lib/nodejs \
&& wget https://nodejs.org/dist/$NODE_VERSION/node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz \
&& tar -xJvf node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz -C /usr/local/lib/nodejs \
&& rm node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz
ENV PATH=/usr/local/lib/nodejs/node-$NODE_VERSION-$NODE_DISTRO/bin:$PATH
COPY ./docker/requirements-jupyter.txt ./
# Use `python -m pip` to avoid using an old script wrapper.
RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel \
&& python -m pip install --no-cache-dir -r requirements-jupyter.txt
# Install Jupyter Extensions
RUN jupyter labextension install dask-labextension \
&& jupyter serverextension enable dask_labextension
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/nvvm/lib64
ENTRYPOINT ["/bin/bash"]
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/requirements-claratrain.txt | openslide-python==1.1.2
tifffile>=2022.7.28
itk==5.1.2
dask[array,delayed,distributed]==2021.2.0
dask-cuda==0.17.0
zarr==2.6.1
fsspec==0.8.5
numpy # 1.17.3 already exists in the image
opencv-contrib-python==4.5.1.48
imagecodecs>=2021.6.8
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/requirements-jupyter-dev.txt | openslide-python==1.1.2
tifffile>=2022.7.28
itk==5.1.2
dask[array,delayed,distributed]==2021.2.0
dask-cuda==0.17.0
zarr==2.6.1
fsspec==0.8.5
numpy==1.19.5
opencv-contrib-python==4.5.1.48
imagecodecs>=2021.6.8
cupy-cuda110==8.4.0
jupyterlab==3.0.7
dask_labextension==5.0.0
cmake>=3.18
--extra-index-url https://developer.download.nvidia.com/compute/redist
nvidia-dali-cuda110
--find-links https://download.pytorch.org/whl/torch_stable.html
torch==1.7.1+cu110
torchvision==0.8.2+cu110
torchaudio===0.7.2
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docker/Dockerfile-jupyter-gds | #
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
ARG UBUNTU_VER=18.04
FROM nvidia/cuda:11.0-devel-ubuntu${UBUNTU_VER}
ARG UBUNTU_VER=18.04
ARG NODE_VERSION=v14.13.1
ARG NODE_DISTRO=linux-x64
ENV UBUNTU_VER=${UBUNTU_VER}
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
# Download and install Python3 PIP.
RUN apt-get update --yes \
&& apt-get upgrade --yes \
&& apt-get install --yes --fix-missing --no-install-recommends \
ca-certificates \
python3-minimal \
python3-pip \
&& rm -rf /var/lib/apt/lists/*
RUN python3 --version
# Set additional environment values that make usage more pleasant.
ENV TERM=xterm-256color
# Make /usr/bin/python point to the ${VERSION_PYTHON3} version of python
RUN VERSION_PYTHON3=$(python3 --version | cut -c8-) && VERSION_PYTHON3=${VERSION_PYTHON3%.*} \
&& rm -f /usr/bin/python \
&& rm -f /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1` \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python \
&& ln -s /usr/bin/python${VERSION_PYTHON3} /usr/bin/python`echo ${VERSION_PYTHON3} | cut -c1-1`
# Make /usr/bin/pip point to the ${VERSION_PIP3} version of python
RUN rm -f /usr/bin/pip \
&& ln -s /usr/bin/pip3 /usr/bin/pip
# libgl1 is needed for opencv at `cucim convert` CLI command.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
python3-dev \
gcc \
g++ \
libopenslide-dev \
libsm6 \
libxext6 \
libxrender-dev \
libglib2.0-0 \
libgl1 \
wget \
git \
xz-utils \
curl \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/cache/apt/archives/partial/*
WORKDIR /workspace
ENV HOME=/workspace
# Install nodejs
RUN mkdir -p /usr/local/lib/nodejs \
&& wget https://nodejs.org/dist/$NODE_VERSION/node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz \
&& tar -xJvf node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz -C /usr/local/lib/nodejs \
&& rm node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz
ENV PATH=/usr/local/lib/nodejs/node-$NODE_VERSION-$NODE_DISTRO/bin:$PATH
COPY ./docker/requirements-jupyter.txt ./
# Use `python -m pip` to avoid using an old script wrapper.
RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel \
&& python -m pip install --no-cache-dir -r requirements-jupyter.txt
# Install Jupyter Extensions
RUN jupyter labextension install dask-labextension \
&& jupyter serverextension enable dask_labextension
# Supporting GDS
ARG GDS_VER=0.9.0
ARG MLNX_OFED_VER=5.1-2.5.8.0
COPY ./temp/gds/tools/README /usr/local/cuda/gds/
COPY ./temp/gds/samples/ /usr/local/cuda/gds/samples/
COPY ./temp/gds/tools/ /usr/local/cuda/gds/tools/
COPY ./temp/gds/lib64/cufile.h /usr/local/cuda/lib64/cufile.h
COPY ./temp/gds/lib64/libcufile.so.${GDS_VER} /usr/local/cuda/lib64/libcufile.so.${GDS_VER}
COPY ./temp/gds/lib64/libcufile_rdma.so.${GDS_VER} /usr/local/cuda/lib64/libcufile_rdma.so.${GDS_VER}
# Somehow libcufile.so.0 and libcufile_rdma.so.0 are auto-generated during the copy
#&& ln -s libcufile.so.${GDS_VER} /usr/local/cuda/lib64/libcufile.so.0 \
#&& ln -s libcufile_rdma.so.${GDS_VER} /usr/local/cuda/lib64/libcufile_rdma.so.0
RUN ln -sfn /usr/local/cuda/gds /usr/local/gds \
&& ln -s libcufile.so.${GDS_VER} /usr/local/cuda/lib64/libcufile.so \
&& ln -s libcufile_rdma.so.${GDS_VER} /usr/local/cuda/lib64/libcufile_rdma.so
# dpkg: dependency problems prevent configuration of mlnx-iproute2:
# mlnx-iproute2 depends on libcap2 (>= 1:2.10); however:
# Package libcap2 is not installed.
#
# liburcu-bp.so.6 => not found
# liburcu-cds.so.6 => not found
# libjsoncpp.so.1 => not found
RUN apt-get update \
&& apt-get install --yes --fix-missing --no-install-recommends \
libcap2 \
liburcu-dev \
libjsoncpp-dev \
&& wget http://content.mellanox.com/ofed/MLNX_OFED-${MLNX_OFED_VER}/MLNX_OFED_LINUX-${MLNX_OFED_VER}-ubuntu${UBUNTU_VER}-x86_64.tgz \
&& tar -xzvf MLNX_OFED_LINUX-${MLNX_OFED_VER}-ubuntu${UBUNTU_VER}-x86_64.tgz \
&& MLNX_OFED_LINUX-${MLNX_OFED_VER}-ubuntu${UBUNTU_VER}-x86_64/mlnxofedinstall --user-space-only --without-fw-update --all -q --force \
&& rm -rf MLNX_OFED_LINUX* \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /var/cache/apt/archives/partial/*
# Installation of MLNX_OFED would install python2, overwriting /usr/bin/python
RUN ln -sf python3 /usr/bin/python \
&& ln -sf pip3 /usr/bin/pip
COPY ./docker/cufile.json /etc/cufile.json
RUN sed -i 's/"allow_compat_mode": false,/"allow_compat_mode": true,/' /etc/cufile.json \
&& echo "/usr/local/gds/lib/" > /etc/ld.so.conf.d/cufile.conf \
&& ldconfig
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/nvvm/lib64
ENTRYPOINT ["/bin/bash"]
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/.idea/misc.xml | <?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CMakeWorkspace" PROJECT_DIR="$PROJECT_DIR$" />
<component name="JavaScriptSettings">
<option name="languageLevel" value="ES6" />
</component>
</project>
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/.idea/cucim.iml | <?xml version="1.0" encoding="UTF-8"?>
<module classpath="CMake" type="CPP_MODULE" version="4" />
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/.idea/vcs.xml | <?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
| 0 |
rapidsai_public_repos/cucim/.idea | rapidsai_public_repos/cucim/.idea/codeStyles/codeStyleConfig.xml | <component name="ProjectCodeStyleConfiguration">
<state>
<option name="USE_PER_PROJECT_SETTINGS" value="true" />
</state>
</component>
| 0 |
rapidsai_public_repos/cucim/.idea | rapidsai_public_repos/cucim/.idea/codeStyles/Project.xml | <component name="ProjectCodeStyleConfiguration">
<code_scheme name="Project" version="173">
<clangFormatSettings>
<option name="ENABLED" value="true" />
</clangFormatSettings>
</code_scheme>
</component>
| 0 |
rapidsai_public_repos/cucim/.idea/fileTemplates | rapidsai_public_repos/cucim/.idea/fileTemplates/includes/NVIDIA_CMAKE_HEADER.cmake | #
# Copyright (c) $YEAR, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cucim/.idea/fileTemplates | rapidsai_public_repos/cucim/.idea/fileTemplates/includes/NVIDIA_C_HEADER.h | /*
* Copyright (c) $YEAR, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
| 0 |
rapidsai_public_repos/cucim/.idea/fileTemplates | rapidsai_public_repos/cucim/.idea/fileTemplates/internal/CMakeLists.txt.cmake | #parse("NVIDIA_CMAKE_HEADER.cmake")
| 0 |
rapidsai_public_repos/cucim/.idea/fileTemplates | rapidsai_public_repos/cucim/.idea/fileTemplates/internal/C Header File.h | #parse("NVIDIA_C_HEADER.h")
#[[#ifndef]]# ${INCLUDE_GUARD}
#[[#define]]# ${INCLUDE_GUARD}
#[[#endif]]# //${INCLUDE_GUARD}
| 0 |
rapidsai_public_repos/cucim/.idea/fileTemplates | rapidsai_public_repos/cucim/.idea/fileTemplates/internal/C++ Class.cc | #parse("NVIDIA_C_HEADER.h")
#[[#include]]# "${HEADER_FILENAME}"
| 0 |
rapidsai_public_repos/cucim/.idea/fileTemplates | rapidsai_public_repos/cucim/.idea/fileTemplates/internal/C Source File.c | #parse("NVIDIA_C_HEADER.h")
#if (${HEADER_FILENAME})
#[[#include]]# "${HEADER_FILENAME}"
#end
| 0 |
rapidsai_public_repos/cucim/.idea/fileTemplates | rapidsai_public_repos/cucim/.idea/fileTemplates/internal/C++ Class Header.h | #parse("NVIDIA_C_HEADER.h")
#[[#ifndef]]# ${INCLUDE_GUARD}
#[[#define]]# ${INCLUDE_GUARD}
${NAMESPACES_OPEN}
class ${NAME} {
};
${NAMESPACES_CLOSE}
#[[#endif]]# //${INCLUDE_GUARD}
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docs/requirement.txt | nbsphinx
recommonmark
sphinx
pydata-sphinx-theme
numpydoc
ipython
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docs/references.css | /* Fix references to not look like parameters */
dl.citation > dt.label {
display: unset !important;
float: left !important;
border: unset !important;
background: unset !important;
padding: unset !important;
margin: unset !important;
font-size: unset !important;
line-height: unset !important;
padding-right: 0.5rem !important;
}
/* Add opening bracket */
dl.citation > dt.label > span::before {
content: "[";
}
/* Add closing bracket */
dl.citation > dt.label > span::after {
content: "]";
}
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docs/README.md | # Building Documentation
A basic python environment with packages listed in `./requirement.txt` is
enough to build the docs.
## Get additional dependency
```bash
pip install -r requirement.txt
```
## Run makefile:
```bash
make html
```
Outputs to `build/html/index.html`
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docs/make.bat | @ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
set SPHINXPROJ=cuImage
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/docs/Makefile | # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = cuCIM
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
rapidsai_public_repos/cucim/docs | rapidsai_public_repos/cucim/docs/adr/README.md | # Architecture Decision Record
Documents in this folder follow the concept from [this blog - Why Write ADRs?](https://github.blog/2020-08-13-why-write-adrs/).
Please refer to https://github.com/joelparkerhenderson/architecture_decision_record for examples.
## ADR file name conventions
- The name starts with a three-digit number that specifies the ID of the ADR, e.g., `001_choose_database.md`.
- The name has a present tense imperative verb phrase. This helps readability and matches our commit message format.
- The name uses lowercase and underscores (same as this repo). This is a balance of readability and system usability.
- The extension is markdown. This can be useful for easy formatting.
Example
- 001_choose_database.md
- 002_format_timestamps.md
- 003_manage_passwords.md
- 004_handle_exceptions.md
| 0 |
rapidsai_public_repos/cucim/docs | rapidsai_public_repos/cucim/docs/adr/example.md | # 001 - Choose Programming languages
Note: this example is from https://github.com/joelparkerhenderson/architecture_decision_record/blob/master/examples/programming-languages.md#related-decisions
Contents:
* [Summary](#summary)
* [Issue](#issue)
* [Decision](#decision)
* [Status](#status)
* [Details](#details)
* [Assumptions](#assumptions)
* [Constraints](#constraints)
* [Positions](#positions)
* [Argument](#argument)
* [Implications](#implications)
* [Related](#related)
* [Related decisions](#related-decisions)
* [Related requirements](#related-requirements)
* [Related artifacts](#related-artifacts)
* [Related principles](#related-principles)
* [Notes](#notes)
## Summary
### Issue
We need to choose programming languages for our software. We have two major needs: a front-end programming language suitable for web applications, and a back-end programming language suitable for server applications.
### Decision
We are choosing TypeScript for the front-end.
We are choosing Rust for the back-end.
### Status
Decided. We are open to new alternatives as they arise.
## Details
### Assumptions
The front-end applications are typical:
* Typical users and interactions
* Typical browsers and systems
* Typical developments and deployments
The front-end applications is likely to evolve quickly:
* We want to ensure fast easy developments, deployments, iterations, etc.
* We value provability, such as type safety, and we are fine doing a bit more work to achieve it.
* We do not need legacy compatibility.
The back-end applications are higher-than-typical:
* Higher-than-typical goals for quality, especially provability, reliability, security, etc.
* Higher-than-typical goals for near-real-time, i.e. we do not want pauses due to virtual machine garbage collection.
* Higher-than-typical goals for functional programming, especially for parallelization, multi-core processing, and memory safety.
We accept lower compile-time speeds in favor of compile-time safety and runtime speeds.
### Constraints
We have a strong constraint on languages that are usable with major cloud provider services for functions, such as Amazon Lambda.
### Positions
We considered these languages:
* C
* C++
* Clojure
* Elixir
* Erlang
* Elm
* Flow
* Go
* Haskell
* Java
* JavaScript
* Kotlin
* Python
* Ruby
* Rust
* TypeScript
### Argument
Summary per language:
* C: rejected because of low safety; Rust can do nearly everything better.
* C++: rejected because it's a mess; Rust can do nearly everything better.
* Clojure: excellent modeling; best Lisp approximation; great runtime on the JVM.
* Elixir: excellent runtime including deployability and concurrency; excellent developer experience; relatively small ecosystem.
* Erlang: excellent runtime including deployability and concurrency; challenging developer experience; relatively small ecosystem.
* Elm: looks very promising; IBM is publishing major case studies with good results; smaller ecosystem.
* Flow: interesting improvement over JavaScript; however, developers are moving away from it.
* Go: excellent developer experience; excellent concurrency; but a track record of bad decisions that cripple the language.
* Haskell: best functional language; smaller developer community; hasn't achieved enough published production successes.
* Java: excellent runtime; excellent ecosystem; sub-par developer experience.
* JavaScript: most popular language ever; most widespread ecosystem.
* Kotlin: fixes so much of Java; excellent backing by JetBrains; good published cases of porting from Java to Kotlin.
* Python: most popular language for systems administration; great analytics tooling; good web frameworks; but abandoned by Google in favor of Go.
* Ruby: best developer experience ever; best web frameworks; nicest community; but very slow; somewhat hard to package.
* Rust: best new language; zero-abstraction emphasis; concurrency emphasis; however relatively small ecosystem; and has deliberate limits on some kinds of compiler accelerations e.g. direct memory access needs to be explicitly unsafe.
* TypeScript: adds types to JavaScript; great transpiler; growing developer emphasis on porting from JavaScript to TypeScript; strong backing from Microsoft.
We decided that VMs have a set of tradeoffs that we do not need right now, such as additional complexity that provides runtime capabilities.
We believe that our core decision is driven by two cross-cutting concerns:
* For fastest runtime speed and tightest system access, we would choose JavaScript and C.
* For close-to-fastest runtime speed and close-to-tightest system access, we choose TypeScript and Rust.
Honorable mentions go to the VM languages and web frameworks that we would choose if we wanted a VM language:
* Clojure and Luminus
* Java and Spring
* Elixir and Phoenix
### Implications
Front-end developers will need to learn TypeScript. This is likely an easy learning curve if the developer's primary experience is using JavaScript.
Back-end developers will need to learn Rust. This is likely a moderate learning curve if the developer's primary experience is using C/C++, and a hard learning curve if the developer's primary experience is using Java, Python, Ruby, or similar memory-managed languages.
TypeScript and Rust are both relatively new. This means that many tools do not yet have documentation for these languages. For example, the devops pipeline will need to be set up for these languages, and so far, none of the devops tools that we are evaluating have default examples for these languages.
Compile times for TypeScript and Rust are quite slow. Some of this may be due to the newness of the languages. We may want to look at how to mitigate slow compile times, such as by compile-on-demand, compile-concurrency, etc.
IDE support for these languages is not yet ubiquitous and not yet first-class. For example, JetBrains sells the PyCharm IDE for first-class support for Python, but does not sell an IDE with first-class support for Rust; instead, JetBrains can use a Rust plug-in that provides perhaps 80% of Rust language support vis a vis Python language support.
## Related
### Related decisions
We will aim toward ecosystem choices that align with these languages.
For example, we want to choose an IDE that has good capabilities for these languages.
For example, for our front-end web framework, we are more likely to decide on a framework that tends to aim toward TypeScript (e.g. Vue) than a framework that tends to aim toward plain JavaScript (e.g. React).
### Related requirements
Our entire toolchain must support these languages.
### Related artifacts
We expect we may export some secrets to environment variables.
### Related principles
Measure twice, build once. We are prioritizing some safety over some speed.
Runtime is more valuable than compile time. We are prioritizing customer usage over developer usage.
## Notes
Any notes here.
| 0 |
rapidsai_public_repos/cucim/docs | rapidsai_public_repos/cucim/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# pygdf documentation build configuration file, created by
# sphinx-quickstart on Wed May 3 10:59:22 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, "ext"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.mathjax",
"numpydoc",
"doi_role",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"nbsphinx",
"recommonmark",
]
ipython_mplbackend = "str"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "cuCIM"
copyright = "2020-2021, NVIDIA"
author = "NVIDIA"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "23.12"
# The full version, including alpha/beta/rc tags.
release = "23.12.00"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#
# NOTE: Sphinx 5.0+ no longer accepts ``language = None`` (it warns and
# falls back to "en"), so declare the default explicitly.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "cucimdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"cucim.tex",
"cucim Documentation",
"NVIDIA Corporation",
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "cucim", "cucim Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"cucim",
"cucim Documentation",
author,
"cucim",
"One line description of project.",
"Miscellaneous",
),
]
# Configuration for intersphinx: refer to other projects documentation.
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"cupy": ("https://docs.cupy.dev/en/stable/", None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
"skimage": ("https://scikit-image.org/docs/stable/", None),
}
# Config numpydoc
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
def setup(app):
app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
app.add_js_file(
"https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
)
| 0 |
rapidsai_public_repos/cucim/docs | rapidsai_public_repos/cucim/docs/source/api.rst | ~~~~~~~~~~~~~~~~~~~~~~
cuCIM API Reference
~~~~~~~~~~~~~~~~~~~~~~
Clara Submodules
================
.. automodule:: cucim.clara
:members:
:undoc-members:
cache
-----
.. automodule:: cucim.clara.cache
:members:
:undoc-members:
filesystem
----------
.. automodule:: cucim.clara.filesystem
:members:
:undoc-members:
io
---
.. automodule:: cucim.clara.io
:members:
:undoc-members:
core Submodules
===============
color
-----
.. automodule:: cucim.core.operations.color
:members:
:undoc-members:
expose
------
.. automodule:: cucim.core.operations.expose
:members:
:undoc-members:
intensity
---------
.. automodule:: cucim.core.operations.intensity
:members:
:undoc-members:
morphology
----------
.. automodule:: cucim.core.operations.morphology
:members:
:undoc-members:
spatial
-------
.. automodule:: cucim.core.operations.spatial
:members:
:undoc-members:
skimage Submodules
==================
color
-----
.. automodule:: cucim.skimage.color
:members:
:undoc-members:
data
----
.. automodule:: cucim.skimage.data
:members:
:undoc-members:
exposure
--------
.. automodule:: cucim.skimage.exposure
:members:
:undoc-members:
feature
-------
.. automodule:: cucim.skimage.feature
:members:
:undoc-members:
filters
-------
.. automodule:: cucim.skimage.filters
:members:
:undoc-members:
measure
-------
.. automodule:: cucim.skimage.measure
:members:
:undoc-members:
metrics
-------
.. automodule:: cucim.skimage.metrics
:members:
:undoc-members:
morphology
----------
.. automodule:: cucim.skimage.morphology
:members:
:undoc-members:
registration
------------
.. automodule:: cucim.skimage.registration
:members:
:undoc-members:
restoration
-----------
.. automodule:: cucim.skimage.restoration
:members:
:undoc-members:
segmentation
------------
.. automodule:: cucim.skimage.segmentation
:members:
:undoc-members:
transform
---------
.. automodule:: cucim.skimage.transform
:members:
:undoc-members:
util
----
.. automodule:: cucim.skimage.util
:members:
:undoc-members:
Submodule Contents
==================
skimage
-------
.. automodule:: cucim.skimage
:members:
:undoc-members:
| 0 |
rapidsai_public_repos/cucim/docs | rapidsai_public_repos/cucim/docs/source/index.rst |
Welcome to cuCIM's documentation!
====================================
cuCIM (Compute Unified Device Architecture Clara IMage) is an open-source, accelerated computer vision and image processing software library for multidimensional images used in biomedical, geospatial, material and life science, and remote sensing use cases.
cuCIM provides GPU-accelerated I/O,
computer vision and image processing primitives for N-Dimensional images including:
- color conversion
- exposure
- feature extraction
- filters
- measure
- metrics
- morphology
- registration
- restoration
- segmentation
- transforms
cuCIM supports the following formats:
- Aperio ScanScope Virtual Slide (SVS)
- Philips TIFF
- Generic Tiled, Multi-resolution RGB TIFF files with the following
compression schemes:
- No Compression
- JPEG
- JPEG2000
- Lempel-Ziv-Welch (LZW)
- Deflate
Our API mirrors `scikit-image
<https://scikit-image.org/>`_ for image manipulation and `OpenSlide
<https://openslide.org/>`_ for image loading.
cuCIM is interoperable with the following workflows:
- Albumentations
- cuPY
- Data Loading Library (DALI)
- JFX
- MONAI
- Numba
- NumPy
- PyTorch
- Tensorflow
- Triton
cuCIM is fully open sourced under the Apache-2.0 license, and the Clara
and RAPIDS teams welcome new and seasoned contributors, users and
hobbyists! You may download cuCIM via Anaconda `Conda`_ or `PyPI`_. Thank
you for your wonderful support! Below, we provide some resources to help
get you started.
**Blogs**
- `Accelerating Scikit-Image API with cuCIM: n-Dimensional Image
Processing and IO on GPUs`_
- `Accelerating Digital Pathology Pipelines with NVIDIA Clara™ Deploy`_
**Webinars**
- `cuCIM: a GPU Image IO and Processing Library`_
.. _Conda: https://anaconda.org/rapidsai-nightly/cucim
.. _PyPi: https://pypi.org/project/cucim/
.. _`Accelerating Scikit-Image API with cuCIM: n-Dimensional Image Processing and IO on GPUs`: https://developer.nvidia.com/blog/cucim-rapid-n-dimensional-image-processing-and-i-o-on-gpus/
.. _Accelerating Digital Pathology Pipelines with NVIDIA Clara™ Deploy: https://developer.nvidia.com/blog/accelerating-digital-pathology-pipelines-with-nvidia-clara-deploy-2/
.. _`cuCIM: a GPU Image IO and Processing Library`: https://www.youtube.com/watch?v=G46kOOM9xbQ
Contents
==================
.. toctree::
:maxdepth: 4
api.rst
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 0 |
rapidsai_public_repos/cucim/docs/source | rapidsai_public_repos/cucim/docs/source/ext/doi_role.py | # From scikit-image:
# https://github.com/scikit-image/scikit-image/blob/16e0b87b8cb1abc4c78ebf6cd013dadc90810f39/doc/ext/doi_role.py
# -*- coding: utf-8 -*-
"""
doilinks
~~~~~~~~~~~~~~~~~~~
Extension to add links to DOIs. With this extension you can use e.g.
:doi:`10.1016/S0022-2836(05)80360-2` in your documents. This will
create a link to a DOI resolver
(``https://doi.org/10.1016/S0022-2836(05)80360-2``).
The link caption will be the raw DOI.
You can also give an explicit caption, e.g.
:doi:`Basic local alignment search tool <10.1016/S0022-2836(05)80360-2>`.
:copyright: Copyright 2015 Jon Lund Steffensen. Based on extlinks by
the Sphinx team.
:license: BSD.
"""
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
def doi_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
    """Docutils role handler: turn ``:doi:`...``` into a https://doi.org link.

    Supports an optional explicit title, e.g.
    ``:doi:`Some paper <10.1000/xyz>```; otherwise the caption is
    ``DOI:<doi>``.  Returns the standard role tuple of (nodes, messages).

    Note: the mutable default arguments (``{}`` / ``[]``) were replaced with
    ``None``; they were never read or mutated here, so behavior is unchanged
    while avoiding the shared-mutable-default pitfall.
    """
    text = utils.unescape(text)
    has_explicit_title, title, part = split_explicit_title(text)
    full_url = "https://doi.org/" + part
    if not has_explicit_title:
        title = "DOI:" + part
    pnode = nodes.reference(title, title, internal=False, refuri=full_url)
    return [pnode], []
def arxiv_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
    """Docutils role handler: turn ``:arxiv:`...``` into an arxiv.org link.

    Mirrors :func:`doi_role` but targets ``https://arxiv.org/abs/`` and
    labels implicit titles ``arXiv:<id>``.  Mutable default arguments were
    replaced with ``None`` (they are unused), matching the fix in
    ``doi_role``.
    """
    text = utils.unescape(text)
    has_explicit_title, title, part = split_explicit_title(text)
    full_url = "https://arxiv.org/abs/" + part
    if not has_explicit_title:
        title = "arXiv:" + part
    pnode = nodes.reference(title, title, internal=False, refuri=full_url)
    return [pnode], []
def setup_link_role(app):
    """Register the DOI and arXiv roles (both capitalizations) on the app."""
    registrations = (
        ("doi", doi_role),
        ("DOI", doi_role),
        ("arXiv", arxiv_role),
        ("arxiv", arxiv_role),
    )
    for role_name, handler in registrations:
        app.add_role(role_name, handler)
def setup(app):
    """Sphinx extension entry point: defer role registration to build init."""
    metadata = {"version": "0.1", "parallel_read_safe": True}
    app.connect("builder-inited", setup_link_role)
    return metadata
| 0 |
rapidsai_public_repos/cucim/experiments | rapidsai_public_repos/cucim/experiments/Using_Cache/benchmark.py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import concurrent.futures
from contextlib import ContextDecorator
from datetime import datetime
from itertools import repeat
from time import perf_counter
import numpy as np
import rasterio
from openslide import OpenSlide
from rasterio.windows import Window
from cucim import CuImage
class Timer(ContextDecorator):
    """Context manager / decorator that measures and prints wall-clock time.

    On exit it prints ``"<message> : <seconds>"``.  ``elapsed_time`` may be
    called inside the ``with`` block to capture an intermediate reading;
    the exit handler then reuses that reading instead of re-measuring.
    """

    def __init__(self, message):
        # Label used in the printout; `end` stays None until measured.
        self.message = message
        self.end = None

    def __enter__(self):
        # Capture the starting timestamp and hand the timer back.
        self.start = perf_counter()
        return self

    def __exit__(self, exc_type, exc, exc_tb):
        # Measure now only if no reading was taken inside the block.
        if not self.end:
            self.elapsed_time()
        print("{} : {}".format(self.message, self.end - self.start))

    def elapsed_time(self):
        # Record the current instant as the end point and return the delta.
        self.end = perf_counter()
        return self.end - self.start
def load_tile_openslide(slide, start_loc, patch_size):
    """Fetch one square level-0 patch from an OpenSlide-style handle."""
    slide.read_region(start_loc, 0, [patch_size, patch_size])
def load_tile_openslide_chunk(inp_file, start_loc_list, patch_size):
    """Open the slide once with OpenSlide and read one patch per location."""
    with OpenSlide(inp_file) as slide:
        for loc in start_loc_list:
            slide.read_region(loc, 0, [patch_size, patch_size])
def load_tile_cucim(slide, start_loc, patch_size):
    """Fetch one square level-0 patch from a CuImage-style handle."""
    slide.read_region(start_loc, [patch_size, patch_size], 0)
def load_tile_cucim_chunk(inp_file, start_loc_list, patch_size):
    """Open `inp_file` with CuImage and read one patch per start location.

    Failures are printed rather than raised so sibling executor tasks
    keep running (matching the original best-effort behavior).
    """
    try:
        slide = CuImage(inp_file)
        for loc in start_loc_list:
            slide.read_region(loc, [patch_size, patch_size], 0)
    except Exception as e:
        print(e)
# Identity affine transform (pixel coordinates map 1:1).
# NOTE(review): this module-level `identity` is not referenced by
# load_tile_rasterio below; load_tile_rasterio_chunk builds its own local
# copy instead — confirm whether this one is still needed.
identity = rasterio.Affine(1, 0, 0, 0, 1, 0)


def load_tile_rasterio(slide, start_loc, tile_size):
    # Read bands 1-3 (presumably RGB) for a square window anchored at
    # start_loc, then move the band axis from position 0 to the end so
    # the array is (rows, cols, bands). The result is discarded; only
    # the read cost matters for the benchmark.
    _ = np.moveaxis(
        slide.read(
            [1, 2, 3],
            window=Window.from_slices(
                (start_loc[0], start_loc[0] + tile_size),
                (start_loc[1], start_loc[1] + tile_size),
            ),
        ),
        0,
        -1,
    )
def load_tile_rasterio_chunk(input_file, start_loc_list, patch_size):
    """Open `input_file` with rasterio (identity transform, one decoder
    thread) and read bands 1-3 for one square window per start location."""
    identity = rasterio.Affine(1, 0, 0, 0, 1, 0)
    slide = rasterio.open(input_file, transform=identity, num_threads=1)
    for loc in start_loc_list:
        rows = (loc[0], loc[0] + patch_size)
        cols = (loc[1], loc[1] + patch_size)
        band_first = slide.read([1, 2, 3], window=Window.from_slices(rows, cols))
        # Band axis moved last, matching the other readers' layout.
        np.moveaxis(band_first, 0, -1)
def load_tile_openslide_chunk_mp(inp_file, start_loc_list, patch_size):
    """Worker-process variant: open with OpenSlide, read one patch per loc."""
    with OpenSlide(inp_file) as slide:
        for loc in start_loc_list:
            slide.read_region(loc, 0, [patch_size, patch_size])
def load_tile_cucim_chunk_mp(inp_file, start_loc_list, patch_size):
    """Worker-process variant: open with CuImage, read one patch per loc."""
    slide = CuImage(inp_file)
    for loc in start_loc_list:
        slide.read_region(loc, [patch_size, patch_size], 0)
def load_tile_rasterio_chunk_mp(input_file, start_loc_list, patch_size):
    """Worker-process variant: rasterio reader (no explicit transform)."""
    slide = rasterio.open(input_file, num_threads=1)
    for loc in start_loc_list:
        rows = (loc[0], loc[0] + patch_size)
        cols = (loc[1], loc[1] + patch_size)
        band_first = slide.read([1, 2, 3], window=Window.from_slices(rows, cols))
        np.moveaxis(band_first, 0, -1)
def experiment_thread(
    cache_strategy, input_file, num_threads, start_location, patch_size
):
    """Benchmark threaded tile reads: OpenSlide vs cuCIM vs rasterio.

    For each worker count from 1 to ``num_threads``, the slide is split
    into ``patch_size`` x ``patch_size`` tiles starting at
    ``start_location``, the tile list is divided into contiguous chunks
    (one per worker), and each library reads its chunks via a
    ThreadPoolExecutor.  One CSV line per worker count is appended to
    ``experiment.txt`` and printed.
    """
    import psutil

    print(" ", psutil.virtual_memory())
    # range(1, num_threads + 1):  # (num_threads,):
    for num_workers in range(1, num_threads + 1):
        # Seeded with 1 (not 0) so the speedup ratios in output_text
        # cannot divide by zero if a timing section is skipped.
        openslide_time = 1
        cucim_time = 1
        rasterio_time = 1

        with OpenSlide(input_file) as slide:
            width, height = slide.dimensions

            start_loc_data = [
                (sx, sy)
                for sy in range(start_location, height, patch_size)
                for sx in range(start_location, width, patch_size)
            ]
            # One contiguous slice of the tile list per worker.
            chunk_size = len(start_loc_data) // num_workers
            start_loc_list_iter = [
                start_loc_data[i : i + chunk_size]
                for i in range(0, len(start_loc_data), chunk_size)
            ]
            with Timer(" Thread elapsed time (OpenSlide)") as timer:
                with concurrent.futures.ThreadPoolExecutor(
                    max_workers=num_workers
                ) as executor:
                    executor.map(
                        load_tile_openslide_chunk,
                        repeat(input_file),
                        start_loc_list_iter,
                        repeat(patch_size),
                    )
                openslide_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())

        # Cache capacity (MiB): ~1/20 of the currently-available RAM.
        cache_size = psutil.virtual_memory().available // 1024 // 1024 // 20
        cache = CuImage.cache(
            cache_strategy, memory_capacity=cache_size, record_stat=True
        )
        cucim_time = 0
        slide = CuImage(input_file)
        start_loc_data = [
            (sx, sy)
            for sy in range(start_location, height, patch_size)
            for sx in range(start_location, width, patch_size)
        ]
        chunk_size = len(start_loc_data) // num_workers
        start_loc_list_iter = [
            start_loc_data[i : i + chunk_size]
            for i in range(0, len(start_loc_data), chunk_size)
        ]
        with Timer(" Thread elapsed time (cuCIM)") as timer:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=num_workers
            ) as executor:
                executor.map(
                    load_tile_cucim_chunk,
                    repeat(input_file),
                    start_loc_list_iter,
                    repeat(patch_size),
                )
            cucim_time = timer.elapsed_time()
        print(f" hit: {cache.hit_count} miss: {cache.miss_count}")
        print(" ", psutil.virtual_memory())

        start_loc_data = [
            (sx, sy)
            for sy in range(start_location, height, patch_size)
            for sx in range(start_location, width, patch_size)
        ]
        chunk_size = len(start_loc_data) // num_workers
        start_loc_list_iter = [
            start_loc_data[i : i + chunk_size]
            for i in range(0, len(start_loc_data), chunk_size)
        ]
        with Timer(" Thread elapsed time (rasterio)") as timer:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=num_workers
            ) as executor:
                executor.map(
                    load_tile_rasterio_chunk,
                    repeat(input_file),
                    start_loc_list_iter,
                    repeat(patch_size),
                )
            rasterio_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())

        # CSV row: timestamp, mode, strategy, file, offset, patch, workers,
        # per-library times, speedup ratios vs cuCIM, cache stats.
        output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},thread,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n"  # noqa: E501
        with open("experiment.txt", "a+") as f:
            f.write(output_text)
        print(output_text)
def experiment_process(
    cache_strategy, input_file, num_processes, start_location, patch_size
):
    """Benchmark process-pool tile reads: OpenSlide vs cuCIM vs rasterio.

    Same tiling/chunking scheme as ``experiment_thread`` but work is
    dispatched through a ProcessPoolExecutor to the ``*_mp`` readers.
    One CSV line per worker count is appended to ``experiment.txt``.
    """
    import psutil

    print(" ", psutil.virtual_memory())
    for num_workers in range(1, num_processes + 1):
        # Seeded with 1 (not 0) so the speedup ratios below cannot
        # divide by zero if a timing section is skipped.
        openslide_time = 1
        cucim_time = 1
        rasterio_time = 1

        with OpenSlide(input_file) as slide:
            width, height = slide.dimensions

            start_loc_data = [
                (sx, sy)
                for sy in range(start_location, height, patch_size)
                for sx in range(start_location, width, patch_size)
            ]
            chunk_size = len(start_loc_data) // num_workers
            start_loc_list_iter = [
                start_loc_data[i : i + chunk_size]
                for i in range(0, len(start_loc_data), chunk_size)
            ]
            with Timer(" Process elapsed time (OpenSlide)") as timer:
                with concurrent.futures.ProcessPoolExecutor(
                    max_workers=num_workers
                ) as executor:
                    executor.map(
                        load_tile_openslide_chunk_mp,
                        repeat(input_file),
                        start_loc_list_iter,
                        repeat(patch_size),
                    )
                openslide_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())

        # Cache capacity (MiB): ~1/20 of available RAM; a shared-memory
        # cache is shared by all workers, so scale it by worker count.
        cache_size = psutil.virtual_memory().available // 1024 // 1024 // 20
        if cache_strategy == "shared_memory":
            cache_size = cache_size * num_workers
        cache = CuImage.cache(
            cache_strategy, memory_capacity=cache_size, record_stat=True
        )
        cucim_time = 0
        slide = CuImage(input_file)
        start_loc_data = [
            (sx, sy)
            for sy in range(start_location, height, patch_size)
            for sx in range(start_location, width, patch_size)
        ]
        chunk_size = len(start_loc_data) // num_workers
        start_loc_list_iter = [
            start_loc_data[i : i + chunk_size]
            for i in range(0, len(start_loc_data), chunk_size)
        ]
        with Timer(" Process elapsed time (cuCIM)") as timer:
            with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers
            ) as executor:
                executor.map(
                    load_tile_cucim_chunk_mp,
                    repeat(input_file),
                    start_loc_list_iter,
                    repeat(patch_size),
                )
            cucim_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())

        rasterio_time = 0
        start_loc_data = [
            (sx, sy)
            for sy in range(start_location, height, patch_size)
            for sx in range(start_location, width, patch_size)
        ]
        chunk_size = len(start_loc_data) // num_workers
        start_loc_list_iter = [
            start_loc_data[i : i + chunk_size]
            for i in range(0, len(start_loc_data), chunk_size)
        ]
        with Timer(" Process elapsed time (rasterio)") as timer:
            with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers
            ) as executor:
                executor.map(
                    load_tile_rasterio_chunk_mp,
                    repeat(input_file),
                    start_loc_list_iter,
                    repeat(patch_size),
                )
            rasterio_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())

        # CSV row: timestamp, mode, strategy, file, offset, patch, workers,
        # per-library times, speedup ratios vs cuCIM, cache stats.
        output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},process,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n"  # noqa: E501
        with open("experiment.txt", "a+") as f:
            f.write(output_text)
        print(output_text)
# Run the full benchmark matrix:
#   executor mode  : thread / process
#   cache strategy : nocache / per_process / shared_memory
#   input image    : image.tif, image2.tif (256px patches),
#                    0486052bb.tiff (1024px patches)
#   start offset   : 0 and 1 (offset 1 shifts every read off the tile
#                    grid by one pixel — presumably to force reads that
#                    cross tile boundaries; TODO confirm intent)
experiment_thread("nocache", "notebooks/input/image.tif", 12, 0, 256)
experiment_process("nocache", "notebooks/input/image.tif", 12, 0, 256)
experiment_thread("per_process", "notebooks/input/image.tif", 12, 0, 256)
experiment_process("per_process", "notebooks/input/image.tif", 12, 0, 256)
experiment_thread("shared_memory", "notebooks/input/image.tif", 12, 0, 256)
experiment_process("shared_memory", "notebooks/input/image.tif", 12, 0, 256)
experiment_thread("nocache", "notebooks/input/image.tif", 12, 1, 256)
experiment_process("nocache", "notebooks/input/image.tif", 12, 1, 256)
experiment_thread("per_process", "notebooks/input/image.tif", 12, 1, 256)
experiment_process("per_process", "notebooks/input/image.tif", 12, 1, 256)
experiment_thread("shared_memory", "notebooks/input/image.tif", 12, 1, 256)
experiment_process("shared_memory", "notebooks/input/image.tif", 12, 1, 256)

experiment_thread("nocache", "notebooks/input/image2.tif", 12, 0, 256)
experiment_process("nocache", "notebooks/input/image2.tif", 12, 0, 256)
experiment_thread("per_process", "notebooks/input/image2.tif", 12, 0, 256)
experiment_process("per_process", "notebooks/input/image2.tif", 12, 0, 256)
experiment_thread("shared_memory", "notebooks/input/image2.tif", 12, 0, 256)
experiment_process("shared_memory", "notebooks/input/image2.tif", 12, 0, 256)
experiment_thread("nocache", "notebooks/input/image2.tif", 12, 1, 256)
experiment_process("nocache", "notebooks/input/image2.tif", 12, 1, 256)
experiment_thread("per_process", "notebooks/input/image2.tif", 12, 1, 256)
experiment_process("per_process", "notebooks/input/image2.tif", 12, 1, 256)
experiment_thread("shared_memory", "notebooks/input/image2.tif", 12, 1, 256)
experiment_process("shared_memory", "notebooks/input/image2.tif", 12, 1, 256)

experiment_thread("nocache", "notebooks/0486052bb.tiff", 12, 0, 1024)
experiment_process("nocache", "notebooks/0486052bb.tiff", 12, 0, 1024)
experiment_thread("per_process", "notebooks/0486052bb.tiff", 12, 0, 1024)
experiment_process("per_process", "notebooks/0486052bb.tiff", 12, 0, 1024)
experiment_thread("shared_memory", "notebooks/0486052bb.tiff", 12, 0, 1024)
experiment_process("shared_memory", "notebooks/0486052bb.tiff", 12, 0, 1024)
experiment_thread("nocache", "notebooks/0486052bb.tiff", 12, 1, 1024)
experiment_process("nocache", "notebooks/0486052bb.tiff", 12, 1, 1024)
experiment_thread("per_process", "notebooks/0486052bb.tiff", 12, 1, 1024)
experiment_process("per_process", "notebooks/0486052bb.tiff", 12, 1, 1024)
experiment_thread("shared_memory", "notebooks/0486052bb.tiff", 12, 1, 1024)
experiment_process("shared_memory", "notebooks/0486052bb.tiff", 12, 1, 1024)
| 0 |
rapidsai_public_repos/cucim/experiments | rapidsai_public_repos/cucim/experiments/Supporting_Aperio_SVS_Format/benchmark.py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import concurrent.futures
from contextlib import ContextDecorator
from datetime import datetime
from itertools import repeat
from time import perf_counter
from openslide import OpenSlide
from cucim import CuImage
from cucim.clara.filesystem import discard_page_cache # noqa: F401
class Timer(ContextDecorator):
    """Context manager / decorator measuring wall-clock elapsed time.

    On exit it prints ``"<message> : <seconds>"``. ``elapsed_time()``
    may be called inside the ``with`` block to sample the timer early;
    the sampled end time is then reused by ``__exit__``.
    """

    def __init__(self, message):
        # Label printed alongside the measured duration.
        self.message = message
        # Remains None until the timer is stopped or sampled.
        self.end = None

    def elapsed_time(self):
        # Sample the clock and return seconds since __enter__.
        self.end = perf_counter()
        return self.end - self.start

    def __enter__(self):
        # Start the clock on entry to the managed block.
        self.start = perf_counter()
        return self

    def __exit__(self, exc_type, exc, exc_tb):
        # Sample the clock only if the caller has not already done so.
        if not self.end:
            self.elapsed_time()
        print("{} : {}".format(self.message, self.end - self.start))
def load_tile_openslide(slide, start_loc, patch_size):
    """Read one square level-0 patch at ``start_loc`` from ``slide``."""
    region_size = [patch_size, patch_size]
    slide.read_region(start_loc, 0, region_size)
def load_tile_openslide_chunk(inp_file, start_loc_list, patch_size):
    """Thread-pool worker: open ``inp_file`` with OpenSlide and read one
    level-0 patch per location in ``start_loc_list``."""
    region_size = [patch_size, patch_size]
    with OpenSlide(inp_file) as slide:
        for loc in start_loc_list:
            slide.read_region(loc, 0, region_size)
def load_tile_cucim(slide, start_loc, patch_size):
    """Read one square level-0 patch at ``start_loc`` via cuCIM."""
    region_size = [patch_size, patch_size]
    slide.read_region(start_loc, region_size, 0)
def load_tile_cucim_chunk(inp_file, start_loc_list, patch_size):
    """Thread-pool worker: read one level-0 patch per location via cuCIM.

    Exceptions are printed instead of raised so that one failing worker
    does not abort the whole benchmark run.
    """
    try:
        image = CuImage(inp_file)
        region_size = [patch_size, patch_size]
        for loc in start_loc_list:
            image.read_region(loc, region_size, 0)
    except Exception as e:
        print(e)
def load_tile_openslide_chunk_mp(inp_file, start_loc_list, patch_size):
    """Process-pool worker: read one level-0 patch per location with
    OpenSlide (takes a file path so it can run in a child process)."""
    region_size = [patch_size, patch_size]
    with OpenSlide(inp_file) as slide:
        for loc in start_loc_list:
            slide.read_region(loc, 0, region_size)
def load_tile_cucim_chunk_mp(inp_file, start_loc_list, patch_size):
    """Process-pool worker: read one level-0 patch per location with
    cuCIM (takes a file path so it can run in a child process)."""
    image = CuImage(inp_file)
    region_size = [patch_size, patch_size]
    for loc in start_loc_list:
        image.read_region(loc, region_size, 0)
def experiment_thread(
    cache_strategy, input_file, num_threads, start_location, patch_size
):
    """Benchmark multi-threaded tile reads: OpenSlide vs cuCIM.

    For several worker counts the image is tiled into a grid of
    ``patch_size`` x ``patch_size`` patches starting at ``start_location``,
    the tile list is split across a ThreadPoolExecutor, and the elapsed
    wall-clock time for each library is appended as a CSV row to
    ``experiment.txt``.

    NOTE(review): ``num_threads`` is currently unused — the worker counts
    are hard-coded to (1, 3, 6, 9, 12); confirm whether the parameter
    should drive the sweep instead.
    """
    import psutil
    print(" ", psutil.virtual_memory())
    # Hard-coded sweep of thread counts (see note in the docstring).
    for num_workers in (1, 3, 6, 9, 12): # range(1, num_threads + 1):
        # Defaults of 1 keep the ratio columns in the CSV row well-defined
        # even if a measurement below is skipped.
        openslide_time = 1
        cucim_time = 1
        rasterio_time = 1
        # discard_page_cache(input_file)
        with OpenSlide(input_file) as slide:
            width, height = slide.dimensions
            # Top-left corner of every tile in the sweep grid.
            start_loc_data = [
                (sx, sy)
                for sy in range(start_location, height, patch_size)
                for sx in range(start_location, width, patch_size)
            ]
            # Partition the tile list into one contiguous chunk per worker.
            chunk_size = len(start_loc_data) // num_workers
            start_loc_list_iter = [
                start_loc_data[i : i + chunk_size]
                for i in range(0, len(start_loc_data), chunk_size)
            ]
            with Timer(" Thread elapsed time (OpenSlide)") as timer:
                with concurrent.futures.ThreadPoolExecutor(
                    max_workers=num_workers
                ) as executor:
                    executor.map(
                        load_tile_openslide_chunk,
                        repeat(input_file),
                        start_loc_list_iter,
                        repeat(patch_size),
                    )
                openslide_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())
        # Size the cuCIM tile cache from currently-available memory
        # (1/20th of it, expressed in MiB).
        cache_size = psutil.virtual_memory().available // 1024 // 1024 // 20
        cache = CuImage.cache(
            cache_strategy, memory_capacity=cache_size, record_stat=True
        )
        cucim_time = 0
        # discard_page_cache(input_file)
        slide = CuImage(input_file)
        # Rebuild the same tile grid/partition for the cuCIM measurement.
        start_loc_data = [
            (sx, sy)
            for sy in range(start_location, height, patch_size)
            for sx in range(start_location, width, patch_size)
        ]
        chunk_size = len(start_loc_data) // num_workers
        start_loc_list_iter = [
            start_loc_data[i : i + chunk_size]
            for i in range(0, len(start_loc_data), chunk_size)
        ]
        with Timer(" Thread elapsed time (cuCIM)") as timer:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=num_workers
            ) as executor:
                executor.map(
                    load_tile_cucim_chunk,
                    repeat(input_file),
                    start_loc_list_iter,
                    repeat(patch_size),
                )
            cucim_time = timer.elapsed_time()
        print(f" hit: {cache.hit_count} miss: {cache.miss_count}")
        print(" ", psutil.virtual_memory())
        # Append one CSV row: timestamp, mode, cache strategy, inputs,
        # timings, speed-up ratios, cache size and hit/miss counters.
        output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},thread,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" # noqa: E501
        with open("experiment.txt", "a+") as f:
            f.write(output_text)
        print(output_text)
def experiment_process(
    cache_strategy, input_file, num_processes, start_location, patch_size
):
    """Benchmark multi-process tile reads: OpenSlide vs cuCIM.

    Same sweep as ``experiment_thread`` but work is distributed with a
    ProcessPoolExecutor. Results are appended as CSV rows to
    ``experiment.txt``.

    NOTE(review): ``num_processes`` is currently unused — worker counts
    are hard-coded to (1, 3, 6, 9, 12); confirm intent.
    """
    import psutil
    print(" ", psutil.virtual_memory())
    for num_workers in (1, 3, 6, 9, 12): # range(1, num_processes + 1):
        # Defaults of 1 keep the CSV ratio columns well-defined.
        openslide_time = 1
        cucim_time = 1
        rasterio_time = 1
        # discard_page_cache(input_file)
        with OpenSlide(input_file) as slide:
            width, height = slide.dimensions
            # Top-left corner of every tile in the sweep grid.
            start_loc_data = [
                (sx, sy)
                for sy in range(start_location, height, patch_size)
                for sx in range(start_location, width, patch_size)
            ]
            # One contiguous chunk of tile locations per worker process.
            chunk_size = len(start_loc_data) // num_workers
            start_loc_list_iter = [
                start_loc_data[i : i + chunk_size]
                for i in range(0, len(start_loc_data), chunk_size)
            ]
            with Timer(" Process elapsed time (OpenSlide)") as timer:
                with concurrent.futures.ProcessPoolExecutor(
                    max_workers=num_workers
                ) as executor:
                    executor.map(
                        load_tile_openslide_chunk_mp,
                        repeat(input_file),
                        start_loc_list_iter,
                        repeat(patch_size),
                    )
                openslide_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())
        cache_size = psutil.virtual_memory().available // 1024 // 1024 // 20
        # A shared-memory cache is shared by all worker processes, so scale
        # its capacity by the worker count to keep per-process capacity
        # comparable with the per_process strategy.
        if cache_strategy == "shared_memory":
            cache_size = cache_size * num_workers
        cache = CuImage.cache(
            cache_strategy, memory_capacity=cache_size, record_stat=True
        )
        cucim_time = 0
        # discard_page_cache(input_file)
        slide = CuImage(input_file)
        # Rebuild the same tile grid/partition for the cuCIM measurement.
        start_loc_data = [
            (sx, sy)
            for sy in range(start_location, height, patch_size)
            for sx in range(start_location, width, patch_size)
        ]
        chunk_size = len(start_loc_data) // num_workers
        start_loc_list_iter = [
            start_loc_data[i : i + chunk_size]
            for i in range(0, len(start_loc_data), chunk_size)
        ]
        with Timer(" Process elapsed time (cuCIM)") as timer:
            with concurrent.futures.ProcessPoolExecutor(
                max_workers=num_workers
            ) as executor:
                executor.map(
                    load_tile_cucim_chunk_mp,
                    repeat(input_file),
                    start_loc_list_iter,
                    repeat(patch_size),
                )
            cucim_time = timer.elapsed_time()
        print(" ", psutil.virtual_memory())
        rasterio_time = 0
        # NOTE(review): the tile list below is rebuilt but never consumed —
        # the rasterio measurement appears to have been removed. Confirm
        # this dead block can be deleted.
        start_loc_data = [
            (sx, sy)
            for sy in range(start_location, height, patch_size)
            for sx in range(start_location, width, patch_size)
        ]
        chunk_size = len(start_loc_data) // num_workers
        start_loc_list_iter = [
            start_loc_data[i : i + chunk_size]
            for i in range(0, len(start_loc_data), chunk_size)
        ]
        print(" ", psutil.virtual_memory())
        # Append one CSV row with timings, ratios and cache statistics.
        output_text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')},process,{cache_strategy},{input_file},{start_location},{patch_size},{num_workers},{openslide_time},{cucim_time},{rasterio_time},{openslide_time / cucim_time},{rasterio_time / cucim_time},{cache_size},{cache.hit_count},{cache.miss_count}\n" # noqa: E501
        with open("experiment.txt", "a+") as f:
            f.write(output_text)
        print(output_text)
# Run the full benchmark sweep ten times so run-to-run variance can be
# averaged out of the rows appended to experiment.txt. Each iteration
# benchmarks three SVS images with both thread- and process-based workers
# using the per_process cache strategy.
for i in range(10):
    experiment_thread(
        "per_process", "notebooks/input/TUPAC-TR-488.svs", 12, 120, 240
    )
    experiment_thread(
        "per_process", "notebooks/input/JP2K-33003-2.svs", 12, 128, 256
    )
    experiment_thread(
        "per_process", "notebooks/input/CMU-1-JP2K-33005.svs", 12, 120, 240
    )
    experiment_process(
        "per_process", "notebooks/input/TUPAC-TR-488.svs", 12, 120, 240
    )
    experiment_process(
        "per_process", "notebooks/input/JP2K-33003-2.svs", 12, 128, 256
    )
    experiment_process(
        "per_process", "notebooks/input/CMU-1-JP2K-33005.svs", 12, 120, 240
    )
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/test_python.sh | #!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Common setup steps shared by Python test jobs
set -euo pipefail
. /opt/conda/etc/profile.d/conda.sh
rapids-logger "Generate Python testing dependencies"
rapids-dependency-file-generator \
--output conda \
--file_key test_python \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
# Temporarily allow unbound variables for conda activation.
set +u
conda activate test
set -u
rapids-logger "Downloading artifacts from previous jobs"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python)
RAPIDS_TESTS_DIR=${RAPIDS_TESTS_DIR:-"${PWD}/test-results"}
RAPIDS_COVERAGE_DIR=${RAPIDS_COVERAGE_DIR:-"${PWD}/coverage-results"}
mkdir -p "${RAPIDS_TESTS_DIR}" "${RAPIDS_COVERAGE_DIR}"
rapids-print-env
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
libcucim cucim
rapids-logger "Check GPU usage"
nvidia-smi
EXITCODE=0
trap "EXITCODE=1" ERR
set +e
rapids-logger "pytest cucim"
pushd python/cucim
timeout 20m pytest \
--cache-clear \
--junitxml="${RAPIDS_TESTS_DIR}/junit-cucim.xml" \
--numprocesses=8 \
--dist=loadscope \
--cov-config=.coveragerc \
--cov=cucim \
--cov-report=xml:"${RAPIDS_COVERAGE_DIR}/cucim-coverage.xml" \
--cov-report=term \
-v \
src \
tests/unit \
tests/performance
popd
rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/build_python.sh | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION.
set -euo pipefail
source rapids-env-update
export CMAKE_GENERATOR=Ninja
rapids-print-env
package_name="cucim"
package_dir="python/cucim"
package_src_dir="${package_dir}/src/${package_name}"
version=$(rapids-generate-version)
commit=$(git rev-parse HEAD)
echo "${version}" > VERSION
sed -i "/^__git_commit__/ s/= .*/= \"${commit}\"/g" "${package_src_dir}/_version.py"
rapids-logger "Begin py build"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
# TODO: Remove `--no-test` flag once importing on a CPU
# node works correctly
RAPIDS_PACKAGE_VERSION=${version} rapids-conda-retry mambabuild \
--no-test \
--channel "${CPP_CHANNEL}" \
conda/recipes/cucim
rapids-upload-conda-to-s3 python
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/build_wheel.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
package_name="cucim"
package_dir="python/cucim"
package_src_dir="${package_dir}/src/${package_name}"
CMAKE_BUILD_TYPE="release"
source rapids-configure-sccache
source rapids-date-string
version=$(rapids-generate-version)
commit=$(git rev-parse HEAD)
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
# Patch project metadata files to include the CUDA version suffix and version override.
pyproject_file="${package_dir}/pyproject.toml"
PACKAGE_CUDA_SUFFIX="-${RAPIDS_PY_CUDA_SUFFIX}"
# update package name to have the cuda suffix
sed -i "s/name = \"${package_name}\"/name = \"${package_name}${PACKAGE_CUDA_SUFFIX}\"/g" ${pyproject_file}
echo "${version}" > VERSION
sed -i "/^__git_commit__/ s/= .*/= \"${commit}\"/g" "${package_src_dir}/_version.py"
if [[ ${PACKAGE_CUDA_SUFFIX} == "-cu12" ]]; then
# change pyproject.toml to use CUDA 12.x version of cupy
sed -i "s/cupy-cuda11x/cupy-cuda12x/g" ${pyproject_file}
fi
# Install pip build dependencies (not yet using pyproject.toml)
rapids-dependency-file-generator \
--file_key "py_build" \
--output "requirements" \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee build_requirements.txt
pip install -r build_requirements.txt
# First build the C++ lib using CMake via the run script
./run build_local all ${CMAKE_BUILD_TYPE}
cd "${package_dir}"
python -m pip wheel . -w dist -vvv --no-deps --disable-pip-version-check
mkdir -p final_dist
python -m auditwheel repair -w final_dist dist/*
RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 final_dist
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/check_style.sh | #!/bin/bash
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
set -euo pipefail
rapids-logger "Create checks conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key checks \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n checks
conda activate checks
# Run pre-commit checks
pre-commit run --hook-stage manual --all-files --show-diff-on-failure
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/test_wheel.sh | #!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -eou pipefail
RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
RAPIDS_PY_WHEEL_NAME="cucim_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist
# echo to expand wildcard before adding `[extra]` requires for pip
python -m pip install $(echo ./dist/cucim*.whl)[test]
# Run smoke tests for aarch64 pull requests
if [[ "$(arch)" == "aarch64" && ${RAPIDS_BUILD_TYPE} == "pull-request" ]]; then
python ./ci/wheel_smoke_test.py
else
# TODO: revisit enabling imagecodecs package during testing
python -m pytest ./python/cucim
fi
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/wheel_smoke_test.py | import cupy as cp
import cucim
import cucim.skimage
if __name__ == "__main__":
# verify that all top-level modules are available
assert cucim.is_available("clara")
assert cucim.is_available("core")
assert cucim.is_available("skimage")
# generate a synthetic image and apply a filter
img = cucim.skimage.data.binary_blobs(length=512, n_dim=2)
assert isinstance(img, cp.ndarray)
assert img.dtype.kind == "b"
assert img.shape == (512, 512)
eroded = cucim.skimage.morphology.binary_erosion(
img, cp.ones((3, 3), dtype=bool)
)
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/build_cpp.sh | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION.
set -euo pipefail
source rapids-env-update
export CMAKE_GENERATOR=Ninja
rapids-print-env
version=$(rapids-generate-version)
rapids-logger "Begin cpp build"
RAPIDS_PACKAGE_VERSION=${version} rapids-conda-retry mambabuild conda/recipes/libcucim
rapids-upload-conda-to-s3 cpp
| 0 |
rapidsai_public_repos/cucim | rapidsai_public_repos/cucim/ci/build_docs.sh | #!/bin/bash
set -euo pipefail
rapids-logger "Create test conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key docs \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n docs
conda activate docs
rapids-print-env
rapids-logger "Downloading artifacts from previous jobs"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python)
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
cucim libcucim
export RAPIDS_VERSION_NUMBER="23.12"
export RAPIDS_DOCS_DIR="$(mktemp -d)"
rapids-logger "Build Python docs"
pushd docs
sphinx-build -b dirhtml ./source _html
sphinx-build -b text ./source _text
mkdir -p "${RAPIDS_DOCS_DIR}/cucim/"{html,txt}
mv _html/* "${RAPIDS_DOCS_DIR}/cucim/html"
mv _text/* "${RAPIDS_DOCS_DIR}/cucim/txt"
popd
rapids-upload-docs
| 0 |
rapidsai_public_repos/cucim/ci | rapidsai_public_repos/cucim/ci/release/update-version.sh | #!/bin/bash
#########################
# cuCIM Version Updater #
#########################
## Usage
# bash update-version.sh <new_version>
# Format is YY.MM.PP - no leading 'v' or trailing 'a'
NEXT_FULL_TAG=$1
# Get current version
CURRENT_TAG=$(git tag --merged HEAD | grep -xE '^v.*' | sort --version-sort | tail -n 1 | tr -d 'v')
CURRENT_MAJOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[1]}')
CURRENT_MINOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[2]}')
CURRENT_PATCH=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[3]}' | tr -d 'a')
CURRENT_SHORT_TAG=${CURRENT_MAJOR}.${CURRENT_MINOR}
CURRENT_LONG_TAG=${CURRENT_MAJOR}.${CURRENT_MINOR}.${CURRENT_PATCH}
#Get <major>.<minor> for next version
NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}')
NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}')
NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR}
echo "Preparing release $CURRENT_TAG => $NEXT_FULL_TAG"
# Inplace sed replace; workaround for Linux and Mac
function sed_runner() {
sed -i.bak ''"$1"'' $2 && rm -f ${2}.bak
}
# RTD update
sed_runner 's/version = .*/version = '"'${NEXT_SHORT_TAG}'"'/g' docs/source/conf.py
sed_runner 's/release = .*/release = '"'${NEXT_FULL_TAG}'"'/g' docs/source/conf.py
# Centralized version file update
echo "${NEXT_FULL_TAG}" > VERSION
sed_runner "s#\[Version ${CURRENT_LONG_TAG}\](release_notes/v${CURRENT_LONG_TAG}.md)#\[Version ${NEXT_FULL_TAG}\](release_notes/v${NEXT_FULL_TAG}.md)#g" python/cucim/docs/index.md
sed_runner "s/v${CURRENT_LONG_TAG}/v${NEXT_FULL_TAG}/g" python/cucim/docs/getting_started/index.md
sed_runner "s#cucim.kit.cuslide@${CURRENT_LONG_TAG}.so#cucim.kit.cuslide@${NEXT_FULL_TAG}.so#g" python/cucim/docs/getting_started/index.md
sed_runner "s#cucim.kit.cuslide@${CURRENT_LONG_TAG}.so#cucim.kit.cuslide@${NEXT_FULL_TAG}.so#g" cucim.code-workspace
sed_runner "s#branch-${CURRENT_MAJOR}.${CURRENT_MINOR}#branch-${NEXT_MAJOR}.${NEXT_MINOR}#g" README.md
sed_runner "s#branch-${CURRENT_MAJOR}.${CURRENT_MINOR}#branch-${NEXT_MAJOR}.${NEXT_MINOR}#g" python/cucim/README.md
sed_runner "s#branch-${CURRENT_MAJOR}.${CURRENT_MINOR}#branch-${NEXT_MAJOR}.${NEXT_MINOR}#g" python/cucim/pyproject.toml
for FILE in .github/workflows/*.yaml; do
sed_runner "/shared-workflows/ s/@.*/@branch-${NEXT_SHORT_TAG}/g" "${FILE}"
done
sed_runner "s/RAPIDS_VERSION_NUMBER=\".*/RAPIDS_VERSION_NUMBER=\"${NEXT_SHORT_TAG}\"/g" ci/build_docs.sh
| 0 |
rapidsai_public_repos/cucim/examples | rapidsai_public_repos/cucim/examples/python/distance_transform_edt_demo.py | import math
import cupy as cp
import numpy as np
try:
import colorcet
import matplotlib.pyplot as plt
except ImportError as e:
print("This demo requires the matplotlib and colorcet packages.")
raise (e)
from skimage import data
from cucim.core.operations.morphology import distance_transform_edt
from cucim.skimage.color import label2rgb
from cucim.skimage.segmentation import relabel_sequential
def coords_to_labels(coords):
    """
    Convert coordinate output of distance_transform_edt to unique region
    labels.

    Parameters
    ----------
    coords : ndarray
        Array of shape (2, H, W) as returned by
        ``distance_transform_edt(..., return_indices=True)``; ``coords[0]``
        holds the row index and ``coords[1]`` the column index of the
        nearest seed point for each pixel.

    Returns
    -------
    labels : ndarray
        Integer image where each distinct (row, col) seed pair maps to a
        distinct sequential label.
    """
    if coords.shape[0] != 2:
        raise ValueError("this utility function assumes 2D coordinates")
    # Encode each (row, col) pair as a unique integer via row-major pairing.
    # The previous additive form ``coords[1] + coords[0] + const`` collided
    # for pairs such as (0, 1) and (1, 0), merging distinct Voronoi cells.
    labels = coords[0] * (coords[1].max() + 1) + coords[1]
    # convert to sequential labels
    return relabel_sequential(labels)[0]
# Roughly 0.1% of pixels become foreground "seed" points on a 200x200 grid.
shape = (200, 200)
size = math.prod(shape)
ntrue = 0.001 * size
p_true = ntrue / size
p_false = 1 - p_true
# generate a sparse set of background points
cp.random.seed(123)
image = cp.random.choice([0, 1], size=shape, p=(p_false, p_true))
# Distance (and nearest-seed coordinates) from every background pixel.
distances, coords = distance_transform_edt(
    image == 0, return_distances=True, return_indices=True
)
# plt.figure(); plt.show(distances.get()); plt.show()
# create "labels" image based on locations of unique coordinates
labels = coords_to_labels(coords)
# Note: The code above this point should be fast on the GPU, but the
# code below for visualizing the colored Voronoi cells has not been
# optimized and may run slowly for larger image sizes.
# Colorize the labels image, using a suitable categorical colormap
rgb_labels = label2rgb(labels, colors=colorcet.cm.glasbey.colors)
# copy to host and visualize results
image, distances, coords, rgb_labels = map(
    cp.asnumpy, (image, distances, coords, rgb_labels)
)
# set original point locations in rgb_labels to white
xx, yy = np.where(image)
for x, y in zip(xx, yy):
    rgb_labels[x, y, :] = 1
fig, axes = plt.subplots(2, 3, figsize=(8, 7))
axes[0][0].imshow(image, cmap=plt.cm.gray)
axes[0][0].set_title("seed points")
axes[0][1].imshow(distances, cmap=plt.cm.gray)
axes[0][1].set_title("Euclidean distance\n(to nearest seed)")
# NOTE(review): "neareset" typo in the two titles below — left unchanged
# here because titles are runtime strings.
axes[1][0].imshow(coords[0], cmap=plt.cm.gray)
axes[1][0].set_title("y coordinate\nof neareset seed")
axes[1][1].imshow(coords[1], cmap=plt.cm.gray)
axes[1][1].set_title("x coordinate\nof neareset seed")
axes[1][2].imshow(rgb_labels)
axes[1][2].set_title("discrete Voronoi")
for ax in axes.ravel():
    ax.set_axis_off()
# overlay larger markers at the seed points for better visibility
for x, y in zip(xx, yy):
    # overlay in image
    axes[0, 0].plot(y, x, "w.")
    # overlay in rgb_labels
    axes[1, 2].plot(y, x, "w.")
plt.tight_layout()
"""
As a second demo, we apply the distance transform to a binary image of a
horse (and its inverse). The distance transform computes the Euclidean distance
from each foreground point to the nearest background point.
"""
horse = data.horse()
horse_inv = ~horse
distances = distance_transform_edt(
    cp.asarray(horse), return_distances=True, return_indices=False
)
distances_inv = distance_transform_edt(
    cp.asarray(horse_inv), return_distances=True, return_indices=False
)
# Move both distance maps back to the host for plotting.
distances = cp.asnumpy(distances)
distances_inv = cp.asnumpy(distances_inv)
fig, axes = plt.subplots(2, 2, figsize=(7, 7))
axes[0][0].imshow(horse_inv, cmap=plt.cm.gray)
axes[0][0].set_title("Foreground horse")
axes[0][1].imshow(horse, cmap=plt.cm.gray)
axes[0][1].set_title("Background horse")
axes[1][0].imshow(distances_inv)
axes[1][0].set_title("Distance\n(foreground horse)")
axes[1][1].imshow(distances)
axes[1][1].set_title("Distance\n(background horse)")
for ax in axes.ravel():
    ax.set_axis_off()
plt.tight_layout()
plt.show()
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.