Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/includes/top_k_utils.h +15 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py +8 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp +195 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cuh +47 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h +65 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py +73 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py +76 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp +45 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.py +53 -0
- valley/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 +3 -0
- vlmpy310/lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86_64-linux-gnu.so +3 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/_image_stack.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/_io.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/collection.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/manage_plugins.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/sift.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/util.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/fits_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/gdal_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/imageio_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/imread_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/matplotlib_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/pil_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/simpleitk_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/tifffile_plugin.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/imageio_plugin.py +14 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/matplotlib_plugin.py +220 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/pil_plugin.ini +3 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/pil_plugin.py +261 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/simpleitk_plugin.ini +3 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/tifffile_plugin.ini +3 -0
- vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/tifffile_plugin.py +74 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/__init__.py +61 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/__pycache__/_regular_grid.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/__pycache__/lookfor.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/_invert.py +74 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/_label.py +51 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/_map_array.py +199 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/_montage.py +158 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/_regular_grid.py +114 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/_slice_along_axes.py +86 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/apply_parallel.py +213 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/arraycrop.py +72 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/compare.py +132 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/dtype.py +600 -0
- vlmpy310/lib/python3.10/site-packages/skimage/util/lookfor.py +30 -0
.gitattributes
CHANGED
|
@@ -1200,3 +1200,5 @@ vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpytho
|
|
| 1200 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_slic.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1201 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1202 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 1200 |
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_slic.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1201 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1202 |
llava_next/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1203 |
+
vlmpy310/lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1204 |
+
valley/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (237 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/includes/top_k_utils.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#define TOP_K_SWITCH(N_TOP_K, ...) \
|
| 7 |
+
[&] { \
|
| 8 |
+
if (1 == N_TOP_K) { \
|
| 9 |
+
constexpr int CONST_TOP_K = 1; \
|
| 10 |
+
__VA_ARGS__(); \
|
| 11 |
+
} else if (2 == N_TOP_K) { \
|
| 12 |
+
constexpr int CONST_TOP_K = 2; \
|
| 13 |
+
__VA_ARGS__(); \
|
| 14 |
+
} \
|
| 15 |
+
}()
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .blocked_kv_rotary import *
|
| 7 |
+
from .blocked_trained_kv_rotary import *
|
| 8 |
+
from .linear_blocked_kv_copy import *
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "blocked_kv_rotary.h"
|
| 7 |
+
#include "ragged_kernel_helpers.h"
|
| 8 |
+
|
| 9 |
+
#define DISPATCH_KV_ROTARY(T_TYPE, C_TYPE) \
|
| 10 |
+
if (q.options().dtype() == torch::T_TYPE) { \
|
| 11 |
+
launch_kv_rotary_kernel<C_TYPE>((C_TYPE*)kv_cache.data_ptr(), \
|
| 12 |
+
(C_TYPE*)q.data_ptr(), \
|
| 13 |
+
(C_TYPE*)k.data_ptr(), \
|
| 14 |
+
(C_TYPE*)v.data_ptr(), \
|
| 15 |
+
(C_TYPE*)inv_freq_ptr, \
|
| 16 |
+
rotary_dim, \
|
| 17 |
+
theta_base, \
|
| 18 |
+
batch_wrapper, \
|
| 19 |
+
qkv_stride, \
|
| 20 |
+
kv_cache_stride, \
|
| 21 |
+
v_offset, \
|
| 22 |
+
inv_freq_stride, \
|
| 23 |
+
q_ratio, \
|
| 24 |
+
head_size, \
|
| 25 |
+
n_tokens, \
|
| 26 |
+
n_q_heads, \
|
| 27 |
+
at::cuda::getCurrentCUDAStream()); \
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
/*
|
| 31 |
+
Rotary position embeddings + copy into KV cache. This implementation assumes
|
| 32 |
+
that the inverse frequencies should be ready from global memory rather than
|
| 33 |
+
synthesized in the kernel.
|
| 34 |
+
|
| 35 |
+
Arguments:
|
| 36 |
+
kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
|
| 37 |
+
q: [n_tokens, n_q_heads * head_size]
|
| 38 |
+
k: [n_tokens, n_kv_heads * head_size]
|
| 39 |
+
v: [n_tokens, n_kv_heads * head_size]
|
| 40 |
+
inv_freq: [max_seq_len, head_size // 2]
|
| 41 |
+
*/
|
| 42 |
+
void kv_trained_rotary_embeddings(torch::Tensor& kv_cache,
|
| 43 |
+
torch::Tensor& q,
|
| 44 |
+
torch::Tensor& k,
|
| 45 |
+
torch::Tensor& v,
|
| 46 |
+
torch::Tensor& inv_freq,
|
| 47 |
+
torch::Tensor& batch_metadata,
|
| 48 |
+
torch::Tensor& seq_metadata,
|
| 49 |
+
torch::Tensor& tokens_to_seq,
|
| 50 |
+
torch::Tensor& kv_ptrs)
|
| 51 |
+
{
|
| 52 |
+
const int32_t n_tokens = q.size(0);
|
| 53 |
+
TORCH_CHECK(n_tokens == k.size(0));
|
| 54 |
+
TORCH_CHECK(n_tokens == v.size(0));
|
| 55 |
+
|
| 56 |
+
const float theta_base = 0.f;
|
| 57 |
+
const int32_t rotary_dim = inv_freq.size(0) * 2;
|
| 58 |
+
|
| 59 |
+
// Dimensions
|
| 60 |
+
const int32_t block_size = kv_cache.size(1);
|
| 61 |
+
const int32_t n_kv_heads = kv_cache.size(3);
|
| 62 |
+
const int32_t head_size = kv_cache.size(4);
|
| 63 |
+
|
| 64 |
+
// Strides
|
| 65 |
+
const int32_t qkv_stride = q.stride(0); // Per token
|
| 66 |
+
const int32_t kv_cache_stride = kv_cache.stride(1); // Per token
|
| 67 |
+
const int32_t v_offset = kv_cache.stride(2); // From k_cache to v_cache
|
| 68 |
+
const int32_t inv_freq_stride = inv_freq.stride(0); // Per token idx
|
| 69 |
+
|
| 70 |
+
const int n_q_heads = q.size(1) / head_size;
|
| 71 |
+
const int q_ratio = n_q_heads / n_kv_heads;
|
| 72 |
+
|
| 73 |
+
void* inv_freq_ptr = (void*)inv_freq.data_ptr();
|
| 74 |
+
|
| 75 |
+
BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper(
|
| 76 |
+
batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0));
|
| 77 |
+
|
| 78 |
+
DISPATCH_KV_ROTARY(kHalf, __half);
|
| 79 |
+
|
| 80 |
+
#ifdef BF16_AVAILABLE
|
| 81 |
+
DISPATCH_KV_ROTARY(kBFloat16, __nv_bfloat16);
|
| 82 |
+
#endif
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
/*
|
| 86 |
+
Rotary position embeddings + copy into KV cache. This implementation assumes
|
| 87 |
+
that the inverse frequencies should be synthesized in the kernel.
|
| 88 |
+
|
| 89 |
+
Arguments:
|
| 90 |
+
kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
|
| 91 |
+
q: [n_tokens, n_q_heads * head_size]
|
| 92 |
+
k: [n_tokens, n_kv_heads * head_size]
|
| 93 |
+
v: [n_tokens, n_kv_heads * head_size]
|
| 94 |
+
*/
|
| 95 |
+
void kv_rotary_embeddings(torch::Tensor& kv_cache,
|
| 96 |
+
torch::Tensor& q,
|
| 97 |
+
torch::Tensor& k,
|
| 98 |
+
torch::Tensor& v,
|
| 99 |
+
const int32_t rotary_dim,
|
| 100 |
+
const float theta_base,
|
| 101 |
+
torch::Tensor& batch_metadata,
|
| 102 |
+
torch::Tensor& seq_metadata,
|
| 103 |
+
torch::Tensor& tokens_to_seq,
|
| 104 |
+
torch::Tensor& kv_ptrs)
|
| 105 |
+
{
|
| 106 |
+
const int32_t n_tokens = q.size(0);
|
| 107 |
+
TORCH_CHECK(n_tokens == k.size(0));
|
| 108 |
+
TORCH_CHECK(n_tokens == v.size(0));
|
| 109 |
+
|
| 110 |
+
// Dimensions
|
| 111 |
+
const int32_t block_size = kv_cache.size(1);
|
| 112 |
+
const int32_t n_kv_heads = kv_cache.size(3);
|
| 113 |
+
const int32_t head_size = kv_cache.size(4);
|
| 114 |
+
|
| 115 |
+
// Strides
|
| 116 |
+
const int32_t qkv_stride = q.stride(0); // Per token
|
| 117 |
+
const int32_t kv_cache_stride = kv_cache.stride(1); // Per token
|
| 118 |
+
const int32_t v_offset = kv_cache.stride(2); // From k_cache to v_cache
|
| 119 |
+
const int32_t inv_freq_stride = 0; // Per token idx
|
| 120 |
+
|
| 121 |
+
const int n_q_heads = q.size(1) / head_size;
|
| 122 |
+
const int q_ratio = n_q_heads / n_kv_heads;
|
| 123 |
+
|
| 124 |
+
void* inv_freq_ptr = nullptr;
|
| 125 |
+
|
| 126 |
+
BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper(
|
| 127 |
+
batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0));
|
| 128 |
+
|
| 129 |
+
DISPATCH_KV_ROTARY(kHalf, __half);
|
| 130 |
+
|
| 131 |
+
#ifdef BF16_AVAILABLE
|
| 132 |
+
DISPATCH_KV_ROTARY(kBFloat16, __nv_bfloat16);
|
| 133 |
+
#endif
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
#define DISPATCH_KV_COPY(T_TYPE, C_TYPE) \
|
| 137 |
+
if (q.options().dtype() == torch::T_TYPE) { \
|
| 138 |
+
launch_kv_copy_kernel<C_TYPE>((C_TYPE*)kv_cache.data_ptr(), \
|
| 139 |
+
(C_TYPE*)q.data_ptr(), \
|
| 140 |
+
(C_TYPE*)k.data_ptr(), \
|
| 141 |
+
(C_TYPE*)v.data_ptr(), \
|
| 142 |
+
batch_wrapper, \
|
| 143 |
+
qkv_stride, \
|
| 144 |
+
kv_cache_stride, \
|
| 145 |
+
v_offset, \
|
| 146 |
+
q_ratio, \
|
| 147 |
+
head_size, \
|
| 148 |
+
n_tokens, \
|
| 149 |
+
n_q_heads, \
|
| 150 |
+
at::cuda::getCurrentCUDAStream()); \
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
/*
|
| 154 |
+
Copy into linear KV cache.
|
| 155 |
+
*/
|
| 156 |
+
void linear_kv_copy(torch::Tensor& kv_cache,
|
| 157 |
+
torch::Tensor& q,
|
| 158 |
+
torch::Tensor& k,
|
| 159 |
+
torch::Tensor& v,
|
| 160 |
+
torch::Tensor& batch_metadata,
|
| 161 |
+
torch::Tensor& seq_metadata,
|
| 162 |
+
torch::Tensor& tokens_to_seq,
|
| 163 |
+
torch::Tensor& kv_ptrs)
|
| 164 |
+
{
|
| 165 |
+
const int32_t n_tokens = q.size(0);
|
| 166 |
+
TORCH_CHECK(n_tokens == k.size(0));
|
| 167 |
+
TORCH_CHECK(n_tokens == v.size(0));
|
| 168 |
+
|
| 169 |
+
// Dimensions
|
| 170 |
+
const int32_t block_size = kv_cache.size(1);
|
| 171 |
+
const int32_t n_kv_heads = kv_cache.size(3);
|
| 172 |
+
const int32_t head_size = kv_cache.size(4);
|
| 173 |
+
|
| 174 |
+
// Strides
|
| 175 |
+
const int32_t qkv_stride = q.stride(0); // Per token
|
| 176 |
+
TORCH_CHECK(qkv_stride == k.stride(0));
|
| 177 |
+
TORCH_CHECK(qkv_stride == v.stride(0));
|
| 178 |
+
|
| 179 |
+
const int32_t kv_cache_stride = kv_cache.stride(1); // Per token
|
| 180 |
+
const int32_t v_offset = kv_cache.stride(2); // From k_cache to v_cache
|
| 181 |
+
|
| 182 |
+
const int n_q_heads = q.size(1) / head_size;
|
| 183 |
+
|
| 184 |
+
TORCH_CHECK(n_q_heads % n_kv_heads == 0);
|
| 185 |
+
const int q_ratio = n_q_heads / n_kv_heads;
|
| 186 |
+
|
| 187 |
+
BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper(
|
| 188 |
+
batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0));
|
| 189 |
+
|
| 190 |
+
DISPATCH_KV_COPY(kHalf, __half);
|
| 191 |
+
|
| 192 |
+
#ifdef BF16_AVAILABLE
|
| 193 |
+
DISPATCH_KV_COPY(kBFloat16, __nv_bfloat16);
|
| 194 |
+
#endif
|
| 195 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cuh
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include "ds_kernel_utils.h"
|
| 9 |
+
#include "ragged_dtypes.h"
|
| 10 |
+
|
| 11 |
+
#ifdef BF16_AVAILABLE
|
| 12 |
+
#include <cuda_bf16.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
template <typename T>
|
| 16 |
+
void launch_kv_rotary_kernel(T* kv_cache,
|
| 17 |
+
T* q,
|
| 18 |
+
T* k,
|
| 19 |
+
T* v,
|
| 20 |
+
T* inv_freq,
|
| 21 |
+
const int32_t rotary_dim,
|
| 22 |
+
const float theta_base,
|
| 23 |
+
const BatchWrapperCPP batch_desc,
|
| 24 |
+
const int qkv_stride,
|
| 25 |
+
const int kv_cache_stride,
|
| 26 |
+
const int v_offset,
|
| 27 |
+
const int inv_freq_stride,
|
| 28 |
+
const int q_ratio,
|
| 29 |
+
const int head_size,
|
| 30 |
+
const int n_tokens,
|
| 31 |
+
const int n_q_heads,
|
| 32 |
+
cudaStream_t stream);
|
| 33 |
+
|
| 34 |
+
template <typename T>
|
| 35 |
+
void launch_kv_copy_kernel(T* kv_cache,
|
| 36 |
+
T* q,
|
| 37 |
+
T* k,
|
| 38 |
+
T* v,
|
| 39 |
+
const BatchWrapperCPP batch_desc,
|
| 40 |
+
const int qkv_stride,
|
| 41 |
+
const int kv_cache_stride,
|
| 42 |
+
const int v_offset,
|
| 43 |
+
const int q_ratio,
|
| 44 |
+
const int head_size,
|
| 45 |
+
const int n_tokens,
|
| 46 |
+
const int n_q_heads,
|
| 47 |
+
cudaStream_t stream);
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <c10/cuda/CUDAStream.h>
|
| 9 |
+
#include <torch/extension.h>
|
| 10 |
+
#include "blocked_kv_rotary.cuh"
|
| 11 |
+
|
| 12 |
+
/*
|
| 13 |
+
Rotary position embeddings + copy into KV cache. This implementation assumes
|
| 14 |
+
that the inverse frequencies should be ready from global memory rather than
|
| 15 |
+
synthesized in the kernel.
|
| 16 |
+
|
| 17 |
+
Arguments:
|
| 18 |
+
kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
|
| 19 |
+
q: [n_tokens, n_q_heads * head_size]
|
| 20 |
+
k: [n_tokens, n_kv_heads * head_size]
|
| 21 |
+
v: [n_tokens, n_kv_heads * head_size]
|
| 22 |
+
inv_freq: [max_seq_len, head_size // 2]
|
| 23 |
+
*/
|
| 24 |
+
void kv_trained_rotary_embeddings(torch::Tensor& kv_cache,
|
| 25 |
+
torch::Tensor& q,
|
| 26 |
+
torch::Tensor& k,
|
| 27 |
+
torch::Tensor& v,
|
| 28 |
+
torch::Tensor& inv_freq,
|
| 29 |
+
torch::Tensor& batch_metadata,
|
| 30 |
+
torch::Tensor& seq_metadata,
|
| 31 |
+
torch::Tensor& tokens_to_seq,
|
| 32 |
+
torch::Tensor& kv_ptrs);
|
| 33 |
+
|
| 34 |
+
/*
|
| 35 |
+
Rotary position embeddings + copy into KV cache. This implementation assumes
|
| 36 |
+
that the inverse frequencies should be synthesized in the kernel.
|
| 37 |
+
|
| 38 |
+
Arguments:
|
| 39 |
+
kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
|
| 40 |
+
q: [n_tokens, n_q_heads * head_size]
|
| 41 |
+
k: [n_tokens, n_kv_heads * head_size]
|
| 42 |
+
v: [n_tokens, n_kv_heads * head_size]
|
| 43 |
+
*/
|
| 44 |
+
void kv_rotary_embeddings(torch::Tensor& kv_cache,
|
| 45 |
+
torch::Tensor& q,
|
| 46 |
+
torch::Tensor& k,
|
| 47 |
+
torch::Tensor& v,
|
| 48 |
+
const int32_t rotary_dim,
|
| 49 |
+
const float theta_base,
|
| 50 |
+
torch::Tensor& batch_metadata,
|
| 51 |
+
torch::Tensor& seq_metadata,
|
| 52 |
+
torch::Tensor& tokens_to_seq,
|
| 53 |
+
torch::Tensor& kv_ptrs);
|
| 54 |
+
|
| 55 |
+
/*
|
| 56 |
+
Copy into linear KV cache.
|
| 57 |
+
*/
|
| 58 |
+
void linear_kv_copy(torch::Tensor& kv_cache,
|
| 59 |
+
torch::Tensor& q,
|
| 60 |
+
torch::Tensor& k,
|
| 61 |
+
torch::Tensor& v,
|
| 62 |
+
torch::Tensor& batch_metadata,
|
| 63 |
+
torch::Tensor& seq_metadata,
|
| 64 |
+
torch::Tensor& tokens_to_seq,
|
| 65 |
+
torch::Tensor& kv_ptrs);
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ....inference_utils import DtypeEnum
|
| 9 |
+
from deepspeed.ops.op_builder import RaggedOpsBuilder
|
| 10 |
+
from ....ragged import RaggedBatchWrapper
|
| 11 |
+
from ... import DSKernelBase
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BlockedRotaryEmbeddings(DSKernelBase):
|
| 15 |
+
"""
|
| 16 |
+
CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys
|
| 17 |
+
before copying into a blocked KV cache.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
|
| 21 |
+
supported_head_sizes = [64, 80, 128]
|
| 22 |
+
supported_q_ratios = [1, 2, 4, 5, 8, 16, 29, 35, 36, 71]
|
| 23 |
+
|
| 24 |
+
def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype, rotary_dim: int,
|
| 25 |
+
theta_base: float) -> None:
|
| 26 |
+
"""
|
| 27 |
+
Args:
|
| 28 |
+
head_size: The size of the attention head.
|
| 29 |
+
q_ratio: Ratio of q heads to kv heads (for GQA)
|
| 30 |
+
dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
q_ratio = n_q_heads // n_kv_heads
|
| 34 |
+
|
| 35 |
+
if head_size not in BlockedRotaryEmbeddings.supported_head_sizes:
|
| 36 |
+
raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format(
|
| 37 |
+
head_size, BlockedRotaryEmbeddings.supported_head_sizes))
|
| 38 |
+
|
| 39 |
+
if q_ratio not in BlockedRotaryEmbeddings.supported_q_ratios:
|
| 40 |
+
raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format(
|
| 41 |
+
q_ratio, BlockedRotaryEmbeddings.supported_q_ratios))
|
| 42 |
+
|
| 43 |
+
if not isinstance(dtype, DtypeEnum):
|
| 44 |
+
dtype = DtypeEnum(dtype)
|
| 45 |
+
|
| 46 |
+
if dtype not in BlockedRotaryEmbeddings.supported_dtypes:
|
| 47 |
+
raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
|
| 48 |
+
dtype, BlockedRotaryEmbeddings.supported_dtypes))
|
| 49 |
+
|
| 50 |
+
inf_module = RaggedOpsBuilder().load()
|
| 51 |
+
self.kernel = inf_module.kv_rotary_embeddings
|
| 52 |
+
self.head_size = head_size
|
| 53 |
+
self.n_q_heads = n_q_heads
|
| 54 |
+
self.n_kv_heads = n_kv_heads
|
| 55 |
+
self.rotary_dim = rotary_dim
|
| 56 |
+
self.theta_base = theta_base
|
| 57 |
+
|
| 58 |
+
def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper) -> None:
|
| 59 |
+
"""
|
| 60 |
+
Perform rotary embeddings on the queries and keys before copying into a blocked KV cache.
|
| 61 |
+
|
| 62 |
+
Args:
|
| 63 |
+
kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size]
|
| 64 |
+
qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)]
|
| 65 |
+
ragged_batch: Wrapper for the ragged batch.
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
q = qkv[:, :self.head_size * self.n_q_heads]
|
| 69 |
+
k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)]
|
| 70 |
+
v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):]
|
| 71 |
+
|
| 72 |
+
self.kernel(kv_cache, q, k, v, self.rotary_dim, self.theta_base, ragged_batch.batch_metadata_buffer(),
|
| 73 |
+
ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs())
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
# Copyright (c) Microsoft Corporation.
|
| 7 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 8 |
+
|
| 9 |
+
# DeepSpeed Team
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
|
| 13 |
+
from ....inference_utils import DtypeEnum
|
| 14 |
+
from deepspeed.ops.op_builder import RaggedOpsBuilder
|
| 15 |
+
from ....ragged import RaggedBatchWrapper
|
| 16 |
+
from ... import DSKernelBase
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class BlockedTrainedRotaryEmbeddings(DSKernelBase):
|
| 20 |
+
"""
|
| 21 |
+
CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys
|
| 22 |
+
before copying into a blocked KV cache.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
|
| 26 |
+
supported_head_sizes = [64, 80, 128]
|
| 27 |
+
supported_q_ratios = [1, 2, 4, 5, 8]
|
| 28 |
+
|
| 29 |
+
def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype) -> None:
|
| 30 |
+
"""
|
| 31 |
+
Args:
|
| 32 |
+
head_size: The size of the attention head.
|
| 33 |
+
dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
q_ratio = n_q_heads // n_kv_heads
|
| 37 |
+
|
| 38 |
+
if head_size not in BlockedTrainedRotaryEmbeddings.supported_head_sizes:
|
| 39 |
+
raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format(
|
| 40 |
+
head_size, BlockedTrainedRotaryEmbeddings.supported_head_sizes))
|
| 41 |
+
|
| 42 |
+
if q_ratio not in BlockedTrainedRotaryEmbeddings.supported_q_ratios:
|
| 43 |
+
raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format(
|
| 44 |
+
q_ratio, BlockedTrainedRotaryEmbeddings.supported_q_ratios))
|
| 45 |
+
|
| 46 |
+
if not isinstance(dtype, DtypeEnum):
|
| 47 |
+
dtype = DtypeEnum(dtype)
|
| 48 |
+
|
| 49 |
+
if dtype not in BlockedTrainedRotaryEmbeddings.supported_dtypes:
|
| 50 |
+
raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
|
| 51 |
+
dtype, BlockedTrainedRotaryEmbeddings.supported_dtypes))
|
| 52 |
+
|
| 53 |
+
inf_module = RaggedOpsBuilder().load()
|
| 54 |
+
self.kernel = inf_module.kv_trained_rotary_embeddings
|
| 55 |
+
self.head_size = head_size
|
| 56 |
+
self.n_q_heads = n_q_heads
|
| 57 |
+
self.n_kv_heads = n_kv_heads
|
| 58 |
+
|
| 59 |
+
def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper,
|
| 60 |
+
inverse_freqs: torch.Tensor) -> None:
|
| 61 |
+
"""
|
| 62 |
+
Perform rotary embeddings on the queries and keys before copying into a blocked KV cache.
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size]
|
| 66 |
+
qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)]
|
| 67 |
+
ragged_batch: Wrapper for the ragged batch.
|
| 68 |
+
inverse_freqs: Inverse frequencies for the rotary embeddings. Shape [max_seq_len, rotary_dim // 2]
|
| 69 |
+
"""
|
| 70 |
+
|
| 71 |
+
q = qkv[:, :self.head_size * self.n_q_heads]
|
| 72 |
+
k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)]
|
| 73 |
+
v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):]
|
| 74 |
+
|
| 75 |
+
self.kernel(kv_cache, q, k, v, inverse_freqs, ragged_batch.batch_metadata_buffer(),
|
| 76 |
+
ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs())
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "logits_gather.h"
|
| 7 |
+
|
| 8 |
+
#define DISPATCH_TO_LOGITS_GATHER(T_TYPE, C_TYPE) \
|
| 9 |
+
if (all_acts.options().dtype() == torch::T_TYPE) { \
|
| 10 |
+
launch_logits_gather((C_TYPE*)final_token_acts.data_ptr(), \
|
| 11 |
+
(const C_TYPE*)all_acts.data_ptr(), \
|
| 12 |
+
batch_metadata_raw, \
|
| 13 |
+
seq_metadata_raw, \
|
| 14 |
+
n_seqs, \
|
| 15 |
+
embed_dim, \
|
| 16 |
+
at::cuda::getCurrentCUDAStream()); \
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
/*
|
| 20 |
+
Logits gather will parse the ragged batch data structure and gather only the logits that
|
| 21 |
+
will be used for token sampling.
|
| 22 |
+
*/
|
| 23 |
+
void gather_for_logits(torch::Tensor& final_token_acts,
|
| 24 |
+
torch::Tensor& all_acts,
|
| 25 |
+
torch::Tensor& batch_metadata,
|
| 26 |
+
torch::Tensor& seq_metadata)
|
| 27 |
+
{
|
| 28 |
+
const RaggedBatchDescriptor* batch_metadata_raw =
|
| 29 |
+
reinterpret_cast<const RaggedBatchDescriptor*>(batch_metadata.data_ptr());
|
| 30 |
+
|
| 31 |
+
const InflightSeqDescriptor* seq_metadata_raw =
|
| 32 |
+
reinterpret_cast<const InflightSeqDescriptor*>(seq_metadata.data_ptr());
|
| 33 |
+
|
| 34 |
+
const int n_seqs = final_token_acts.size(0);
|
| 35 |
+
const int embed_dim = final_token_acts.size(1);
|
| 36 |
+
|
| 37 |
+
TORCH_CHECK(all_acts.scalar_type() == final_token_acts.scalar_type(),
|
| 38 |
+
"all_acts and final_token_acts must have the same scalar type");
|
| 39 |
+
|
| 40 |
+
DISPATCH_TO_LOGITS_GATHER(kFloat, float)
|
| 41 |
+
DISPATCH_TO_LOGITS_GATHER(kHalf, half)
|
| 42 |
+
#ifdef BF16_AVAILABLE
|
| 43 |
+
DISPATCH_TO_LOGITS_GATHER(kBFloat16, __nv_bfloat16)
|
| 44 |
+
#endif
|
| 45 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ... import DSKernelBase
|
| 9 |
+
from ....inference_utils import DtypeEnum
|
| 10 |
+
from deepspeed.ops.op_builder import RaggedOpsBuilder
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class MoEGather(DSKernelBase):
|
| 14 |
+
"""
|
| 15 |
+
CUDA implementation of MoE gather. This will bring the tokens back
|
| 16 |
+
to their original indices and perform the output scaling.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
|
| 20 |
+
|
| 21 |
+
def __init__(self, dtype: DtypeEnum, channels: int, normalize_scores: bool = False) -> None:
|
| 22 |
+
|
| 23 |
+
if not isinstance(dtype, DtypeEnum):
|
| 24 |
+
dtype = DtypeEnum(dtype)
|
| 25 |
+
|
| 26 |
+
if dtype not in MoEGather.supported_dtypes:
|
| 27 |
+
raise RuntimeError(f"Unsupported dtype {dtype}")
|
| 28 |
+
|
| 29 |
+
if channels % 8 != 0:
|
| 30 |
+
raise RuntimeError(f"Channels {channels} must be divisible by 8")
|
| 31 |
+
|
| 32 |
+
inf_module = RaggedOpsBuilder().load()
|
| 33 |
+
self.kernel = inf_module.moe_gather
|
| 34 |
+
self.normalize_scores = normalize_scores
|
| 35 |
+
|
| 36 |
+
def __call__(self, layer_output: torch.Tensor, moe_output: torch.Tensor, scores: torch.Tensor,
|
| 37 |
+
mapped_slots: torch.Tensor, expert_counts: torch.Tensor) -> torch.Tensor:
|
| 38 |
+
"""
|
| 39 |
+
Reorders the moe_output tokens into their original order and scales them by their
|
| 40 |
+
gating scale. This will be a no-op for padded tokens.
|
| 41 |
+
|
| 42 |
+
Arguments:
|
| 43 |
+
layer_output (torch.Tensor): The output of the layer of shape [n_tokens, hidden_size]. This has been scaled appropriately.
|
| 44 |
+
moe_output (torch.Tensor): The output of the MoE of shape [n_tokens * n_top_k, hidden_size].
|
| 45 |
+
scores (torch.Tensor): The gating scores of shape [n_tokens].
|
| 46 |
+
mapped_slots (torch.Tensor): The index of the token in the expert's input of shape [n_tokens, n_top_k]. The indices of token ``i`` in layer_output is ``mapped_slots[i]``.
|
| 47 |
+
expert_counts (torch.Tensor): The number of tokens assigned to each expert of shape [n_experts]. This is passed to fuse the clearing of this data structure into the gather.
|
| 48 |
+
|
| 49 |
+
Returns:
|
| 50 |
+
layer_output
|
| 51 |
+
"""
|
| 52 |
+
self.kernel(layer_output, moe_output, scores, mapped_slots, expert_counts, self.normalize_scores)
|
| 53 |
+
return layer_output
|
valley/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:242b9dba953ae2e4878d66032624135a9118a1616ca24588ed586d4bcc475c69
|
| 3 |
+
size 108421928
|
vlmpy310/lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:801c49b24de8fc0ba1bf08b8b1fbba382818127ed9040037a2a90f68b7155f02
|
| 3 |
+
size 267568
|
vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.11 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/_image_stack.cpython-310.pyc
ADDED
|
Binary file (830 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/_io.cpython-310.pyc
ADDED
|
Binary file (7.86 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/collection.cpython-310.pyc
ADDED
|
Binary file (15.6 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/manage_plugins.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/sift.cpython-310.pyc
ADDED
|
Binary file (2.28 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (1.36 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/fits_plugin.cpython-310.pyc
ADDED
|
Binary file (3.61 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/gdal_plugin.cpython-310.pyc
ADDED
|
Binary file (550 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/imageio_plugin.cpython-310.pyc
ADDED
|
Binary file (549 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/imread_plugin.cpython-310.pyc
ADDED
|
Binary file (1.16 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/matplotlib_plugin.cpython-310.pyc
ADDED
|
Binary file (6.22 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/pil_plugin.cpython-310.pyc
ADDED
|
Binary file (6.27 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/simpleitk_plugin.cpython-310.pyc
ADDED
|
Binary file (761 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/__pycache__/tifffile_plugin.cpython-310.pyc
ADDED
|
Binary file (2.18 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/imageio_plugin.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ['imread', 'imsave']
|
| 2 |
+
|
| 3 |
+
from functools import wraps
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from imageio.v3 import imread as imageio_imread, imwrite as imsave
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@wraps(imageio_imread)
|
| 10 |
+
def imread(*args, **kwargs):
|
| 11 |
+
out = np.asarray(imageio_imread(*args, **kwargs))
|
| 12 |
+
if not out.flags['WRITEABLE']:
|
| 13 |
+
out = out.copy()
|
| 14 |
+
return out
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/matplotlib_plugin.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import namedtuple
|
| 2 |
+
import numpy as np
|
| 3 |
+
from ...util import dtype as dtypes
|
| 4 |
+
from ...exposure import is_low_contrast
|
| 5 |
+
from ..._shared.utils import warn
|
| 6 |
+
from math import floor, ceil
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
_default_colormap = 'gray'
|
| 10 |
+
_nonstandard_colormap = 'viridis'
|
| 11 |
+
_diverging_colormap = 'RdBu'
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
ImageProperties = namedtuple(
|
| 15 |
+
'ImageProperties',
|
| 16 |
+
['signed', 'out_of_range_float', 'low_data_range', 'unsupported_dtype'],
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _get_image_properties(image):
|
| 21 |
+
"""Determine nonstandard properties of an input image.
|
| 22 |
+
|
| 23 |
+
Parameters
|
| 24 |
+
----------
|
| 25 |
+
image : array
|
| 26 |
+
The input image.
|
| 27 |
+
|
| 28 |
+
Returns
|
| 29 |
+
-------
|
| 30 |
+
ip : ImageProperties named tuple
|
| 31 |
+
The properties of the image:
|
| 32 |
+
|
| 33 |
+
- signed: whether the image has negative values.
|
| 34 |
+
- out_of_range_float: if the image has floating point data
|
| 35 |
+
outside of [-1, 1].
|
| 36 |
+
- low_data_range: if the image is in the standard image
|
| 37 |
+
range (e.g. [0, 1] for a floating point image) but its
|
| 38 |
+
data range would be too small to display with standard
|
| 39 |
+
image ranges.
|
| 40 |
+
- unsupported_dtype: if the image data type is not a
|
| 41 |
+
standard skimage type, e.g. ``numpy.uint64``.
|
| 42 |
+
"""
|
| 43 |
+
immin, immax = np.min(image), np.max(image)
|
| 44 |
+
imtype = image.dtype.type
|
| 45 |
+
try:
|
| 46 |
+
lo, hi = dtypes.dtype_range[imtype]
|
| 47 |
+
except KeyError:
|
| 48 |
+
lo, hi = immin, immax
|
| 49 |
+
|
| 50 |
+
signed = immin < 0
|
| 51 |
+
out_of_range_float = np.issubdtype(image.dtype, np.floating) and (
|
| 52 |
+
immin < lo or immax > hi
|
| 53 |
+
)
|
| 54 |
+
low_data_range = immin != immax and is_low_contrast(image)
|
| 55 |
+
unsupported_dtype = image.dtype not in dtypes._supported_types
|
| 56 |
+
|
| 57 |
+
return ImageProperties(
|
| 58 |
+
signed, out_of_range_float, low_data_range, unsupported_dtype
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _raise_warnings(image_properties):
|
| 63 |
+
"""Raise the appropriate warning for each nonstandard image type.
|
| 64 |
+
|
| 65 |
+
Parameters
|
| 66 |
+
----------
|
| 67 |
+
image_properties : ImageProperties named tuple
|
| 68 |
+
The properties of the considered image.
|
| 69 |
+
"""
|
| 70 |
+
ip = image_properties
|
| 71 |
+
if ip.unsupported_dtype:
|
| 72 |
+
warn(
|
| 73 |
+
"Non-standard image type; displaying image with " "stretched contrast.",
|
| 74 |
+
stacklevel=3,
|
| 75 |
+
)
|
| 76 |
+
if ip.low_data_range:
|
| 77 |
+
warn(
|
| 78 |
+
"Low image data range; displaying image with " "stretched contrast.",
|
| 79 |
+
stacklevel=3,
|
| 80 |
+
)
|
| 81 |
+
if ip.out_of_range_float:
|
| 82 |
+
warn(
|
| 83 |
+
"Float image out of standard range; displaying "
|
| 84 |
+
"image with stretched contrast.",
|
| 85 |
+
stacklevel=3,
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _get_display_range(image):
|
| 90 |
+
"""Return the display range for a given set of image properties.
|
| 91 |
+
|
| 92 |
+
Parameters
|
| 93 |
+
----------
|
| 94 |
+
image : array
|
| 95 |
+
The input image.
|
| 96 |
+
|
| 97 |
+
Returns
|
| 98 |
+
-------
|
| 99 |
+
lo, hi : same type as immin, immax
|
| 100 |
+
The display range to be used for the input image.
|
| 101 |
+
cmap : string
|
| 102 |
+
The name of the colormap to use.
|
| 103 |
+
"""
|
| 104 |
+
ip = _get_image_properties(image)
|
| 105 |
+
immin, immax = np.min(image), np.max(image)
|
| 106 |
+
if ip.signed:
|
| 107 |
+
magnitude = max(abs(immin), abs(immax))
|
| 108 |
+
lo, hi = -magnitude, magnitude
|
| 109 |
+
cmap = _diverging_colormap
|
| 110 |
+
elif any(ip):
|
| 111 |
+
_raise_warnings(ip)
|
| 112 |
+
lo, hi = immin, immax
|
| 113 |
+
cmap = _nonstandard_colormap
|
| 114 |
+
else:
|
| 115 |
+
lo = 0
|
| 116 |
+
imtype = image.dtype.type
|
| 117 |
+
hi = dtypes.dtype_range[imtype][1]
|
| 118 |
+
cmap = _default_colormap
|
| 119 |
+
return lo, hi, cmap
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def imshow(image, ax=None, show_cbar=None, **kwargs):
|
| 123 |
+
"""Show the input image and return the current axes.
|
| 124 |
+
|
| 125 |
+
By default, the image is displayed in grayscale, rather than
|
| 126 |
+
the matplotlib default colormap.
|
| 127 |
+
|
| 128 |
+
Images are assumed to have standard range for their type. For
|
| 129 |
+
example, if a floating point image has values in [0, 0.5], the
|
| 130 |
+
most intense color will be gray50, not white.
|
| 131 |
+
|
| 132 |
+
If the image exceeds the standard range, or if the range is too
|
| 133 |
+
small to display, we fall back on displaying exactly the range of
|
| 134 |
+
the input image, along with a colorbar to clearly indicate that
|
| 135 |
+
this range transformation has occurred.
|
| 136 |
+
|
| 137 |
+
For signed images, we use a diverging colormap centered at 0.
|
| 138 |
+
|
| 139 |
+
Parameters
|
| 140 |
+
----------
|
| 141 |
+
image : array, shape (M, N[, 3])
|
| 142 |
+
The image to display.
|
| 143 |
+
ax : `matplotlib.axes.Axes`, optional
|
| 144 |
+
The axis to use for the image, defaults to plt.gca().
|
| 145 |
+
show_cbar : boolean, optional.
|
| 146 |
+
Whether to show the colorbar (used to override default behavior).
|
| 147 |
+
**kwargs : Keyword arguments
|
| 148 |
+
These are passed directly to `matplotlib.pyplot.imshow`.
|
| 149 |
+
|
| 150 |
+
Returns
|
| 151 |
+
-------
|
| 152 |
+
ax_im : `matplotlib.pyplot.AxesImage`
|
| 153 |
+
The `AxesImage` object returned by `plt.imshow`.
|
| 154 |
+
"""
|
| 155 |
+
import matplotlib.pyplot as plt
|
| 156 |
+
from mpl_toolkits.axes_grid1 import make_axes_locatable
|
| 157 |
+
|
| 158 |
+
lo, hi, cmap = _get_display_range(image)
|
| 159 |
+
|
| 160 |
+
kwargs.setdefault('interpolation', 'nearest')
|
| 161 |
+
kwargs.setdefault('cmap', cmap)
|
| 162 |
+
kwargs.setdefault('vmin', lo)
|
| 163 |
+
kwargs.setdefault('vmax', hi)
|
| 164 |
+
|
| 165 |
+
ax = ax or plt.gca()
|
| 166 |
+
ax_im = ax.imshow(image, **kwargs)
|
| 167 |
+
if (cmap != _default_colormap and show_cbar is not False) or show_cbar:
|
| 168 |
+
divider = make_axes_locatable(ax)
|
| 169 |
+
cax = divider.append_axes("right", size="5%", pad=0.05)
|
| 170 |
+
plt.colorbar(ax_im, cax=cax)
|
| 171 |
+
ax.get_figure().tight_layout()
|
| 172 |
+
|
| 173 |
+
return ax_im
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def imshow_collection(ic, *args, **kwargs):
|
| 177 |
+
"""Display all images in the collection.
|
| 178 |
+
|
| 179 |
+
Returns
|
| 180 |
+
-------
|
| 181 |
+
fig : `matplotlib.figure.Figure`
|
| 182 |
+
The `Figure` object returned by `plt.subplots`.
|
| 183 |
+
"""
|
| 184 |
+
import matplotlib.pyplot as plt
|
| 185 |
+
|
| 186 |
+
if len(ic) < 1:
|
| 187 |
+
raise ValueError('Number of images to plot must be greater than 0')
|
| 188 |
+
|
| 189 |
+
# The target is to plot images on a grid with aspect ratio 4:3
|
| 190 |
+
num_images = len(ic)
|
| 191 |
+
# Two pairs of `nrows, ncols` are possible
|
| 192 |
+
k = (num_images * 12) ** 0.5
|
| 193 |
+
r1 = max(1, floor(k / 4))
|
| 194 |
+
r2 = ceil(k / 4)
|
| 195 |
+
c1 = ceil(num_images / r1)
|
| 196 |
+
c2 = ceil(num_images / r2)
|
| 197 |
+
# Select the one which is closer to 4:3
|
| 198 |
+
if abs(r1 / c1 - 0.75) < abs(r2 / c2 - 0.75):
|
| 199 |
+
nrows, ncols = r1, c1
|
| 200 |
+
else:
|
| 201 |
+
nrows, ncols = r2, c2
|
| 202 |
+
|
| 203 |
+
fig, axes = plt.subplots(nrows=nrows, ncols=ncols)
|
| 204 |
+
ax = np.asarray(axes).ravel()
|
| 205 |
+
for n, image in enumerate(ic):
|
| 206 |
+
ax[n].imshow(image, *args, **kwargs)
|
| 207 |
+
kwargs['ax'] = axes
|
| 208 |
+
return fig
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def imread(*args, **kwargs):
|
| 212 |
+
import matplotlib.image
|
| 213 |
+
|
| 214 |
+
return matplotlib.image.imread(*args, **kwargs)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def _app_show():
|
| 218 |
+
from matplotlib.pyplot import show
|
| 219 |
+
|
| 220 |
+
show()
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/pil_plugin.ini
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[pil]
|
| 2 |
+
description = Image reading via the Python Imaging Library
|
| 3 |
+
provides = imread, imsave
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/pil_plugin.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ['imread', 'imsave']
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from PIL import Image
|
| 5 |
+
|
| 6 |
+
from ...util import img_as_ubyte, img_as_uint
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def imread(fname, dtype=None, img_num=None, **kwargs):
|
| 10 |
+
"""Load an image from file.
|
| 11 |
+
|
| 12 |
+
Parameters
|
| 13 |
+
----------
|
| 14 |
+
fname : str or file
|
| 15 |
+
File name or file-like-object.
|
| 16 |
+
dtype : numpy dtype object or string specifier
|
| 17 |
+
Specifies data type of array elements.
|
| 18 |
+
img_num : int, optional
|
| 19 |
+
Specifies which image to read in a file with multiple images
|
| 20 |
+
(zero-indexed).
|
| 21 |
+
kwargs : keyword pairs, optional
|
| 22 |
+
Addition keyword arguments to pass through.
|
| 23 |
+
|
| 24 |
+
Notes
|
| 25 |
+
-----
|
| 26 |
+
Files are read using the Python Imaging Library.
|
| 27 |
+
See PIL docs [1]_ for a list of supported formats.
|
| 28 |
+
|
| 29 |
+
References
|
| 30 |
+
----------
|
| 31 |
+
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
|
| 32 |
+
"""
|
| 33 |
+
if isinstance(fname, str):
|
| 34 |
+
with open(fname, 'rb') as f:
|
| 35 |
+
im = Image.open(f)
|
| 36 |
+
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
|
| 37 |
+
else:
|
| 38 |
+
im = Image.open(fname)
|
| 39 |
+
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def pil_to_ndarray(image, dtype=None, img_num=None):
|
| 43 |
+
"""Import a PIL Image object to an ndarray, in memory.
|
| 44 |
+
|
| 45 |
+
Parameters
|
| 46 |
+
----------
|
| 47 |
+
Refer to ``imread``.
|
| 48 |
+
|
| 49 |
+
"""
|
| 50 |
+
try:
|
| 51 |
+
# this will raise an IOError if the file is not readable
|
| 52 |
+
image.getdata()[0]
|
| 53 |
+
except OSError as e:
|
| 54 |
+
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
|
| 55 |
+
pillow_error_message = str(e)
|
| 56 |
+
error_message = (
|
| 57 |
+
f"Could not load '{image.filename}' \n"
|
| 58 |
+
f"Reason: '{pillow_error_message}'\n"
|
| 59 |
+
f"Please see documentation at: {site}"
|
| 60 |
+
)
|
| 61 |
+
raise ValueError(error_message)
|
| 62 |
+
frames = []
|
| 63 |
+
grayscale = None
|
| 64 |
+
i = 0
|
| 65 |
+
while 1:
|
| 66 |
+
try:
|
| 67 |
+
image.seek(i)
|
| 68 |
+
except EOFError:
|
| 69 |
+
break
|
| 70 |
+
|
| 71 |
+
frame = image
|
| 72 |
+
|
| 73 |
+
if img_num is not None and img_num != i:
|
| 74 |
+
image.getdata()[0]
|
| 75 |
+
i += 1
|
| 76 |
+
continue
|
| 77 |
+
|
| 78 |
+
if image.format == 'PNG' and image.mode == 'I' and dtype is None:
|
| 79 |
+
dtype = 'uint16'
|
| 80 |
+
|
| 81 |
+
if image.mode == 'P':
|
| 82 |
+
if grayscale is None:
|
| 83 |
+
grayscale = _palette_is_grayscale(image)
|
| 84 |
+
|
| 85 |
+
if grayscale:
|
| 86 |
+
frame = image.convert('L')
|
| 87 |
+
else:
|
| 88 |
+
if image.format == 'PNG' and 'transparency' in image.info:
|
| 89 |
+
frame = image.convert('RGBA')
|
| 90 |
+
else:
|
| 91 |
+
frame = image.convert('RGB')
|
| 92 |
+
|
| 93 |
+
elif image.mode == '1':
|
| 94 |
+
frame = image.convert('L')
|
| 95 |
+
|
| 96 |
+
elif 'A' in image.mode:
|
| 97 |
+
frame = image.convert('RGBA')
|
| 98 |
+
|
| 99 |
+
elif image.mode == 'CMYK':
|
| 100 |
+
frame = image.convert('RGB')
|
| 101 |
+
|
| 102 |
+
if image.mode.startswith('I;16'):
|
| 103 |
+
shape = image.size
|
| 104 |
+
dtype = '>u2' if image.mode.endswith('B') else '<u2'
|
| 105 |
+
if 'S' in image.mode:
|
| 106 |
+
dtype = dtype.replace('u', 'i')
|
| 107 |
+
frame = np.frombuffer(frame.tobytes(), dtype)
|
| 108 |
+
frame.shape = shape[::-1]
|
| 109 |
+
|
| 110 |
+
else:
|
| 111 |
+
frame = np.array(frame, dtype=dtype)
|
| 112 |
+
|
| 113 |
+
frames.append(frame)
|
| 114 |
+
i += 1
|
| 115 |
+
|
| 116 |
+
if img_num is not None:
|
| 117 |
+
break
|
| 118 |
+
|
| 119 |
+
if hasattr(image, 'fp') and image.fp:
|
| 120 |
+
image.fp.close()
|
| 121 |
+
|
| 122 |
+
if img_num is None and len(frames) > 1:
|
| 123 |
+
return np.array(frames)
|
| 124 |
+
elif frames:
|
| 125 |
+
return frames[0]
|
| 126 |
+
elif img_num:
|
| 127 |
+
raise IndexError(f'Could not find image #{img_num}')
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def _palette_is_grayscale(pil_image):
|
| 131 |
+
"""Return True if PIL image in palette mode is grayscale.
|
| 132 |
+
|
| 133 |
+
Parameters
|
| 134 |
+
----------
|
| 135 |
+
pil_image : PIL image
|
| 136 |
+
PIL Image that is in Palette mode.
|
| 137 |
+
|
| 138 |
+
Returns
|
| 139 |
+
-------
|
| 140 |
+
is_grayscale : bool
|
| 141 |
+
True if all colors in image palette are gray.
|
| 142 |
+
"""
|
| 143 |
+
if pil_image.mode != 'P':
|
| 144 |
+
raise ValueError('pil_image.mode must be equal to "P".')
|
| 145 |
+
# get palette as an array with R, G, B columns
|
| 146 |
+
# Starting in pillow 9.1 palettes may have less than 256 entries
|
| 147 |
+
palette = np.asarray(pil_image.getpalette()).reshape((-1, 3))
|
| 148 |
+
# Not all palette colors are used; unused colors have junk values.
|
| 149 |
+
start, stop = pil_image.getextrema()
|
| 150 |
+
valid_palette = palette[start : stop + 1]
|
| 151 |
+
# Image is grayscale if channel differences (R - G and G - B)
|
| 152 |
+
# are all zero.
|
| 153 |
+
return np.allclose(np.diff(valid_palette), 0)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def ndarray_to_pil(arr, format_str=None):
|
| 157 |
+
"""Export an ndarray to a PIL object.
|
| 158 |
+
|
| 159 |
+
Parameters
|
| 160 |
+
----------
|
| 161 |
+
Refer to ``imsave``.
|
| 162 |
+
|
| 163 |
+
"""
|
| 164 |
+
if arr.ndim == 3:
|
| 165 |
+
arr = img_as_ubyte(arr)
|
| 166 |
+
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
|
| 167 |
+
|
| 168 |
+
elif format_str in ['png', 'PNG']:
|
| 169 |
+
mode = 'I;16'
|
| 170 |
+
mode_base = 'I'
|
| 171 |
+
|
| 172 |
+
if arr.dtype.kind == 'f':
|
| 173 |
+
arr = img_as_uint(arr)
|
| 174 |
+
|
| 175 |
+
elif arr.max() < 256 and arr.min() >= 0:
|
| 176 |
+
arr = arr.astype(np.uint8)
|
| 177 |
+
mode = mode_base = 'L'
|
| 178 |
+
|
| 179 |
+
else:
|
| 180 |
+
arr = img_as_uint(arr)
|
| 181 |
+
|
| 182 |
+
else:
|
| 183 |
+
arr = img_as_ubyte(arr)
|
| 184 |
+
mode = 'L'
|
| 185 |
+
mode_base = 'L'
|
| 186 |
+
|
| 187 |
+
try:
|
| 188 |
+
array_buffer = arr.tobytes()
|
| 189 |
+
except AttributeError:
|
| 190 |
+
array_buffer = arr.tostring() # Numpy < 1.9
|
| 191 |
+
|
| 192 |
+
if arr.ndim == 2:
|
| 193 |
+
im = Image.new(mode_base, arr.T.shape)
|
| 194 |
+
try:
|
| 195 |
+
im.frombytes(array_buffer, 'raw', mode)
|
| 196 |
+
except AttributeError:
|
| 197 |
+
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
|
| 198 |
+
else:
|
| 199 |
+
image_shape = (arr.shape[1], arr.shape[0])
|
| 200 |
+
try:
|
| 201 |
+
im = Image.frombytes(mode, image_shape, array_buffer)
|
| 202 |
+
except AttributeError:
|
| 203 |
+
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
|
| 204 |
+
return im
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def imsave(fname, arr, format_str=None, **kwargs):
|
| 208 |
+
"""Save an image to disk.
|
| 209 |
+
|
| 210 |
+
Parameters
|
| 211 |
+
----------
|
| 212 |
+
fname : str or file-like object
|
| 213 |
+
Name of destination file.
|
| 214 |
+
arr : ndarray of uint8 or float
|
| 215 |
+
Array (image) to save. Arrays of data-type uint8 should have
|
| 216 |
+
values in [0, 255], whereas floating-point arrays must be
|
| 217 |
+
in [0, 1].
|
| 218 |
+
format_str: str
|
| 219 |
+
Format to save as, this is defaulted to PNG if using a file-like
|
| 220 |
+
object; this will be derived from the extension if fname is a string
|
| 221 |
+
kwargs: dict
|
| 222 |
+
Keyword arguments to the Pillow save function (or tifffile save
|
| 223 |
+
function, for Tiff files). These are format dependent. For example,
|
| 224 |
+
Pillow's JPEG save function supports an integer ``quality`` argument
|
| 225 |
+
with values in [1, 95], while TIFFFile supports a ``compress``
|
| 226 |
+
integer argument with values in [0, 9].
|
| 227 |
+
|
| 228 |
+
Notes
|
| 229 |
+
-----
|
| 230 |
+
Use the Python Imaging Library.
|
| 231 |
+
See PIL docs [1]_ for a list of other supported formats.
|
| 232 |
+
All images besides single channel PNGs are converted using `img_as_uint8`.
|
| 233 |
+
Single Channel PNGs have the following behavior:
|
| 234 |
+
- Integer values in [0, 255] and Boolean types -> img_as_uint8
|
| 235 |
+
- Floating point and other integers -> img_as_uint16
|
| 236 |
+
|
| 237 |
+
References
|
| 238 |
+
----------
|
| 239 |
+
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
|
| 240 |
+
"""
|
| 241 |
+
# default to PNG if file-like object
|
| 242 |
+
if not isinstance(fname, str) and format_str is None:
|
| 243 |
+
format_str = "PNG"
|
| 244 |
+
# Check for png in filename
|
| 245 |
+
if isinstance(fname, str) and fname.lower().endswith(".png"):
|
| 246 |
+
format_str = "PNG"
|
| 247 |
+
|
| 248 |
+
arr = np.asanyarray(arr)
|
| 249 |
+
|
| 250 |
+
if arr.dtype.kind == 'b':
|
| 251 |
+
arr = arr.astype(np.uint8)
|
| 252 |
+
|
| 253 |
+
if arr.ndim not in (2, 3):
|
| 254 |
+
raise ValueError(f"Invalid shape for image array: {arr.shape}")
|
| 255 |
+
|
| 256 |
+
if arr.ndim == 3:
|
| 257 |
+
if arr.shape[2] not in (3, 4):
|
| 258 |
+
raise ValueError("Invalid number of channels in image array.")
|
| 259 |
+
|
| 260 |
+
img = ndarray_to_pil(arr, format_str=format_str)
|
| 261 |
+
img.save(fname, format=format_str, **kwargs)
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/simpleitk_plugin.ini
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[simpleitk]
|
| 2 |
+
description = Image reading and writing via SimpleITK
|
| 3 |
+
provides = imread, imsave
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/tifffile_plugin.ini
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[tifffile]
|
| 2 |
+
description = Load and save TIFF and TIFF-based images using tifffile.py
|
| 3 |
+
provides = imread, imsave
|
vlmpy310/lib/python3.10/site-packages/skimage/io/_plugins/tifffile_plugin.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tifffile import imread as tifffile_imread
|
| 2 |
+
from tifffile import imwrite as tifffile_imwrite
|
| 3 |
+
|
| 4 |
+
__all__ = ['imread', 'imsave']
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def imsave(fname, arr, **kwargs):
|
| 8 |
+
"""Load a tiff image to file.
|
| 9 |
+
|
| 10 |
+
Parameters
|
| 11 |
+
----------
|
| 12 |
+
fname : str or file
|
| 13 |
+
File name or file-like object.
|
| 14 |
+
arr : ndarray
|
| 15 |
+
The array to write.
|
| 16 |
+
kwargs : keyword pairs, optional
|
| 17 |
+
Additional keyword arguments to pass through (see ``tifffile``'s
|
| 18 |
+
``imwrite`` function).
|
| 19 |
+
|
| 20 |
+
Notes
|
| 21 |
+
-----
|
| 22 |
+
Provided by the tifffile library [1]_, and supports many
|
| 23 |
+
advanced image types including multi-page and floating-point.
|
| 24 |
+
|
| 25 |
+
This implementation will set ``photometric='RGB'`` when writing if the first
|
| 26 |
+
or last axis of `arr` has length 3 or 4. To override this, explicitly
|
| 27 |
+
pass the ``photometric`` kwarg.
|
| 28 |
+
|
| 29 |
+
This implementation will set ``planarconfig='SEPARATE'`` when writing if the
|
| 30 |
+
first axis of arr has length 3 or 4. To override this, explicitly
|
| 31 |
+
specify the ``planarconfig`` kwarg.
|
| 32 |
+
|
| 33 |
+
References
|
| 34 |
+
----------
|
| 35 |
+
.. [1] https://pypi.org/project/tifffile/
|
| 36 |
+
|
| 37 |
+
"""
|
| 38 |
+
if arr.shape[0] in [3, 4]:
|
| 39 |
+
if 'planarconfig' not in kwargs:
|
| 40 |
+
kwargs['planarconfig'] = 'SEPARATE'
|
| 41 |
+
rgb = True
|
| 42 |
+
else:
|
| 43 |
+
rgb = arr.shape[-1] in [3, 4]
|
| 44 |
+
if rgb and 'photometric' not in kwargs:
|
| 45 |
+
kwargs['photometric'] = 'RGB'
|
| 46 |
+
|
| 47 |
+
return tifffile_imwrite(fname, arr, **kwargs)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def imread(fname, **kwargs):
|
| 51 |
+
"""Load a tiff image from file.
|
| 52 |
+
|
| 53 |
+
Parameters
|
| 54 |
+
----------
|
| 55 |
+
fname : str or file
|
| 56 |
+
File name or file-like-object.
|
| 57 |
+
kwargs : keyword pairs, optional
|
| 58 |
+
Additional keyword arguments to pass through (see ``tifffile``'s
|
| 59 |
+
``imread`` function).
|
| 60 |
+
|
| 61 |
+
Notes
|
| 62 |
+
-----
|
| 63 |
+
Provided by the tifffile library [1]_, and supports many
|
| 64 |
+
advanced image types including multi-page and floating point.
|
| 65 |
+
|
| 66 |
+
References
|
| 67 |
+
----------
|
| 68 |
+
.. [1] https://pypi.org/project/tifffile/
|
| 69 |
+
|
| 70 |
+
"""
|
| 71 |
+
if 'img_num' in kwargs:
|
| 72 |
+
kwargs['key'] = kwargs.pop('img_num')
|
| 73 |
+
|
| 74 |
+
return tifffile_imread(fname, **kwargs)
|
vlmpy310/lib/python3.10/site-packages/skimage/util/__init__.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generic utilities.
|
| 2 |
+
|
| 3 |
+
This module contains a number of utility functions to work with images in general.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import functools
|
| 7 |
+
import warnings
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
# keep .dtype imports first to avoid circular imports
|
| 12 |
+
from .dtype import (
|
| 13 |
+
dtype_limits,
|
| 14 |
+
img_as_float,
|
| 15 |
+
img_as_float32,
|
| 16 |
+
img_as_float64,
|
| 17 |
+
img_as_bool,
|
| 18 |
+
img_as_int,
|
| 19 |
+
img_as_ubyte,
|
| 20 |
+
img_as_uint,
|
| 21 |
+
)
|
| 22 |
+
from ._slice_along_axes import slice_along_axes
|
| 23 |
+
from ._invert import invert
|
| 24 |
+
from ._label import label_points
|
| 25 |
+
from ._montage import montage
|
| 26 |
+
from ._map_array import map_array
|
| 27 |
+
from ._regular_grid import regular_grid, regular_seeds
|
| 28 |
+
from .apply_parallel import apply_parallel
|
| 29 |
+
from .arraycrop import crop
|
| 30 |
+
from .compare import compare_images
|
| 31 |
+
from .noise import random_noise
|
| 32 |
+
from .shape import view_as_blocks, view_as_windows
|
| 33 |
+
from .unique import unique_rows
|
| 34 |
+
from .lookfor import lookfor
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
__all__ = [
|
| 38 |
+
'img_as_float32',
|
| 39 |
+
'img_as_float64',
|
| 40 |
+
'img_as_float',
|
| 41 |
+
'img_as_int',
|
| 42 |
+
'img_as_uint',
|
| 43 |
+
'img_as_ubyte',
|
| 44 |
+
'img_as_bool',
|
| 45 |
+
'dtype_limits',
|
| 46 |
+
'view_as_blocks',
|
| 47 |
+
'view_as_windows',
|
| 48 |
+
'slice_along_axes',
|
| 49 |
+
'crop',
|
| 50 |
+
'compare_images',
|
| 51 |
+
'map_array',
|
| 52 |
+
'montage',
|
| 53 |
+
'random_noise',
|
| 54 |
+
'regular_grid',
|
| 55 |
+
'regular_seeds',
|
| 56 |
+
'apply_parallel',
|
| 57 |
+
'invert',
|
| 58 |
+
'unique_rows',
|
| 59 |
+
'label_points',
|
| 60 |
+
'lookfor',
|
| 61 |
+
]
|
vlmpy310/lib/python3.10/site-packages/skimage/util/__pycache__/_regular_grid.cpython-310.pyc
ADDED
|
Binary file (4.06 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/util/__pycache__/lookfor.cpython-310.pyc
ADDED
|
Binary file (1.04 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/util/_invert.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from .dtype import dtype_limits
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def invert(image, signed_float=False):
    """Invert an image.

    Invert the intensity range of the input image, so that the dtype maximum
    is now the dtype minimum, and vice-versa. The operation depends on the
    input dtype:

    - unsigned integers: subtract the image from the dtype maximum
    - signed integers: subtract the image from -1 (see Notes)
    - floats: subtract the image from 1 (if signed_float is False, so we
      assume the image is unsigned), or from 0 (if signed_float is True).

    Parameters
    ----------
    image : ndarray
        Input image.
    signed_float : bool, optional
        If True and the image is of type float, the range is assumed to
        be [-1, 1]. If False and the image is of type float, the range is
        assumed to be [0, 1].

    Returns
    -------
    inverted : ndarray
        Inverted image.

    Notes
    -----
    Ideally, for signed integers we would simply multiply by -1. However,
    signed integer ranges are asymmetric. For example, for np.int8, the range
    of possible values is [-128, 127], so that -128 * -1 equals -128! By
    subtracting from -1, we correctly map the maximum dtype value to the
    minimum.

    Examples
    --------
    >>> img = np.array([[ -2, 0, -128],
    ...                 [127, 0,    5]], np.int8)
    >>> invert(img)
    array([[   1,   -1,  127],
           [-128,   -1,   -6]], dtype=int8)
    >>> invert(np.array([[0., 1., 0.5, 0.75]]))
    array([[1.  , 0.  , 0.5 , 0.25]])
    """
    dtype = image.dtype
    if dtype == 'bool':
        # Boolean images invert with a plain bitwise NOT.
        return ~image
    if np.issubdtype(dtype, np.unsignedinteger):
        # Map 0 -> max and max -> 0 by subtracting from the dtype maximum.
        upper_bound = dtype_limits(image, clip_negative=False)[1]
        return np.subtract(upper_bound, image, dtype=dtype)
    if np.issubdtype(dtype, np.signedinteger):
        # Subtract from -1 so the asymmetric signed range maps correctly
        # (see Notes).
        return np.subtract(-1, image, dtype=dtype)
    # Float dtype: assumed range is [-1, 1] when signed_float, else [0, 1].
    if signed_float:
        return -image
    return np.subtract(1, image, dtype=dtype)
|
vlmpy310/lib/python3.10/site-packages/skimage/util/_label.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
__all__ = ["label_points"]
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def label_points(coords, output_shape):
    """Assign unique integer labels to coordinates on an image mask.

    Parameters
    ----------
    coords : ndarray
        An array of N coordinates with dimension D.
    output_shape : tuple
        The shape of the mask on which `coords` are labelled.

    Returns
    -------
    labels : ndarray
        A mask of zeroes containing unique integer labels at the `coords`.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util._label import label_points
    >>> label_points(np.array([[0, 1], [2, 2]]), (5, 5))
    array([[0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 2, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]], dtype=uint64)

    Notes
    -----
    - The labels are assigned to coordinates that are converted to
      integer and considered to start from 0.
    - Coordinates that are out of range of the mask raise an IndexError.
    - Negative coordinates raise a ValueError.
    """
    if coords.shape[1] != len(output_shape):
        raise ValueError("Dimensionality of points should match the output shape")
    if np.any(coords < 0):
        raise ValueError("Coordinates should be positive and start from 0")

    # Round each coordinate to the nearest pixel and use the transposed
    # coordinate list as a fancy index into the label image.
    pixel_idx = tuple(np.round(coords).astype(int, copy=False).T)
    labels = np.zeros(output_shape, dtype=np.uint64)
    labels[pixel_idx] = np.arange(1, len(coords) + 1)
    return labels
|
vlmpy310/lib/python3.10/site-packages/skimage/util/_map_array.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def map_array(input_arr, input_vals, output_vals, out=None):
    """Map values from input array from input_vals to output_vals.

    Parameters
    ----------
    input_arr : array of int, shape (M[, ...])
        The input label image.
    input_vals : array of int, shape (K,)
        The values to map from.
    output_vals : array, shape (K,)
        The values to map to.
    out : array, same shape as `input_arr`
        The output array. Will be created if not provided. It should
        have the same dtype as `output_vals`.

    Returns
    -------
    out : array, same shape as `input_arr`
        The array of mapped values.

    Notes
    -----
    If `input_arr` contains values that aren't covered by `input_vals`, they
    are set to 0.
    """
    # Imported lazily: _remap is a compiled extension module.
    from ._remap import _map_array

    if not np.issubdtype(input_arr.dtype, np.integer):
        raise TypeError('The dtype of an array to be remapped should be integer.')

    # Flatten for simplicity of iteration in Cython. ``reshape(-1)`` gives a
    # view in as many cases as possible (per the NumPy ``ravel`` docs).
    orig_shape = input_arr.shape
    flat_in = input_arr.reshape(-1)

    if out is None:
        out = np.empty(orig_shape, dtype=output_vals.dtype)
    elif out.shape != orig_shape:
        raise ValueError(
            'If out array is provided, it should have the same shape as '
            f'the input array. Input array has shape {orig_shape}, provided '
            f'output array has shape {out.shape}.'
        )

    try:
        # Zero-copy ravel of the output; fails when strides don't allow it.
        flat_out = out.view()
        flat_out.shape = (-1,)
    except AttributeError:
        raise ValueError(
            'If out array is provided, it should be either contiguous '
            f'or 1-dimensional. Got array with shape {out.shape} and '
            f'strides {out.strides}.'
        )

    # Ensure all arrays have matching dtypes before entering Cython.
    _map_array(
        flat_in,
        flat_out,
        input_vals.astype(flat_in.dtype, copy=False),
        output_vals.astype(out.dtype, copy=False),
    )
    return out
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class ArrayMap:
    """Class designed to mimic mapping by NumPy array indexing.

    This class is designed to replicate the use of NumPy arrays for mapping
    values with indexing:

    >>> values = np.array([0.25, 0.5, 1.0])
    >>> indices = np.array([[0, 0, 1], [2, 2, 1]])
    >>> values[indices]
    array([[0.25, 0.25, 0.5 ],
           [1.  , 1.  , 0.5 ]])

    The issue with this indexing is that you need a very large ``values``
    array if the values in the ``indices`` array are large.

    >>> values = np.array([0.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0])
    >>> indices = np.array([[0, 0, 10], [0, 10, 10]])
    >>> values[indices]
    array([[0.25, 0.25, 1.  ],
           [0.25, 1.  , 1.  ]])

    Using this class, the approach is similar, but there is no need to
    create a large values array:

    >>> in_indices = np.array([0, 10])
    >>> out_values = np.array([0.25, 1.0])
    >>> values = ArrayMap(in_indices, out_values)
    >>> values
    ArrayMap(array([ 0, 10]), array([0.25, 1.  ]))
    >>> print(values)
    ArrayMap:
      0 → 0.25
      10 → 1.0
    >>> indices = np.array([[0, 0, 10], [0, 10, 10]])
    >>> values[indices]
    array([[0.25, 0.25, 1.  ],
           [0.25, 1.  , 1.  ]])

    Parameters
    ----------
    in_values : array of int, shape (K,)
        The source values from which to map.
    out_values : array, shape (K,)
        The destination values from which to map.
    """

    def __init__(self, in_values, out_values):
        self.in_values = in_values
        self.out_values = out_values
        # Maximum number of mapping rows shown by __str__ before eliding.
        self._max_str_lines = 4
        # Dense lookup table, materialized lazily on first __setitem__.
        self._array = None

    def __len__(self):
        """Return one more than the maximum label value being remapped."""
        return np.max(self.in_values) + 1

    def __array__(self, dtype=None, copy=None):
        """Return an array that behaves like the arraymap when indexed.

        This array can be very large: it is the size of the largest value
        in the ``in_vals`` array, plus one.
        """
        if dtype is None:
            dtype = self.out_values.dtype
        # Unmapped positions default to 0, matching map_array semantics.
        output = np.zeros(np.max(self.in_values) + 1, dtype=dtype)
        output[self.in_values] = self.out_values
        return output

    @property
    def dtype(self):
        """dtype of the mapped (output) values."""
        return self.out_values.dtype

    def __repr__(self):
        return f'ArrayMap({repr(self.in_values)}, {repr(self.out_values)})'

    def __str__(self):
        if len(self.in_values) <= self._max_str_lines + 1:
            # Small map: show every in → out pair.
            rows = range(len(self.in_values))
            string = '\n'.join(
                ['ArrayMap:']
                + [f' {self.in_values[i]} → {self.out_values[i]}' for i in rows]
            )
        else:
            # Large map: show the first and last few pairs, eliding the middle.
            rows0 = list(range(0, self._max_str_lines // 2))
            rows1 = list(range(-self._max_str_lines // 2, 0))
            string = '\n'.join(
                ['ArrayMap:']
                + [f' {self.in_values[i]} → {self.out_values[i]}' for i in rows0]
                + [' ...']
                + [f' {self.in_values[i]} → {self.out_values[i]}' for i in rows1]
            )
        return string

    def __call__(self, arr):
        """Apply the map to ``arr``; equivalent to ``self[arr]``."""
        return self.__getitem__(arr)

    def __getitem__(self, index):
        """Map ``index`` (scalar, slice, bool mask, or int array) to values."""
        scalar = np.isscalar(index)
        if scalar:
            # Wrap scalars in a length-1 array; unwrapped again before return.
            index = np.array([index])
        elif isinstance(index, slice):
            start = index.start or 0  # treat None or 0 the same way
            stop = index.stop if index.stop is not None else len(self)
            step = index.step
            index = np.arange(start, stop, step)
        if index.dtype == bool:
            # Boolean masks select positions, i.e. the nonzero indices.
            index = np.flatnonzero(index)

        out = map_array(
            index,
            self.in_values.astype(index.dtype, copy=False),
            self.out_values,
        )

        if scalar:
            out = out[0]
        return out

    def __setitem__(self, indices, values):
        # Materialize the dense table once, then keep in/out value arrays
        # in sync with it after every assignment.
        if self._array is None:
            self._array = self.__array__()
        self._array[indices] = values
        self.in_values = np.flatnonzero(self._array)
        self.out_values = self._array[self.in_values]
|
vlmpy310/lib/python3.10/site-packages/skimage/util/_montage.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from .._shared import utils
|
| 4 |
+
from .. import exposure
|
| 5 |
+
|
| 6 |
+
__all__ = ['montage']
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@utils.channel_as_last_axis(multichannel_output=False)
def montage(
    arr_in,
    fill='mean',
    rescale_intensity=False,
    grid_shape=None,
    padding_width=0,
    *,
    channel_axis=None,
):
    """Create a montage of several single- or multichannel images.

    Create a rectangular montage from an input array representing an ensemble
    of equally shaped single- (gray) or multichannel (color) images.

    For example, ``montage(arr_in)`` called with the following `arr_in`

    +---+---+---+
    | 1 | 2 | 3 |
    +---+---+---+

    will return

    +---+---+
    | 1 | 2 |
    +---+---+
    | 3 | * |
    +---+---+

    where the '*' patch will be determined by the `fill` parameter.

    Parameters
    ----------
    arr_in : ndarray, shape (K, M, N[, C])
        An array representing an ensemble of `K` images of equal shape.
    fill : float or array-like of floats or 'mean', optional
        Value to fill the padding areas and/or the extra tiles in
        the output array. Has to be `float` for single channel collections.
        For multichannel collections has to be an array-like of shape of
        number of channels. If `mean`, uses the mean value over all images.
    rescale_intensity : bool, optional
        Whether to rescale the intensity of each image to [0, 1].
    grid_shape : tuple, optional
        The desired grid shape for the montage `(ntiles_row, ntiles_column)`.
        The default aspect ratio is square.
    padding_width : int, optional
        The size of the spacing between the tiles and between the tiles and
        the borders. If non-zero, makes the boundaries of individual images
        easier to perceive.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.

    Returns
    -------
    arr_out : (K*(M+p)+p, K*(N+p)+p[, C]) ndarray
        Output array with input images glued together (including padding `p`).

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util import montage
    >>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2)
    >>> arr_out = montage(arr_in)
    >>> arr_out.shape
    (4, 4)
    >>> arr_out
    array([[ 0,  1,  4,  5],
           [ 2,  3,  6,  7],
           [ 8,  9,  5,  5],
           [10, 11,  5,  5]])
    """

    if channel_axis is not None:
        arr_in = np.asarray(arr_in)
    else:
        # Promote grayscale input to a trailing singleton channel so the rest
        # of the function can assume 4D (K, M, N, C) data throughout.
        arr_in = np.asarray(arr_in)[..., np.newaxis]

    if arr_in.ndim != 4:
        raise ValueError(
            'Input array has to be 3-dimensional for grayscale '
            'images, or 4-dimensional with a `channel_axis` '
            'specified.'
        )

    n_images, n_rows, n_cols, n_chan = arr_in.shape

    if grid_shape:
        ntiles_row, ntiles_col = (int(s) for s in grid_shape)
    else:
        # Default: the smallest square grid that can hold all K images.
        ntiles_row = ntiles_col = int(np.ceil(np.sqrt(n_images)))

    # Rescale intensity if necessary (in place, image by image).
    if rescale_intensity:
        for i in range(n_images):
            arr_in[i] = exposure.rescale_intensity(arr_in[i])

    # Calculate the fill value: per-channel mean unless given explicitly.
    if fill == 'mean':
        fill = arr_in.mean(axis=(0, 1, 2))
    fill = np.atleast_1d(fill).astype(arr_in.dtype)

    # Pre-allocate an array with padding for montage; every tile gets
    # `n_pad` pixels of spacing on each side, plus a border.
    n_pad = padding_width
    arr_out = np.empty(
        (
            (n_rows + n_pad) * ntiles_row + n_pad,
            (n_cols + n_pad) * ntiles_col + n_pad,
            n_chan,
        ),
        dtype=arr_in.dtype,
    )
    # Initialize the whole canvas (padding and unused tiles) with `fill`.
    for idx_chan in range(n_chan):
        arr_out[..., idx_chan] = fill[idx_chan]

    # Row/column extents of each tile within the padded canvas.
    slices_row = [
        slice(n_pad + (n_rows + n_pad) * n, n_pad + (n_rows + n_pad) * n + n_rows)
        for n in range(ntiles_row)
    ]
    slices_col = [
        slice(n_pad + (n_cols + n_pad) * n, n_pad + (n_cols + n_pad) * n + n_cols)
        for n in range(ntiles_col)
    ]

    # Copy the data to the output array, filling the grid row-major.
    for idx_image, image in enumerate(arr_in):
        idx_sr = idx_image // ntiles_col
        idx_sc = idx_image % ntiles_col
        arr_out[slices_row[idx_sr], slices_col[idx_sc], :] = image

    if channel_axis is not None:
        return arr_out
    else:
        # Strip the singleton channel that was added for grayscale input.
        return arr_out[..., 0]
|
vlmpy310/lib/python3.10/site-packages/skimage/util/_regular_grid.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def regular_grid(ar_shape, n_points):
    """Find `n_points` regularly spaced along `ar_shape`.

    The returned points (as slices) should be as close to cubically-spaced as
    possible. Essentially, the points are spaced by the Nth root of the input
    array size, where N is the number of dimensions. However, if an array
    dimension cannot fit a full step size, it is "discarded", and the
    computation is done for only the remaining dimensions.

    Parameters
    ----------
    ar_shape : array-like of ints
        The shape of the space embedding the grid. ``len(ar_shape)`` is the
        number of dimensions.
    n_points : int
        The (approximate) number of points to embed in the space.

    Returns
    -------
    slices : tuple of slice objects
        A slice along each dimension of `ar_shape`, such that the intersection
        of all the slices give the coordinates of regularly spaced points.

        .. versionchanged:: 0.14.1
            In scikit-image 0.14.1 and 0.15, the return type was changed from a
            list to a tuple to ensure compatibility with NumPy 1.15 and higher.

    Examples
    --------
    >>> ar = np.zeros((20, 40))
    >>> g = regular_grid(ar.shape, 8)
    >>> g
    (slice(5, None, 10), slice(5, None, 10))
    >>> ar[g] = 1
    >>> ar.sum()
    8.0
    >>> g = regular_grid((3, 20, 40), 8)
    >>> g
    (slice(1, None, 3), slice(5, None, 10), slice(5, None, 10))
    """
    ar_shape = np.asanyarray(ar_shape)
    ndim = len(ar_shape)
    # Work on dimensions sorted ascending; remember how to restore the
    # caller's original dimension order at the end.
    restore_order = np.argsort(np.argsort(ar_shape))
    sorted_dims = np.sort(ar_shape)

    space_size = float(np.prod(ar_shape))
    if space_size <= n_points:
        # More points requested than pixels available: take everything.
        return (slice(None),) * ndim

    # Ideal isotropic spacing: the ndim-th root of pixels-per-point.
    steps = np.full(ndim, (space_size / n_points) ** (1.0 / ndim), dtype='float64')
    if (sorted_dims < steps).any():
        # Some dimension is too small for a full step: clamp it to its full
        # extent and redistribute the spacing over the remaining dimensions.
        for dim in range(ndim):
            steps[dim] = sorted_dims[dim]
            space_size = float(np.prod(sorted_dims[dim + 1 :]))
            steps[dim + 1 :] = (space_size / n_points) ** (1.0 / (ndim - dim - 1))
            if (sorted_dims >= steps).all():
                break

    # Center each run of points within its dimension, then round to ints.
    starts = (steps // 2).astype(int)
    steps = np.round(steps).astype(int)
    sorted_slices = [slice(lo, None, st) for lo, st in zip(starts, steps)]
    return tuple(sorted_slices[i] for i in restore_order)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def regular_seeds(ar_shape, n_points, dtype=int):
    """Return an image with ~`n_points` regularly-spaced nonzero pixels.

    Parameters
    ----------
    ar_shape : tuple of int
        The shape of the desired output image.
    n_points : int
        The desired number of nonzero points.
    dtype : numpy data type, optional
        The desired data type of the output.

    Returns
    -------
    seed_img : array of int or bool
        The desired image.

    Examples
    --------
    >>> regular_seeds((5, 5), 4)
    array([[0, 0, 0, 0, 0],
           [0, 1, 0, 2, 0],
           [0, 0, 0, 0, 0],
           [0, 3, 0, 4, 0],
           [0, 0, 0, 0, 0]])
    """
    # Pick the seed positions, then label them 1..n in grid order.
    grid = regular_grid(ar_shape, n_points)
    seed_img = np.zeros(ar_shape, dtype=dtype)
    grid_shape = seed_img[grid].shape
    n_seeds = seed_img[grid].size
    seed_img[grid] = np.arange(1, n_seeds + 1).reshape(grid_shape)
    return seed_img
|
vlmpy310/lib/python3.10/site-packages/skimage/util/_slice_along_axes.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ['slice_along_axes']
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def slice_along_axes(image, slices, axes=None, copy=False):
    """Slice an image along given axes.

    Parameters
    ----------
    image : ndarray
        Input image.
    slices : list of 2-tuple (a, b) where a < b.
        For each axis in `axes`, a corresponding 2-tuple
        ``(min_val, max_val)`` to slice with (as with Python slices,
        ``max_val`` is non-inclusive).
    axes : int or tuple, optional
        Axes corresponding to the limits given in `slices`. If None,
        axes are in ascending order, up to the length of `slices`.
    copy : bool, optional
        If True, ensure that the output is not a view of `image`.

    Returns
    -------
    out : ndarray
        The region of `image` corresponding to the given slices and axes.

    Examples
    --------
    >>> from skimage import data
    >>> img = data.camera()
    >>> slice_along_axes(img, [(0, 100)]).shape
    (100, 512)
    >>> slice_along_axes(img, [(0, 100), (0, 75)], axes=[1, 0]).shape
    (75, 100)
    """
    # Empty/None bounding box: nothing to crop, hand back the input.
    if not slices:
        return image

    if axes is None:
        axes = list(range(image.ndim))
        if len(axes) < len(slices):
            raise ValueError("More `slices` than available axes")
    elif len(axes) != len(slices):
        raise ValueError("`axes` and `slices` must have equal length")

    if len(set(axes)) != len(axes):
        raise ValueError("`axes` must be unique")

    if any(ax < 0 or ax >= image.ndim for ax in axes):
        raise ValueError(
            f"axes {axes} out of range; image has only " f"{image.ndim} dimensions"
        )

    # Start from a full-image selector and narrow the requested axes.
    selection = [slice(None)] * image.ndim
    for (a, b), ax in zip(slices, axes):
        size = image.shape[ax]
        # Normalize negative limits the same way Python indexing does.
        if a < 0:
            a %= size
        if b < 0:
            b %= size
        if a > b:
            raise ValueError(
                f"Invalid slice ({a}, {b}): must be ordered `(min_val, max_val)`"
            )
        if a < 0 or b > size:
            raise ValueError(
                f"Invalid slice ({a}, {b}) for image with dimensions {image.shape}"
            )
        selection[ax] = slice(a, b)

    image_slice = image[tuple(selection)]

    if copy and image_slice.base is not None:
        # Basic slicing returns a view; force an independent array on request.
        image_slice = image_slice.copy()

    return image_slice
|
vlmpy310/lib/python3.10/site-packages/skimage/util/apply_parallel.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
|
| 3 |
+
__all__ = ['apply_parallel']
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _get_chunks(shape, ncpu):
|
| 7 |
+
"""Split the array into equal sized chunks based on the number of
|
| 8 |
+
available processors. The last chunk in each dimension absorbs the
|
| 9 |
+
remainder array elements if the number of CPUs does not divide evenly into
|
| 10 |
+
the number of array elements.
|
| 11 |
+
|
| 12 |
+
Examples
|
| 13 |
+
--------
|
| 14 |
+
>>> _get_chunks((4, 4), 4)
|
| 15 |
+
((2, 2), (2, 2))
|
| 16 |
+
>>> _get_chunks((4, 4), 2)
|
| 17 |
+
((2, 2), (4,))
|
| 18 |
+
>>> _get_chunks((5, 5), 2)
|
| 19 |
+
((2, 3), (5,))
|
| 20 |
+
>>> _get_chunks((2, 4), 2)
|
| 21 |
+
((1, 1), (4,))
|
| 22 |
+
"""
|
| 23 |
+
# since apply_parallel is in the critical import path, we lazy import
|
| 24 |
+
# math just when we need it.
|
| 25 |
+
from math import ceil
|
| 26 |
+
|
| 27 |
+
chunks = []
|
| 28 |
+
nchunks_per_dim = int(ceil(ncpu ** (1.0 / len(shape))))
|
| 29 |
+
|
| 30 |
+
used_chunks = 1
|
| 31 |
+
for i in shape:
|
| 32 |
+
if used_chunks < ncpu:
|
| 33 |
+
regular_chunk = i // nchunks_per_dim
|
| 34 |
+
remainder_chunk = regular_chunk + (i % nchunks_per_dim)
|
| 35 |
+
|
| 36 |
+
if regular_chunk == 0:
|
| 37 |
+
chunk_lens = (remainder_chunk,)
|
| 38 |
+
else:
|
| 39 |
+
chunk_lens = (regular_chunk,) * (nchunks_per_dim - 1) + (
|
| 40 |
+
remainder_chunk,
|
| 41 |
+
)
|
| 42 |
+
else:
|
| 43 |
+
chunk_lens = (i,)
|
| 44 |
+
|
| 45 |
+
chunks.append(chunk_lens)
|
| 46 |
+
used_chunks *= nchunks_per_dim
|
| 47 |
+
return tuple(chunks)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _ensure_dask_array(array, chunks=None):
    """Return `array` as a Dask array, wrapping non-Dask input if needed.

    `chunks` is forwarded to ``da.from_array`` only when wrapping; an input
    that is already a Dask array is returned unchanged.
    """
    import dask.array as da

    if not isinstance(array, da.Array):
        array = da.from_array(array, chunks=chunks)
    return array
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def apply_parallel(
    function,
    array,
    chunks=None,
    depth=0,
    mode=None,
    extra_arguments=(),
    extra_keywords=None,
    *,
    dtype=None,
    compute=None,
    channel_axis=None,
):
    """Map a function in parallel across an array.

    Split an array into possibly overlapping chunks of a given depth and
    boundary type, call the given function in parallel on the chunks, combine
    the chunks and return the resulting array.

    Parameters
    ----------
    function : function
        Function to be mapped which takes an array as an argument.
    array : numpy array or dask array
        Array which the function will be applied to.
    chunks : int, tuple, or tuple of tuples, optional
        A single integer is interpreted as the length of one side of a square
        chunk that should be tiled across the array. One tuple of length
        ``array.ndim`` represents the shape of a chunk, and it is tiled across
        the array. A list of tuples of length ``ndim``, where each sub-tuple
        is a sequence of chunk sizes along the corresponding dimension. If
        None, the array is broken up into chunks based on the number of
        available cpus. More information about chunks is in the documentation
        `here <https://dask.pydata.org/en/latest/array-design.html>`_. When
        `channel_axis` is not None, the tuples can be length ``ndim - 1`` and
        a single chunk will be used along the channel axis.
    depth : int or sequence of int, optional
        The depth of the added boundary cells. A tuple can be used to specify a
        different depth per array axis. Defaults to zero. When `channel_axis`
        is not None, and a tuple of length ``ndim - 1`` is provided, a depth of
        0 will be used along the channel axis.
    mode : {'reflect', 'symmetric', 'periodic', 'wrap', 'nearest', 'edge'}, optional
        Type of external boundary padding.
    extra_arguments : tuple, optional
        Tuple of arguments to be passed to the function.
    extra_keywords : dictionary, optional
        Dictionary of keyword arguments to be passed to the function.
    dtype : data-type or None, optional
        The data-type of the `function` output. If None, Dask will attempt to
        infer this by calling the function on data of shape ``(1,) * ndim``.
        For functions expecting RGB or multichannel data this may be
        problematic. In such cases, the user should manually specify this dtype
        argument instead.

        .. versionadded:: 0.18
           ``dtype`` was added in 0.18.
    compute : bool, optional
        If ``True``, compute eagerly returning a NumPy Array.
        If ``False``, compute lazily returning a Dask Array.
        If ``None`` (default), compute based on array type provided
        (eagerly for NumPy Arrays and lazily for Dask Arrays).
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.

    Returns
    -------
    out : ndarray or dask Array
        Returns the result of the applying the operation.
        Type is dependent on the ``compute`` argument.

    Notes
    -----
    Numpy edge modes 'symmetric', 'wrap', and 'edge' are converted to the
    equivalent ``dask`` boundary modes 'reflect', 'periodic' and 'nearest',
    respectively.
    Setting ``compute=False`` can be useful for chaining later operations.
    For example region selection to preview a result or storing large data
    to disk instead of loading in memory.

    """
    try:
        # Importing dask takes time. since apply_parallel is on the
        # minimum import path of skimage, we lazy attempt to import dask
        import dask.array as da
    except ImportError:
        raise RuntimeError(
            "Could not import 'dask'. Please install " "using 'pip install dask'"
        )

    if extra_keywords is None:
        extra_keywords = {}

    if compute is None:
        # Default: eager for plain (NumPy) input, lazy for Dask input.
        compute = not isinstance(array, da.Array)

    if channel_axis is not None:
        # Normalize a negative channel axis to a non-negative index.
        channel_axis = channel_axis % array.ndim

    if chunks is None:
        shape = array.shape
        try:
            # since apply_parallel is in the critical import path, we lazy
            # import multiprocessing just when we need it.
            from multiprocessing import cpu_count

            ncpu = cpu_count()
        except NotImplementedError:
            # cpu_count can be unavailable on some platforms; fall back to 4.
            ncpu = 4
        if channel_axis is not None:
            # use a single chunk along the channel axis
            spatial_shape = shape[:channel_axis] + shape[channel_axis + 1 :]
            chunks = list(_get_chunks(spatial_shape, ncpu))
            chunks.insert(channel_axis, shape[channel_axis])
            chunks = tuple(chunks)
        else:
            chunks = _get_chunks(shape, ncpu)
    elif channel_axis is not None and len(chunks) == array.ndim - 1:
        # insert a single chunk along the channel axis
        chunks = list(chunks)
        chunks.insert(channel_axis, array.shape[channel_axis])
        chunks = tuple(chunks)

    # Translate NumPy pad-mode names to their Dask boundary equivalents.
    if mode == 'wrap':
        mode = 'periodic'
    elif mode == 'symmetric':
        mode = 'reflect'
    elif mode == 'edge':
        mode = 'nearest'
    elif mode is None:
        # default value for Dask.
        # Note: that for dask >= 2022.03 it will change to 'none' so we set it
        # here for consistent behavior across Dask versions.
        mode = 'reflect'

    if channel_axis is not None:
        if numpy.isscalar(depth):
            # depth is zero along channel_axis
            depth = [depth] * (array.ndim - 1)
        depth = list(depth)
        if len(depth) == array.ndim - 1:
            depth.insert(channel_axis, 0)
        depth = tuple(depth)

    # Bind extra_arguments/extra_keywords so map_overlap only needs a
    # one-argument callable.
    def wrapped_func(arr):
        return function(arr, *extra_arguments, **extra_keywords)

    darr = _ensure_dask_array(array, chunks=chunks)

    res = darr.map_overlap(wrapped_func, depth, boundary=mode, dtype=dtype)
    if compute:
        res = res.compute()

    return res
|
vlmpy310/lib/python3.10/site-packages/skimage/util/arraycrop.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The arraycrop module contains functions to crop values from the edges of an
|
| 3 |
+
n-dimensional array.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from numbers import Integral
|
| 8 |
+
|
| 9 |
+
__all__ = ['crop']
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def crop(ar, crop_width, copy=False, order='K'):
    """Crop array `ar` by `crop_width` along each dimension.

    Parameters
    ----------
    ar : array-like of rank N
        Input array.
    crop_width : {sequence, int}
        Number of values to remove from the edges of each axis.
        ``((before_1, after_1),`` ... ``(before_N, after_N))`` specifies
        unique crop widths at the start and end of each axis.
        ``((before, after),) or (before, after)`` specifies
        a fixed start and end crop for every axis.
        ``(n,)`` or ``n`` for integer ``n`` is a shortcut for
        before = after = ``n`` for all axes.
    copy : bool, optional
        If `True`, ensure the returned array is a contiguous copy. Normally,
        a crop operation will return a discontiguous view of the underlying
        input array.
    order : {'C', 'F', 'A', 'K'}, optional
        If ``copy==True``, control the memory layout of the copy. See
        ``np.copy``.

    Returns
    -------
    cropped : array
        The cropped array. If ``copy=False`` (default), this is a sliced
        view of the input array.

    Raises
    ------
    ValueError
        If `crop_width` is a sequence whose length matches neither 1, 2,
        nor ``ar.ndim``.
    """
    # NOTE: `np.array(ar, copy=False)` raises under NumPy >= 2.0 whenever a
    # copy would be required (the `copy` keyword changed meaning);
    # `np.asarray` keeps the intended copy-only-if-needed behavior.
    ar = np.asarray(ar)

    # Normalize `crop_width` into one (before, after) pair per axis.
    if isinstance(crop_width, Integral):
        crops = [[crop_width, crop_width]] * ar.ndim
    elif isinstance(crop_width[0], Integral):
        if len(crop_width) == 1:
            # (n,) -> same width before and after, on every axis.
            crops = [[crop_width[0], crop_width[0]]] * ar.ndim
        elif len(crop_width) == 2:
            # (before, after) applied to every axis.
            crops = [crop_width] * ar.ndim
        else:
            raise ValueError(
                f'crop_width has an invalid length: {len(crop_width)}\n'
                f'crop_width should be a sequence of N pairs, '
                f'a single pair, or a single integer'
            )
    elif len(crop_width) == 1:
        # ((before, after),) applied to every axis.
        crops = [crop_width[0]] * ar.ndim
    elif len(crop_width) == ar.ndim:
        crops = crop_width
    else:
        raise ValueError(
            f'crop_width has an invalid length: {len(crop_width)}\n'
            f'crop_width should be a sequence of N pairs, '
            f'a single pair, or a single integer'
        )

    slices = tuple(slice(a, ar.shape[i] - b) for i, (a, b) in enumerate(crops))
    if copy:
        cropped = np.array(ar[slices], order=order, copy=True)
    else:
        cropped = ar[slices]
    return cropped
|
vlmpy310/lib/python3.10/site-packages/skimage/util/compare.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import warnings
|
| 3 |
+
from itertools import product
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from .dtype import img_as_float
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _rename_image_params(func):
|
| 11 |
+
wm_images = (
|
| 12 |
+
"Since version 0.24, the two input images are named `image0` and "
|
| 13 |
+
"`image1` (instead of `image1` and `image2`, respectively). Please use "
|
| 14 |
+
"`image0, image1` to avoid this warning for now, and avoid an error "
|
| 15 |
+
"from version 0.26 onwards."
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
wm_method = (
|
| 19 |
+
"Starting in version 0.24, all arguments following `image0, image1` "
|
| 20 |
+
"(including `method`) will be keyword-only. Please pass `method=` "
|
| 21 |
+
"in the function call to avoid this warning for now, and avoid an error "
|
| 22 |
+
"from version 0.26 onwards."
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
@functools.wraps(func)
|
| 26 |
+
def wrapper(*args, **kwargs):
|
| 27 |
+
# Turn all args into kwargs
|
| 28 |
+
for i, (value, param) in enumerate(
|
| 29 |
+
zip(args, ["image0", "image1", "method", "n_tiles"])
|
| 30 |
+
):
|
| 31 |
+
if i >= 2:
|
| 32 |
+
warnings.warn(wm_method, category=FutureWarning, stacklevel=2)
|
| 33 |
+
if param in kwargs:
|
| 34 |
+
raise ValueError(
|
| 35 |
+
f"{param} passed both as positional and keyword argument."
|
| 36 |
+
)
|
| 37 |
+
else:
|
| 38 |
+
kwargs[param] = value
|
| 39 |
+
args = tuple()
|
| 40 |
+
|
| 41 |
+
# Account for `image2` if given
|
| 42 |
+
if "image2" in kwargs.keys():
|
| 43 |
+
warnings.warn(wm_images, category=FutureWarning, stacklevel=2)
|
| 44 |
+
|
| 45 |
+
# Safely move `image2` to `image1` if that's empty
|
| 46 |
+
if "image1" in kwargs.keys():
|
| 47 |
+
# Safely move `image1` to `image0`
|
| 48 |
+
if "image0" in kwargs.keys():
|
| 49 |
+
raise ValueError(
|
| 50 |
+
"Three input images given; please use only `image0` "
|
| 51 |
+
"and `image1`."
|
| 52 |
+
)
|
| 53 |
+
kwargs["image0"] = kwargs.pop("image1")
|
| 54 |
+
kwargs["image1"] = kwargs.pop("image2")
|
| 55 |
+
|
| 56 |
+
return func(*args, **kwargs)
|
| 57 |
+
|
| 58 |
+
return wrapper
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@_rename_image_params
def compare_images(image0, image1, *, method='diff', n_tiles=(8, 8)):
    """
    Return an image showing the differences between two images.

    .. versionadded:: 0.16

    Parameters
    ----------
    image0, image1 : ndarray, shape (M, N)
        Images to process, must be of the same shape.

        .. versionchanged:: 0.24
            `image1` and `image2` were renamed into `image0` and `image1`
            respectively.
    method : string, optional
        Method used for the comparison.
        Valid values are {'diff', 'blend', 'checkerboard'}.
        Details are provided in the note section.

        .. versionchanged:: 0.24
            This parameter and following ones are keyword-only.
    n_tiles : tuple, optional
        Used only for the `checkerboard` method. Specifies the number
        of tiles (row, column) to divide the image.

    Returns
    -------
    comparison : ndarray, shape (M, N)
        Image showing the differences.

    Raises
    ------
    ValueError
        If the two images have different shapes, if `method` is unknown, or
        if the checkerboard method is requested on non-2D input.

    Notes
    -----
    ``'diff'`` computes the absolute difference between the two images.
    ``'blend'`` computes the mean value.
    ``'checkerboard'`` makes tiles of dimension `n_tiles` that display
    alternatively the first and the second image. Note that images must be
    2-dimensional to be compared with the checkerboard method.
    """

    if image1.shape != image0.shape:
        raise ValueError('Images must have the same shape.')

    # Convert to float so 'diff'/'blend' arithmetic does not wrap or
    # truncate on integer input dtypes.
    img1 = img_as_float(image0)
    img2 = img_as_float(image1)

    if method == 'diff':
        comparison = np.abs(img2 - img1)
    elif method == 'blend':
        comparison = 0.5 * (img2 + img1)
    elif method == 'checkerboard':
        if img1.ndim != 2:
            raise ValueError(
                'Images must be 2-dimensional to be compared with the '
                'checkerboard method.'
            )
        shapex, shapey = img1.shape
        mask = np.full((shapex, shapey), False)
        # Tile extents; any remainder rows/columns past n_tiles * step are
        # left False and therefore taken from the second image.
        stepx = int(shapex / n_tiles[0])
        stepy = int(shapey / n_tiles[1])
        # Chessboard pattern: True tiles show image0, False tiles image1.
        for i, j in product(range(n_tiles[0]), range(n_tiles[1])):
            if (i + j) % 2 == 0:
                mask[i * stepx : (i + 1) * stepx, j * stepy : (j + 1) * stepy] = True
        comparison = np.zeros_like(img1)
        comparison[mask] = img1[mask]
        comparison[~mask] = img2[~mask]
    else:
        raise ValueError(
            'Wrong value for `method`. '
            'Must be either "diff", "blend" or "checkerboard".'
        )
    return comparison
|
vlmpy310/lib/python3.10/site-packages/skimage/util/dtype.py
ADDED
|
@@ -0,0 +1,600 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
from warnings import warn
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
__all__ = [
|
| 8 |
+
'img_as_float32',
|
| 9 |
+
'img_as_float64',
|
| 10 |
+
'img_as_float',
|
| 11 |
+
'img_as_int',
|
| 12 |
+
'img_as_uint',
|
| 13 |
+
'img_as_ubyte',
|
| 14 |
+
'img_as_bool',
|
| 15 |
+
'dtype_limits',
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
# Some of these may or may not be aliases depending on architecture & platform
_integer_types = (
    np.int8,
    np.byte,
    np.int16,
    np.short,
    np.int32,
    np.int64,
    np.longlong,
    np.int_,
    np.intp,
    np.intc,
    int,
    np.uint8,
    np.ubyte,
    np.uint16,
    np.ushort,
    np.uint32,
    np.uint64,
    np.ulonglong,
    np.uint,
    np.uintp,
    np.uintc,
)
# (min, max) representable value for each supported integer dtype.
_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}
# Canonical intensity range per dtype: floats are treated as normalized
# to [-1, 1]; integer entries are merged in below from `_integer_ranges`.
dtype_range = {
    bool: (False, True),
    np.bool_: (False, True),
    float: (-1, 1),
    np.float16: (-1, 1),
    np.float32: (-1, 1),
    np.float64: (-1, 1),
}

# Accessing the deprecated alias emits a DeprecationWarning on some NumPy
# versions; suppress it while probing for availability.
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=DeprecationWarning)

    # np.bool8 is a deprecated alias of np.bool_
    if hasattr(np, 'bool8'):
        dtype_range[np.bool8] = (False, True)

dtype_range.update(_integer_ranges)

# All dtypes that the conversion helpers in this module accept.
_supported_types = list(dtype_range.keys())
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def dtype_limits(image, clip_negative=False):
    """Return intensity limits, i.e. (min, max) tuple, of the image's dtype.

    Parameters
    ----------
    image : ndarray
        Input image.
    clip_negative : bool, optional
        If True, clip the negative range (i.e. return 0 for min intensity)
        even if the image dtype allows negative values.

    Returns
    -------
    imin, imax : tuple
        Lower and upper intensity limits.
    """
    # Look up the canonical range for this dtype in the module-level table.
    lower, upper = dtype_range[image.dtype.type]
    return (0 if clip_negative else lower), upper
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _dtype_itemsize(itemsize, *dtypes):
|
| 87 |
+
"""Return first of `dtypes` with itemsize greater than `itemsize`
|
| 88 |
+
|
| 89 |
+
Parameters
|
| 90 |
+
----------
|
| 91 |
+
itemsize: int
|
| 92 |
+
The data type object element size.
|
| 93 |
+
|
| 94 |
+
Other Parameters
|
| 95 |
+
----------------
|
| 96 |
+
*dtypes:
|
| 97 |
+
Any Object accepted by `np.dtype` to be converted to a data
|
| 98 |
+
type object
|
| 99 |
+
|
| 100 |
+
Returns
|
| 101 |
+
-------
|
| 102 |
+
dtype: data type object
|
| 103 |
+
First of `dtypes` with itemsize greater than `itemsize`.
|
| 104 |
+
|
| 105 |
+
"""
|
| 106 |
+
return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _dtype_bits(kind, bits, itemsize=1):
|
| 110 |
+
"""Return dtype of `kind` that can store a `bits` wide unsigned int
|
| 111 |
+
|
| 112 |
+
Parameters:
|
| 113 |
+
kind: str
|
| 114 |
+
Data type kind.
|
| 115 |
+
bits: int
|
| 116 |
+
Desired number of bits.
|
| 117 |
+
itemsize: int
|
| 118 |
+
The data type object element size.
|
| 119 |
+
|
| 120 |
+
Returns
|
| 121 |
+
-------
|
| 122 |
+
dtype: data type object
|
| 123 |
+
Data type of `kind` that can store a `bits` wide unsigned int
|
| 124 |
+
|
| 125 |
+
"""
|
| 126 |
+
|
| 127 |
+
s = next(
|
| 128 |
+
i
|
| 129 |
+
for i in (itemsize,) + (2, 4, 8)
|
| 130 |
+
if bits < (i * 8) or (bits == (i * 8) and kind == 'u')
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
return np.dtype(kind + str(s))
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _scale(a, n, m, copy=True):
    """Scale an array of unsigned/positive integers from `n` to `m` bits.

    Numbers can be represented exactly only if `m` is a multiple of `n`.

    Parameters
    ----------
    a : ndarray
        Input image array.
    n : int
        Number of bits currently used to encode the values in `a`.
    m : int
        Desired number of bits to encode the values in `out`.
    copy : bool, optional
        If True, allocates and returns new array. Otherwise, modifies
        `a` in place.

    Returns
    -------
    out : array
        Output image array. Has the same kind as `a`.
    """
    kind = a.dtype.kind
    if n > m and a.max() < 2**m:
        # Every value already fits in `m` bits: downcast the dtype without
        # rescaling the values.
        mnew = int(np.ceil(m / 2) * 2)
        if mnew > m:
            dtype = f'int{mnew}'
        else:
            dtype = f'uint{mnew}'
        n = int(np.ceil(n / 2) * 2)
        warn(
            f'Downcasting {a.dtype} to {dtype} without scaling because max '
            f'value {a.max()} fits in {dtype}',
            stacklevel=3,
        )
        return a.astype(_dtype_bits(kind, m))
    elif n == m:
        # Same width: no scaling needed.
        return a.copy() if copy else a
    elif n > m:
        # downscale with precision loss
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            # Compute in the wider input dtype, then store into the
            # narrower output buffer (hence casting='unsafe').
            np.floor_divide(a, 2 ** (n - m), out=b, dtype=a.dtype, casting='unsafe')
            return b
        else:
            a //= 2 ** (n - m)
            return a
    elif m % n == 0:
        # exact upscale to a multiple of `n` bits
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            # (2**m - 1) // (2**n - 1) replicates the n-bit pattern to fill
            # m bits exactly (e.g. 0xFF -> 0xFFFF).
            np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
            return b
        else:
            a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
            a *= (2**m - 1) // (2**n - 1)
            return a
    else:
        # upscale to a multiple of `n` bits,
        # then downscale with precision loss
        o = (m // n + 1) * n
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, o))
            np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
            b //= 2 ** (o - m)
            return b
        else:
            a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
            a *= (2**o - 1) // (2**n - 1)
            a //= 2 ** (o - m)
            return a
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def _convert(image, dtype, force_copy=False, uniform=False):
    """
    Convert an image to the requested data-type.

    Warnings are issued in case of precision loss, or when negative values
    are clipped during conversion to unsigned integer types (sign loss).

    Floating point values are expected to be normalized and will be clipped
    to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
    signed integers respectively.

    Numbers are not shifted to the negative side when converting from
    unsigned to signed integer types. Negative values will be clipped when
    converting to unsigned integers.

    Parameters
    ----------
    image : ndarray
        Input image.
    dtype : dtype
        Target data-type.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.
    uniform : bool, optional
        Uniformly quantize the floating point range to the integer range.
        By default (uniform=False) floating point values are scaled and
        rounded to the nearest integers, which minimizes back and forth
        conversion errors.

        .. versionchanged:: 0.15
            ``_convert`` no longer warns about possible precision or sign
            information loss. See discussions on these warnings at:
            https://github.com/scikit-image/scikit-image/issues/2602
            https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228
            https://github.com/scikit-image/scikit-image/pull/3575

    References
    ----------
    .. [1] DirectX data conversion rules.
           https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
    .. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25",
           pp 7-8. Khronos Group, 2010.
    .. [3] Proper treatment of pixels as integers. A.W. Paeth.
           In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
    .. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels",
           pp 47-57. Morgan Kaufmann, 1998.

    """
    image = np.asarray(image)
    dtypeobj_in = image.dtype
    # The abstract `np.floating` requests "any float"; concretize the output
    # object to float64, while the `issubdtype` check below still lets
    # existing float32/float64 inputs pass through unconverted.
    if dtype is np.floating:
        dtypeobj_out = np.dtype('float64')
    else:
        dtypeobj_out = np.dtype(dtype)
    dtype_in = dtypeobj_in.type
    dtype_out = dtypeobj_out.type
    kind_in = dtypeobj_in.kind
    kind_out = dtypeobj_out.kind
    itemsize_in = dtypeobj_in.itemsize
    itemsize_out = dtypeobj_out.itemsize

    # Below, we do an `issubdtype` check.  Its purpose is to find out
    # whether we can get away without doing any image conversion.  This happens
    # when:
    #
    # - the output and input dtypes are the same or
    # - when the output is specified as a type, and the input dtype
    #   is a subclass of that type (e.g. `np.floating` will allow
    #   `float32` and `float64` arrays through)

    if np.issubdtype(dtype_in, dtype):
        if force_copy:
            image = image.copy()
        return image

    if not (dtype_in in _supported_types and dtype_out in _supported_types):
        raise ValueError(f'Cannot convert from {dtypeobj_in} to ' f'{dtypeobj_out}.')

    # Integer ranges are needed by several of the branches below;
    # look them up once.
    if kind_in in 'ui':
        imin_in = np.iinfo(dtype_in).min
        imax_in = np.iinfo(dtype_in).max
    if kind_out in 'ui':
        imin_out = np.iinfo(dtype_out).min
        imax_out = np.iinfo(dtype_out).max

    # any -> binary
    if kind_out == 'b':
        return image > dtype_in(dtype_range[dtype_in][1] / 2)

    # binary -> any
    if kind_in == 'b':
        result = image.astype(dtype_out)
        if kind_out != 'f':
            # True becomes the dtype's maximum intensity, False stays 0.
            result *= dtype_out(dtype_range[dtype_out][1])
        return result

    # float -> any
    if kind_in == 'f':
        if kind_out == 'f':
            # float -> float
            return image.astype(dtype_out)

        if np.min(image) < -1.0 or np.max(image) > 1.0:
            raise ValueError("Images of type float must be between -1 and 1.")
        # floating point -> integer
        # use float type that can represent output integer type
        computation_type = _dtype_itemsize(
            itemsize_out, dtype_in, np.float32, np.float64
        )

        if not uniform:
            if kind_out == 'u':
                image_out = np.multiply(image, imax_out, dtype=computation_type)
            else:
                image_out = np.multiply(
                    image, (imax_out - imin_out) / 2, dtype=computation_type
                )
                # Offset before rounding so -1.0 lands exactly on imin_out
                # after `rint` (e.g. for int8: -1 * 127.5 - 0.5 == -128).
                image_out -= 1.0 / 2.0
            np.rint(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        elif kind_out == 'u':
            # uniform quantization: scale so that flooring (via the final
            # astype) assigns equal-width float intervals to each integer.
            image_out = np.multiply(image, imax_out + 1, dtype=computation_type)
            np.clip(image_out, 0, imax_out, out=image_out)
        else:
            image_out = np.multiply(
                image, (imax_out - imin_out + 1.0) / 2.0, dtype=computation_type
            )
            np.floor(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        return image_out.astype(dtype_out)

    # signed/unsigned int -> float
    if kind_out == 'f':
        # use float type that can exactly represent input integers
        computation_type = _dtype_itemsize(
            itemsize_in, dtype_out, np.float32, np.float64
        )

        if kind_in == 'u':
            # using np.divide or np.multiply doesn't copy the data
            # until the computation time
            image = np.multiply(image, 1.0 / imax_in, dtype=computation_type)
            # DirectX uses this conversion also for signed ints
            # if imin_in:
            #     np.maximum(image, -1.0, out=image)
        elif kind_in == 'i':
            # From DirectX conversions:
            # The most negative value maps to -1.0f
            # Every other value is converted to a float (call it c)
            # and then result = c * (1.0f / (2⁽ⁿ⁻¹⁾-1)).

            image = np.multiply(image, 1.0 / imax_in, dtype=computation_type)
            np.maximum(image, -1.0, out=image)

        else:
            # other integer-like kinds: recenter, then scale into [-1, 1]
            image = np.add(image, 0.5, dtype=computation_type)
            image *= 2 / (imax_in - imin_in)

        return np.asarray(image, dtype_out)

    # unsigned int -> signed/unsigned int
    if kind_in == 'u':
        if kind_out == 'i':
            # unsigned int -> signed int: rescale into one fewer bit, then
            # reinterpret the buffer as the signed type.
            image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1)
            return image.view(dtype_out)
        else:
            # unsigned int -> unsigned int
            return _scale(image, 8 * itemsize_in, 8 * itemsize_out)

    # signed int -> unsigned int: negatives are clipped to 0 by np.maximum.
    if kind_out == 'u':
        image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out)
        result = np.empty(image.shape, dtype_out)
        np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
        return result

    # signed int -> signed int
    if itemsize_in > itemsize_out:
        return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1)

    # Widening signed -> signed: cast up first so the offset arithmetic
    # cannot overflow, shift to non-negative, rescale, then shift back.
    image = image.astype(_dtype_bits('i', itemsize_out * 8))
    image -= imin_in
    image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
    image += imin_out
    return image.astype(dtype_out)
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def convert(image, dtype, force_copy=False, uniform=False):
    """Deprecated public wrapper around `_convert`.

    Emits a ``FutureWarning`` on every call, then delegates unchanged.
    """
    deprecation_message = (
        "The use of this function is discouraged as its behavior may change "
        "dramatically in scikit-image 1.0. This function will be removed "
        "in scikit-image 1.0."
    )
    warn(deprecation_message, FutureWarning, stacklevel=2)
    return _convert(image, dtype, force_copy=force_copy, uniform=uniform)
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
# ``convert`` inherits ``_convert``'s docstring plus a deprecation notice.
# Guarded because ``__doc__`` is ``None`` when docstrings are stripped
# (e.g. running Python with ``-OO``) — concatenation would raise then.
if _convert.__doc__ is not None:
    convert.__doc__ = (
        _convert.__doc__
        + """

    Warns
    -----
    FutureWarning:
        .. versionadded:: 0.17

        The use of this function is discouraged as its behavior may change
        dramatically in scikit-image 1.0. This function will be removed
        in scikit-image 1.0.
    """
    )
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
def img_as_float32(image, force_copy=False):
    """Convert an image to single-precision (32-bit) floating point format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        If True, always return a copy even when ``image`` already has the
        requested dtype.

    Returns
    -------
    out : ndarray of float32
        Output image.

    Notes
    -----
    Unsigned integer inputs are rescaled into [0.0, 1.0] and signed integer
    inputs into [-1.0, 1.0].  Inputs that already have a float type are
    passed through with intensity values unchanged, so they may lie outside
    those ranges.

    """
    return _convert(image, np.float32, force_copy=force_copy)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def img_as_float64(image, force_copy=False):
    """Convert an image to double-precision (64-bit) floating point format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        If True, always return a copy even when ``image`` already has the
        requested dtype.

    Returns
    -------
    out : ndarray of float64
        Output image.

    Notes
    -----
    Unsigned integer inputs are rescaled into [0.0, 1.0] and signed integer
    inputs into [-1.0, 1.0].  Inputs that already have a float type are
    passed through with intensity values unchanged, so they may lie outside
    those ranges.

    """
    return _convert(image, np.float64, force_copy=force_copy)
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def img_as_float(image, force_copy=False):
    """Convert an image to floating point format.

    This function is similar to `img_as_float64`, but will not convert
    lower-precision floating point arrays to `float64`.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        If True, always return a copy even when ``image`` already has a
        floating point dtype.

    Returns
    -------
    out : ndarray of float
        Output image.

    Notes
    -----
    Unsigned integer inputs are rescaled into [0.0, 1.0] and signed integer
    inputs into [-1.0, 1.0].  Inputs that already have a float type are
    passed through with intensity values unchanged, so they may lie outside
    those ranges.

    """
    # `np.floating` (abstract) lets any existing float dtype pass through.
    return _convert(image, np.floating, force_copy=force_copy)
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def img_as_uint(image, force_copy=False):
    """Convert an image to 16-bit unsigned integer format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        If True, always return a copy even when ``image`` already has the
        requested dtype.

    Returns
    -------
    out : ndarray of uint16
        Output image.

    Notes
    -----
    Negative input values are clipped; the remaining positive values are
    scaled onto the range 0 to 65535.

    """
    return _convert(image, np.uint16, force_copy=force_copy)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
def img_as_int(image, force_copy=False):
    """Convert an image to 16-bit signed integer format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        If True, always return a copy even when ``image`` already has the
        requested dtype.

    Returns
    -------
    out : ndarray of int16
        Output image.

    Notes
    -----
    Values are scaled onto the range -32768 to 32767.  A positive-only
    input dtype (e.g., uint8) still produces an output with only positive
    values — inputs are not shifted to the negative side.

    """
    return _convert(image, np.int16, force_copy=force_copy)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
def img_as_ubyte(image, force_copy=False):
    """Convert an image to 8-bit unsigned integer format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        If True, always return a copy even when ``image`` already has the
        requested dtype.

    Returns
    -------
    out : ndarray of ubyte (uint8)
        Output image.

    Notes
    -----
    Negative input values are clipped; the remaining positive values are
    scaled onto the range 0 to 255.

    """
    return _convert(image, np.uint8, force_copy=force_copy)
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
def img_as_bool(image, force_copy=False):
    """Convert an image to boolean format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        If True, always return a copy even when ``image`` already has the
        requested dtype.

    Returns
    -------
    out : ndarray of bool (`bool_`)
        Output image.

    Notes
    -----
    Values in the upper half of the input dtype's positive range map to
    True; the lower half maps to False, as do all negative values (if the
    input dtype has any).

    """
    return _convert(image, bool, force_copy=force_copy)
|
vlmpy310/lib/python3.10/site-packages/skimage/util/lookfor.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
from .._vendored.numpy_lookfor import lookfor as _lookfor
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def lookfor(what):
    """Do a keyword search on scikit-image docstrings and print results.

    .. warning::

        This function may also print results that are not part of
        scikit-image's public API.

    Parameters
    ----------
    what : str
        Words to look for.

    Examples
    --------
    >>> import skimage as ski
    >>> ski.util.lookfor('regular_grid')
    Search results for 'regular_grid'
    ---------------------------------
    skimage.util.regular_grid
        Find `n_points` regularly spaced along `ar_shape`.
    skimage.util.lookfor
        Do a keyword search on scikit-image docstrings and print results.
    """
    # Search the whole top-level package; its name is derived from this
    # module's dotted path rather than hard-coded.
    top_level_package = __name__.split('.', 1)[0]
    return _lookfor(what, sys.modules[top_level_package])
|