Runs normally
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- 3rdparty/opencv/.gitattributes +62 -0
- 3rdparty/opencv/include/opencv2/calib3d.hpp +0 -0
- 3rdparty/opencv/include/opencv2/calib3d/calib3d.hpp +48 -0
- 3rdparty/opencv/include/opencv2/calib3d/calib3d_c.h +150 -0
- 3rdparty/opencv/include/opencv2/core.hpp +0 -0
- 3rdparty/opencv/include/opencv2/core/affine.hpp +678 -0
- 3rdparty/opencv/include/opencv2/core/async.hpp +101 -0
- 3rdparty/opencv/include/opencv2/core/base.hpp +682 -0
- 3rdparty/opencv/include/opencv2/core/bindings_utils.hpp +357 -0
- 3rdparty/opencv/include/opencv2/core/bufferpool.hpp +40 -0
- 3rdparty/opencv/include/opencv2/core/check.hpp +173 -0
- 3rdparty/opencv/include/opencv2/core/core.hpp +48 -0
- 3rdparty/opencv/include/opencv2/core/core_c.h +0 -0
- 3rdparty/opencv/include/opencv2/core/cuda.hpp +1339 -0
- 3rdparty/opencv/include/opencv2/core/cuda.inl.hpp +763 -0
- 3rdparty/opencv/include/opencv2/core/cuda/block.hpp +211 -0
- 3rdparty/opencv/include/opencv2/core/cuda/border_interpolate.hpp +722 -0
- 3rdparty/opencv/include/opencv2/core/cuda/color.hpp +309 -0
- 3rdparty/opencv/include/opencv2/core/cuda/common.hpp +131 -0
- 3rdparty/opencv/include/opencv2/core/cuda/datamov_utils.hpp +113 -0
- 3rdparty/opencv/include/opencv2/core/cuda/detail/color_detail.hpp +0 -0
- 3rdparty/opencv/include/opencv2/core/cuda/detail/reduce.hpp +394 -0
- 3rdparty/opencv/include/opencv2/core/cuda/detail/reduce_key_val.hpp +567 -0
- 3rdparty/opencv/include/opencv2/core/cuda/detail/transform_detail.hpp +392 -0
- 3rdparty/opencv/include/opencv2/core/cuda/detail/type_traits_detail.hpp +191 -0
- 3rdparty/opencv/include/opencv2/core/cuda/detail/vec_distance_detail.hpp +121 -0
- 3rdparty/opencv/include/opencv2/core/cuda/dynamic_smem.hpp +88 -0
- 3rdparty/opencv/include/opencv2/core/cuda/emulation.hpp +269 -0
- 3rdparty/opencv/include/opencv2/core/cuda/filters.hpp +293 -0
- 3rdparty/opencv/include/opencv2/core/cuda/funcattrib.hpp +79 -0
- 3rdparty/opencv/include/opencv2/core/cuda/functional.hpp +805 -0
- 3rdparty/opencv/include/opencv2/core/cuda/limits.hpp +128 -0
- 3rdparty/opencv/include/opencv2/core/cuda/reduce.hpp +230 -0
- 3rdparty/opencv/include/opencv2/core/cuda/saturate_cast.hpp +292 -0
- 3rdparty/opencv/include/opencv2/core/cuda/scan.hpp +258 -0
- 3rdparty/opencv/include/opencv2/core/cuda/simd_functions.hpp +869 -0
- 3rdparty/opencv/include/opencv2/core/cuda/transform.hpp +75 -0
- 3rdparty/opencv/include/opencv2/core/cuda/type_traits.hpp +90 -0
- 3rdparty/opencv/include/opencv2/core/cuda/utility.hpp +230 -0
- 3rdparty/opencv/include/opencv2/core/cuda/vec_distance.hpp +232 -0
- 3rdparty/opencv/include/opencv2/core/cuda/vec_math.hpp +923 -0
- 3rdparty/opencv/include/opencv2/core/cuda/vec_traits.hpp +288 -0
- 3rdparty/opencv/include/opencv2/core/cuda/warp.hpp +139 -0
- 3rdparty/opencv/include/opencv2/core/cuda/warp_reduce.hpp +76 -0
- 3rdparty/opencv/include/opencv2/core/cuda/warp_shuffle.hpp +162 -0
- 3rdparty/opencv/include/opencv2/core/cuda_stream_accessor.hpp +86 -0
- 3rdparty/opencv/include/opencv2/core/cuda_types.hpp +152 -0
- 3rdparty/opencv/include/opencv2/core/cv_cpu_dispatch.h +395 -0
- 3rdparty/opencv/include/opencv2/core/cv_cpu_helper.h +613 -0
- 3rdparty/opencv/include/opencv2/core/cvdef.h +948 -0
3rdparty/opencv/.gitattributes
ADDED
@@ -0,0 +1,62 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.dll filter=lfs diff=lfs merge=lfs -text
+*.lib filter=lfs diff=lfs merge=lfs -text
+*.exp filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mds filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
+# Video files - compressed
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
3rdparty/opencv/include/opencv2/calib3d.hpp
ADDED
The diff for this file is too large to render. See the raw diff.
3rdparty/opencv/include/opencv2/calib3d/calib3d.hpp
ADDED
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifdef __OPENCV_BUILD
+#error this is a compatibility header which should not be used inside the OpenCV library
+#endif
+
+#include "opencv2/calib3d.hpp"
3rdparty/opencv/include/opencv2/calib3d/calib3d_c.h
ADDED
@@ -0,0 +1,150 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_CALIB3D_C_H
+#define OPENCV_CALIB3D_C_H
+
+#include "opencv2/core/types_c.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Calculates fundamental matrix given a set of corresponding points */
+#define CV_FM_7POINT 1
+#define CV_FM_8POINT 2
+
+#define CV_LMEDS 4
+#define CV_RANSAC 8
+
+#define CV_FM_LMEDS_ONLY CV_LMEDS
+#define CV_FM_RANSAC_ONLY CV_RANSAC
+#define CV_FM_LMEDS CV_LMEDS
+#define CV_FM_RANSAC CV_RANSAC
+
+enum
+{
+CV_ITERATIVE = 0,
+CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
+CV_P3P = 2, // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
+CV_DLS = 3 // Joel A. Hesch and Stergios I. Roumeliotis. "A Direct Least-Squares (DLS) Method for PnP"
+};
+
+#define CV_CALIB_CB_ADAPTIVE_THRESH 1
+#define CV_CALIB_CB_NORMALIZE_IMAGE 2
+#define CV_CALIB_CB_FILTER_QUADS 4
+#define CV_CALIB_CB_FAST_CHECK 8
+
+#define CV_CALIB_USE_INTRINSIC_GUESS 1
+#define CV_CALIB_FIX_ASPECT_RATIO 2
+#define CV_CALIB_FIX_PRINCIPAL_POINT 4
+#define CV_CALIB_ZERO_TANGENT_DIST 8
+#define CV_CALIB_FIX_FOCAL_LENGTH 16
+#define CV_CALIB_FIX_K1 32
+#define CV_CALIB_FIX_K2 64
+#define CV_CALIB_FIX_K3 128
+#define CV_CALIB_FIX_K4 2048
+#define CV_CALIB_FIX_K5 4096
+#define CV_CALIB_FIX_K6 8192
+#define CV_CALIB_RATIONAL_MODEL 16384
+#define CV_CALIB_THIN_PRISM_MODEL 32768
+#define CV_CALIB_FIX_S1_S2_S3_S4 65536
+#define CV_CALIB_TILTED_MODEL 262144
+#define CV_CALIB_FIX_TAUX_TAUY 524288
+#define CV_CALIB_FIX_TANGENT_DIST 2097152
+
+#define CV_CALIB_NINTRINSIC 18
+
+#define CV_CALIB_FIX_INTRINSIC 256
+#define CV_CALIB_SAME_FOCAL_LENGTH 512
+
+#define CV_CALIB_ZERO_DISPARITY 1024
+
+/* stereo correspondence parameters and functions */
+#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
+#define CV_STEREO_BM_XSOBEL 1
+
+#ifdef __cplusplus
+} // extern "C"
+
+//////////////////////////////////////////////////////////////////////////////////////////
+class CV_EXPORTS CvLevMarq
+{
+public:
+CvLevMarq();
+CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
+cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+bool completeSymmFlag=false );
+~CvLevMarq();
+void init( int nparams, int nerrs, CvTermCriteria criteria=
+cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+bool completeSymmFlag=false );
+bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
+bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
+
+void clear();
+void step();
+enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
+
+cv::Ptr<CvMat> mask;
+cv::Ptr<CvMat> prevParam;
+cv::Ptr<CvMat> param;
+cv::Ptr<CvMat> J;
+cv::Ptr<CvMat> err;
+cv::Ptr<CvMat> JtJ;
+cv::Ptr<CvMat> JtJN;
+cv::Ptr<CvMat> JtErr;
+cv::Ptr<CvMat> JtJV;
+cv::Ptr<CvMat> JtJW;
+double prevErrNorm, errNorm;
+int lambdaLg10;
+CvTermCriteria criteria;
+int state;
+int iters;
+bool completeSymmFlag;
+int solveMethod;
+};
+
+#endif
+
+#endif /* OPENCV_CALIB3D_C_H */
3rdparty/opencv/include/opencv2/core.hpp
ADDED
The diff for this file is too large to render. See the raw diff.
3rdparty/opencv/include/opencv2/core/affine.hpp
ADDED
@@ -0,0 +1,678 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_CORE_AFFINE3_HPP
+#define OPENCV_CORE_AFFINE3_HPP
+
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+
+//! @addtogroup core_eigen
+//! @{
+
+/** @brief Affine transform
+*
+* It represents a 4x4 homogeneous transformation matrix \f$T\f$
+*
+* \f[T =
+* \begin{bmatrix}
+* R & t\\
+* 0 & 1\\
+* \end{bmatrix}
+* \f]
+*
+* where \f$R\f$ is a 3x3 rotation matrix and \f$t\f$ is a 3x1 translation vector.
+*
+* You can specify \f$R\f$ either by a 3x3 rotation matrix or by a 3x1 rotation vector,
+* which is converted to a 3x3 rotation matrix by the Rodrigues formula.
+*
+* To construct a matrix \f$T\f$ representing first rotation around the axis \f$r\f$ with rotation
+* angle \f$|r|\f$ in radian (right hand rule) and then translation by the vector \f$t\f$, you can use
+*
+* @code
+* cv::Vec3f r, t;
+* cv::Affine3f T(r, t);
+* @endcode
+*
+* If you already have the rotation matrix \f$R\f$, then you can use
+*
+* @code
+* cv::Matx33f R;
+* cv::Affine3f T(R, t);
+* @endcode
+*
+* To extract the rotation matrix \f$R\f$ from \f$T\f$, use
+*
+* @code
+* cv::Matx33f R = T.rotation();
+* @endcode
+*
+* To extract the translation vector \f$t\f$ from \f$T\f$, use
+*
+* @code
+* cv::Vec3f t = T.translation();
+* @endcode
+*
+* To extract the rotation vector \f$r\f$ from \f$T\f$, use
+*
+* @code
+* cv::Vec3f r = T.rvec();
+* @endcode
+*
+* Note that since the mapping from rotation vectors to rotation matrices
+* is many to one. The returned rotation vector is not necessarily the one
+* you used before to set the matrix.
+*
+* If you have two transformations \f$T = T_1 * T_2\f$, use
+*
+* @code
+* cv::Affine3f T, T1, T2;
+* T = T2.concatenate(T1);
+* @endcode
+*
+* To get the inverse transform of \f$T\f$, use
+*
+* @code
+* cv::Affine3f T, T_inv;
+* T_inv = T.inv();
+* @endcode
+*
+*/
+template<typename T>
+class Affine3
+{
+public:
+typedef T float_type;
+typedef Matx<float_type, 3, 3> Mat3;
+typedef Matx<float_type, 4, 4> Mat4;
+typedef Vec<float_type, 3> Vec3;
+
+//! Default constructor. It represents a 4x4 identity matrix.
+Affine3();
+
+//! Augmented affine matrix
+Affine3(const Mat4& affine);
+
+/**
+* The resulting 4x4 matrix is
+*
+* \f[
+* \begin{bmatrix}
+* R & t\\
+* 0 & 1\\
+* \end{bmatrix}
+* \f]
+*
+* @param R 3x3 rotation matrix.
+* @param t 3x1 translation vector.
+*/
+Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));
+
+/**
+* Rodrigues vector.
+*
+* The last row of the current matrix is set to [0,0,0,1].
+*
+* @param rvec 3x1 rotation vector. Its direction indicates the rotation axis and its length
+* indicates the rotation angle in radian (using right hand rule).
+* @param t 3x1 translation vector.
+*/
+Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
+
+/**
+* Combines all constructors above. Supports 4x4, 3x4, 3x3, 1x3, 3x1 sizes of data matrix.
+*
+* The last row of the current matrix is set to [0,0,0,1] when data is not 4x4.
+*
+* @param data 1-channel matrix.
+* when it is 4x4, it is copied to the current matrix and t is not used.
+* When it is 3x4, it is copied to the upper part 3x4 of the current matrix and t is not used.
+* When it is 3x3, it is copied to the upper left 3x3 part of the current matrix.
+* When it is 3x1 or 1x3, it is treated as a rotation vector and the Rodrigues formula is used
+* to compute a 3x3 rotation matrix.
+* @param t 3x1 translation vector. It is used only when data is neither 4x4 nor 3x4.
+*/
+explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));
+
+//! From 16-element array
+explicit Affine3(const float_type* vals);
+
+//! Create an 4x4 identity transform
+static Affine3 Identity();
+
+/**
+* Rotation matrix.
+*
+* Copy the rotation matrix to the upper left 3x3 part of the current matrix.
+* The remaining elements of the current matrix are not changed.
+*
+* @param R 3x3 rotation matrix.
+*
+*/
+void rotation(const Mat3& R);
+
+/**
+* Rodrigues vector.
+*
+* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
+*
+* @param rvec 3x1 rotation vector. The direction indicates the rotation axis and
+* its length indicates the rotation angle in radian (using the right thumb convention).
+*/
+void rotation(const Vec3& rvec);
+
+/**
+* Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix.
+*
+* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
+*
+* @param data 1-channel matrix.
+* When it is a 3x3 matrix, it sets the upper left 3x3 part of the current matrix.
+* When it is a 1x3 or 3x1 matrix, it is used as a rotation vector. The Rodrigues formula
+* is used to compute the rotation matrix and sets the upper left 3x3 part of the current matrix.
+*/
+void rotation(const Mat& data);
+
+/**
+* Copy the 3x3 matrix L to the upper left part of the current matrix
+*
+* It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
+*
+* @param L 3x3 matrix.
+*/
+void linear(const Mat3& L);
+
+/**
+* Copy t to the first three elements of the last column of the current matrix
+*
+* It sets the upper right 3x1 part of the matrix. The remaining part is unaffected.
+*
+* @param t 3x1 translation vector.
+*/
+void translation(const Vec3& t);
+
+//! @return the upper left 3x3 part
+Mat3 rotation() const;
+
+//! @return the upper left 3x3 part
+Mat3 linear() const;
+
+//! @return the upper right 3x1 part
+Vec3 translation() const;
+
+//! Rodrigues vector.
+//! @return a vector representing the upper left 3x3 rotation matrix of the current matrix.
+//! @warning Since the mapping between rotation vectors and rotation matrices is many to one,
+//! this function returns only one rotation vector that represents the current rotation matrix,
+//! which is not necessarily the same one set by `rotation(const Vec3& rvec)`.
+Vec3 rvec() const;
+
+//! @return the inverse of the current matrix.
+Affine3 inv(int method = cv::DECOMP_SVD) const;
+
+//! a.rotate(R) is equivalent to Affine(R, 0) * a;
+Affine3 rotate(const Mat3& R) const;
+
+//! a.rotate(rvec) is equivalent to Affine(rvec, 0) * a;
+Affine3 rotate(const Vec3& rvec) const;
+
+//! a.translate(t) is equivalent to Affine(E, t) * a, where E is an identity matrix
+Affine3 translate(const Vec3& t) const;
+
+//! a.concatenate(affine) is equivalent to affine * a;
+Affine3 concatenate(const Affine3& affine) const;
+
+template <typename Y> operator Affine3<Y>() const;
+
+template <typename Y> Affine3<Y> cast() const;
+
+Mat4 matrix;
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine);
+Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine);
+operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const;
+operator Eigen::Transform<T, 3, Eigen::Affine>() const;
+#endif
+};
+
+template<typename T> static
+Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);
+
+//! V is a 3-element vector with member fields x, y and z
+template<typename T, typename V> static
+V operator*(const Affine3<T>& affine, const V& vector);
+
+typedef Affine3<float> Affine3f;
+typedef Affine3<double> Affine3d;
+
+static Vec3f operator*(const Affine3f& affine, const Vec3f& vector);
+static Vec3d operator*(const Affine3d& affine, const Vec3d& vector);
+
+template<typename _Tp> class DataType< Affine3<_Tp> >
+{
+public:
+typedef Affine3<_Tp> value_type;
+typedef Affine3<typename DataType<_Tp>::work_type> work_type;
+typedef _Tp channel_type;
+
+enum { generic_type = 0,
+channels = 16,
+fmt = traits::SafeFmt<channel_type>::fmt + ((channels - 1) << 8)
+#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED
+,depth = DataType<channel_type>::depth
+,type = CV_MAKETYPE(depth, channels)
+#endif
+};
+
+typedef Vec<channel_type, channels> vec_type;
+};
+
+namespace traits {
+template<typename _Tp>
+struct Depth< Affine3<_Tp> > { enum { value = Depth<_Tp>::value }; };
+template<typename _Tp>
+struct Type< Affine3<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 16) }; };
+} // namespace
+
+//! @} core
+
+}
+
+//! @cond IGNORED
+
+///////////////////////////////////////////////////////////////////////////////////
+// Implementation
+
+template<typename T> inline
+cv::Affine3<T>::Affine3()
+: matrix(Mat4::eye())
+{}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Mat4& affine)
+: matrix(affine)
+{}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Mat3& R, const Vec3& t)
+{
+rotation(R);
+translation(t);
+matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Vec3& _rvec, const Vec3& t)
+{
+rotation(_rvec);
+translation(t);
+matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
+{
+CV_Assert(data.type() == cv::traits::Type<T>::value);
+CV_Assert(data.channels() == 1);
+
+if (data.cols == 4 && data.rows == 4)
+{
+data.copyTo(matrix);
+return;
+}
+else if (data.cols == 4 && data.rows == 3)
+{
+rotation(data(Rect(0, 0, 3, 3)));
+translation(data(Rect(3, 0, 1, 3)));
+}
+else
+{
+rotation(data);
+translation(t);
+}
+
+matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const float_type* vals) : matrix(vals)
+{}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::Identity()
+{
+return Affine3<T>(cv::Affine3<T>::Mat4::eye());
+}
+
+template<typename T> inline
+void cv::Affine3<T>::rotation(const Mat3& R)
+{
+linear(R);
+}
+
+template<typename T> inline
+void cv::Affine3<T>::rotation(const Vec3& _rvec)
+{
+double theta = norm(_rvec);
+
+if (theta < DBL_EPSILON)
+rotation(Mat3::eye());
+else
+{
+double c = std::cos(theta);
+double s = std::sin(theta);
+double c1 = 1. - c;
+double itheta = (theta != 0) ? 1./theta : 0.;
+
+Point3_<T> r = _rvec*itheta;
+
+Mat3 rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z );
+Mat3 r_x( 0, -r.z, r.y, r.z, 0, -r.x, -r.y, r.x, 0 );
+
+// R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
+// where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
+Mat3 R = c*Mat3::eye() + c1*rrt + s*r_x;
+
+rotation(R);
+}
+}
+
+//Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix;
+template<typename T> inline
+void cv::Affine3<T>::rotation(const cv::Mat& data)
+{
+CV_Assert(data.type() == cv::traits::Type<T>::value);
+CV_Assert(data.channels() == 1);
+
+if (data.cols == 3 && data.rows == 3)
+{
+Mat3 R;
+data.copyTo(R);
+rotation(R);
+}
+else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3))
+{
+Vec3 _rvec;
+data.reshape(1, 3).copyTo(_rvec);
+rotation(_rvec);
+}
+else
+CV_Error(Error::StsError, "Input matrix can only be 3x3, 1x3 or 3x1");
+}
+
+template<typename T> inline
+void cv::Affine3<T>::linear(const Mat3& L)
+{
+matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2];
+matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5];
+matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8];
+}
+
+template<typename T> inline
+void cv::Affine3<T>::translation(const Vec3& t)
+{
+matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2];
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Mat3 cv::Affine3<T>::rotation() const
+{
+return linear();
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Mat3 cv::Affine3<T>::linear() const
+{
+typename cv::Affine3<T>::Mat3 R;
+R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2];
+R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6];
+R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10];
+return R;
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Vec3 cv::Affine3<T>::translation() const
+{
+return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]);
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const
+{
+cv::Vec3d w;
+cv::Matx33d u, vt, R = rotation();
+cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A);
+R = u * vt;
+
+double rx = R.val[7] - R.val[5];
+double ry = R.val[2] - R.val[6];
+double rz = R.val[3] - R.val[1];
+
+double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
+double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
+c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
+double theta = std::acos(c);
+
+if( s < 1e-5 )
+{
+if( c > 0 )
+rx = ry = rz = 0;
+else
+{
+double t;
+t = (R.val[0] + 1) * 0.5;
+rx = std::sqrt(std::max(t, 0.0));
+t = (R.val[4] + 1) * 0.5;
+ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0);
+t = (R.val[8] + 1) * 0.5;
+rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? -1.0 : 1.0);
+
+if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) )
+rz = -rz;
+theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
+rx *= theta;
+ry *= theta;
+rz *= theta;
+}
+}
+else
+{
+double vth = 1/(2*s);
+vth *= theta;
+rx *= vth; ry *= vth; rz *= vth;
+}
+
+return cv::Vec3d(rx, ry, rz);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::inv(int method) const
+{
+return matrix.inv(method);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::rotate(const Mat3& R) const
+{
+Mat3 Lc = linear();
+Vec3 tc = translation();
+Mat4 result;
+result.val[12] = result.val[13] = result.val[14] = 0;
+result.val[15] = 1;
+
+for(int j = 0; j < 3; ++j)
+{
+for(int i = 0; i < 3; ++i)
+{
+float_type value = 0;
+for(int k = 0; k < 3; ++k)
+value += R(j, k) * Lc(k, i);
+result(j, i) = value;
+}
+
+result(j, 3) = R.row(j).dot(tc.t());
+}
+return result;
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::rotate(const Vec3& _rvec) const
+{
+return rotate(Affine3f(_rvec).rotation());
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::translate(const Vec3& t) const
+{
+Mat4 m = matrix;
+m.val[ 3] += t[0];
+m.val[ 7] += t[1];
+m.val[11] += t[2];
+return m;
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::concatenate(const Affine3<T>& affine) const
+{
+return (*this).rotate(affine.rotation()).translate(affine.translation());
+}
+
+template<typename T> template <typename Y> inline
+cv::Affine3<T>::operator Affine3<Y>() const
+{
+return Affine3<Y>(matrix);
+}
+
+template<typename T> template <typename Y> inline
+cv::Affine3<Y> cv::Affine3<T>::cast() const
+{
+return Affine3<Y>(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::operator*(const cv::Affine3<T>& affine1, const cv::Affine3<T>& affine2)
+{
+return affine2.concatenate(affine1);
+}
+
+template<typename T, typename V> inline
+V cv::operator*(const cv::Affine3<T>& affine, const V& v)
+{
+const typename Affine3<T>::Mat4& m = affine.matrix;
+
+V r;
+r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3];
+r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7];
+r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11];
+return r;
+}
+
+static inline
+cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v)
+{
+const cv::Matx44f& m = affine.matrix;
+cv::Vec3f r;
+r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+return r;
+}
+
+static inline
+cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
+{
+const cv::Matx44d& m = affine.matrix;
+cv::Vec3d r;
+r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+return r;
+}
+
+
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine)
+{
+cv::Mat(4, 4, cv::traits::Type<T>::value, affine.matrix().data()).copyTo(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine)
+{
+Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine;
+cv::Mat(4, 4, cv::traits::Type<T>::value, a.matrix().data()).copyTo(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const
+{
+Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> r;
+cv::Mat hdr(4, 4, cv::traits::Type<T>::value, r.matrix().data());
+cv::Mat(matrix, false).copyTo(hdr);
+return r;
+}
+
+template<typename T> inline
+cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine>() const
+{
+return this->operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>();
+}
+
+#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */
+
+//! @endcond
+
+#endif /* __cplusplus */
+
+#endif /* OPENCV_CORE_AFFINE3_HPP */
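The Doxygen comment at the top of affine.hpp already walks through the intended calls. Purely as an illustrative sketch (not part of the commit), and assuming the vendored headers behave like a stock OpenCV 4.x installation, a small program exercising cv::Affine3d could look like:

#include <opencv2/core.hpp>
#include <opencv2/core/affine.hpp>

int main()
{
    // Rotation of pi/2 around Z given as a Rodrigues vector, followed by translation (1, 0, 0).
    cv::Vec3d rvec(0, 0, CV_PI / 2);
    cv::Vec3d t(1, 0, 0);
    cv::Affine3d T(rvec, t);

    cv::Vec3d p = T * cv::Vec3d(1, 0, 0); // transforms a point: R*p + t
    cv::Affine3d T_inv = T.inv();         // inverse transform (DECOMP_SVD by default)
    cv::Vec3d r = T.rvec();               // recovered rotation vector (not unique, per the header's warning)
    (void)p; (void)T_inv; (void)r;
    return 0;
}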
3rdparty/opencv/include/opencv2/core/async.hpp
ADDED
@@ -0,0 +1,101 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_CORE_ASYNC_HPP
+#define OPENCV_CORE_ASYNC_HPP
+
+#include <opencv2/core/mat.hpp>
+
+//#include <future>
+#include <chrono>
+
+namespace cv {
+
+/** @addtogroup core_async
+
+@{
+*/
+
+
+/** @brief Returns result of asynchronous operations
+
+Object has attached asynchronous state.
+Assignment operator doesn't clone asynchronous state (it is shared between all instances).
+
+Result can be fetched via get() method only once.
+
+*/
+class CV_EXPORTS_W AsyncArray
+{
+public:
+~AsyncArray() CV_NOEXCEPT;
+CV_WRAP AsyncArray() CV_NOEXCEPT;
+AsyncArray(const AsyncArray& o) CV_NOEXCEPT;
+AsyncArray& operator=(const AsyncArray& o) CV_NOEXCEPT;
+CV_WRAP void release() CV_NOEXCEPT;
+
+/** Fetch the result.
+@param[out] dst destination array
+
+Waits for result until container has valid result.
+Throws exception if exception was stored as a result.
+
+Throws exception on invalid container state.
+
+@note Result or stored exception can be fetched only once.
+*/
+CV_WRAP void get(OutputArray dst) const;
+
+/** Retrieving the result with timeout
+@param[out] dst destination array
+@param[in] timeoutNs timeout in nanoseconds, -1 for infinite wait
+
+@returns true if result is ready, false if the timeout has expired
+
+@note Result or stored exception can be fetched only once.
+*/
+bool get(OutputArray dst, int64 timeoutNs) const;
+
+CV_WRAP inline
+bool get(OutputArray dst, double timeoutNs) const { return get(dst, (int64)timeoutNs); }
+
+bool wait_for(int64 timeoutNs) const;
+
+CV_WRAP inline
+bool wait_for(double timeoutNs) const { return wait_for((int64)timeoutNs); }
+
+CV_WRAP bool valid() const CV_NOEXCEPT;
+
+inline AsyncArray(AsyncArray&& o) { p = o.p; o.p = NULL; }
+inline AsyncArray& operator=(AsyncArray&& o) CV_NOEXCEPT { std::swap(p, o.p); return *this; }
+
+template<typename _Rep, typename _Period>
+inline bool get(OutputArray dst, const std::chrono::duration<_Rep, _Period>& timeout)
+{
+return get(dst, (int64)(std::chrono::nanoseconds(timeout).count()));
+}
+
+template<typename _Rep, typename _Period>
+inline bool wait_for(const std::chrono::duration<_Rep, _Period>& timeout)
+{
+return wait_for((int64)(std::chrono::nanoseconds(timeout).count()));
+}
+
+#if 0
+std::future<Mat> getFutureMat() const;
+std::future<UMat> getFutureUMat() const;
+#endif
+
+
+// PImpl
+struct Impl; friend struct Impl;
+inline void* _getImpl() const CV_NOEXCEPT { return p; }
+protected:
+Impl* p;
+};
+
+
+//! @}
+} // namespace
+#endif // OPENCV_CORE_ASYNC_HPP
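AsyncArray, declared above, is the future-like handle that OpenCV's asynchronous entry points hand back. As an illustrative sketch only (not part of the commit), assuming the handle came from some asynchronous OpenCV call, the wait-then-fetch pattern the header documents looks roughly like:

#include <chrono>
#include <opencv2/core/async.hpp>

// 'result' is assumed to have been produced elsewhere by an asynchronous OpenCV API.
void fetchResult(cv::AsyncArray& result, cv::Mat& out)
{
    using namespace std::chrono_literals;
    if (result.valid() && result.wait_for(500ms)) // poll with a timeout
        result.get(out);                          // fetch once; may rethrow a stored exception
}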
3rdparty/opencv/include/opencv2/core/base.hpp
ADDED
@@ -0,0 +1,682 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Copyright (C) 2014, Itseez Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_CORE_BASE_HPP
+#define OPENCV_CORE_BASE_HPP
+
+#ifndef __cplusplus
+# error base.hpp header must be compiled as C++
+#endif
+
+#include "opencv2/opencv_modules.hpp"
+
+#include <climits>
+#include <algorithm>
+
+#include "opencv2/core/cvdef.h"
+#include "opencv2/core/cvstd.hpp"
|
| 59 |
+
|
| 60 |
+
namespace cv
|
| 61 |
+
{
|
| 62 |
+
|
| 63 |
+
//! @addtogroup core_utils
|
| 64 |
+
//! @{
|
| 65 |
+
|
| 66 |
+
namespace Error {
|
| 67 |
+
//! error codes
|
| 68 |
+
enum Code {
|
| 69 |
+
StsOk= 0, //!< everything is ok
|
| 70 |
+
StsBackTrace= -1, //!< pseudo error for back trace
|
| 71 |
+
StsError= -2, //!< unknown /unspecified error
|
| 72 |
+
StsInternal= -3, //!< internal error (bad state)
|
| 73 |
+
StsNoMem= -4, //!< insufficient memory
|
| 74 |
+
StsBadArg= -5, //!< function arg/param is bad
|
| 75 |
+
StsBadFunc= -6, //!< unsupported function
|
| 76 |
+
StsNoConv= -7, //!< iteration didn't converge
|
| 77 |
+
StsAutoTrace= -8, //!< tracing
|
| 78 |
+
HeaderIsNull= -9, //!< image header is NULL
|
| 79 |
+
BadImageSize= -10, //!< image size is invalid
|
| 80 |
+
BadOffset= -11, //!< offset is invalid
|
| 81 |
+
BadDataPtr= -12, //!<
|
| 82 |
+
BadStep= -13, //!< image step is wrong, this may happen for a non-continuous matrix.
|
| 83 |
+
BadModelOrChSeq= -14, //!<
|
| 84 |
+
BadNumChannels= -15, //!< bad number of channels, for example, some functions accept only single channel matrices.
|
| 85 |
+
BadNumChannel1U= -16, //!<
|
| 86 |
+
BadDepth= -17, //!< input image depth is not supported by the function
|
| 87 |
+
BadAlphaChannel= -18, //!<
|
| 88 |
+
BadOrder= -19, //!< number of dimensions is out of range
|
| 89 |
+
BadOrigin= -20, //!< incorrect input origin
|
| 90 |
+
BadAlign= -21, //!< incorrect input align
|
| 91 |
+
BadCallBack= -22, //!<
|
| 92 |
+
BadTileSize= -23, //!<
|
| 93 |
+
BadCOI= -24, //!< input COI is not supported
|
| 94 |
+
BadROISize= -25, //!< incorrect input roi
|
| 95 |
+
MaskIsTiled= -26, //!<
|
| 96 |
+
StsNullPtr= -27, //!< null pointer
|
| 97 |
+
StsVecLengthErr= -28, //!< incorrect vector length
|
| 98 |
+
StsFilterStructContentErr= -29, //!< incorrect filter structure content
|
| 99 |
+
StsKernelStructContentErr= -30, //!< incorrect transform kernel content
|
| 100 |
+
StsFilterOffsetErr= -31, //!< incorrect filter offset value
|
| 101 |
+
StsBadSize= -201, //!< the input/output structure size is incorrect
|
| 102 |
+
StsDivByZero= -202, //!< division by zero
|
| 103 |
+
StsInplaceNotSupported= -203, //!< in-place operation is not supported
|
| 104 |
+
StsObjectNotFound= -204, //!< request can't be completed
|
| 105 |
+
StsUnmatchedFormats= -205, //!< formats of input/output arrays differ
|
| 106 |
+
StsBadFlag= -206, //!< flag is wrong or not supported
|
| 107 |
+
StsBadPoint= -207, //!< bad CvPoint
|
| 108 |
+
StsBadMask= -208, //!< bad format of mask (neither 8uC1 nor 8sC1)
|
| 109 |
+
StsUnmatchedSizes= -209, //!< sizes of input/output structures do not match
|
| 110 |
+
StsUnsupportedFormat= -210, //!< the data format/type is not supported by the function
|
| 111 |
+
StsOutOfRange= -211, //!< some of parameters are out of range
|
| 112 |
+
StsParseError= -212, //!< invalid syntax/structure of the parsed file
|
| 113 |
+
StsNotImplemented= -213, //!< the requested function/feature is not implemented
|
| 114 |
+
StsBadMemBlock= -214, //!< an allocated block has been corrupted
|
| 115 |
+
StsAssert= -215, //!< assertion failed
|
| 116 |
+
GpuNotSupported= -216, //!< no CUDA support
|
| 117 |
+
GpuApiCallError= -217, //!< GPU API call error
|
| 118 |
+
OpenGlNotSupported= -218, //!< no OpenGL support
|
| 119 |
+
OpenGlApiCallError= -219, //!< OpenGL API call error
|
| 120 |
+
OpenCLApiCallError= -220, //!< OpenCL API call error
|
| 121 |
+
OpenCLDoubleNotSupported= -221,
|
| 122 |
+
OpenCLInitError= -222, //!< OpenCL initialization error
|
| 123 |
+
OpenCLNoAMDBlasFft= -223
|
| 124 |
+
};
|
| 125 |
+
} //Error
|
| 126 |
+
|
| 127 |
+
//! @} core_utils
|
| 128 |
+
|
| 129 |
+
//! @addtogroup core_array
|
| 130 |
+
//! @{
|
| 131 |
+
|
| 132 |
+
//! matrix decomposition types
|
| 133 |
+
enum DecompTypes {
|
| 134 |
+
/** Gaussian elimination with the optimal pivot element chosen. */
|
| 135 |
+
DECOMP_LU = 0,
|
| 136 |
+
/** singular value decomposition (SVD) method; the system can be over-defined and/or the matrix
|
| 137 |
+
src1 can be singular */
|
| 138 |
+
DECOMP_SVD = 1,
|
| 139 |
+
/** eigenvalue decomposition; the matrix src1 must be symmetrical */
|
| 140 |
+
DECOMP_EIG = 2,
|
| 141 |
+
/** Cholesky \f$LL^T\f$ factorization; the matrix src1 must be symmetrical and positively
|
| 142 |
+
defined */
|
| 143 |
+
DECOMP_CHOLESKY = 3,
|
| 144 |
+
/** QR factorization; the system can be over-defined and/or the matrix src1 can be singular */
|
| 145 |
+
DECOMP_QR = 4,
|
| 146 |
+
/** while all the previous flags are mutually exclusive, this flag can be used together with
|
| 147 |
+
any of the previous; it means that the normal equations
|
| 148 |
+
\f$\texttt{src1}^T\cdot\texttt{src1}\cdot\texttt{dst}=\texttt{src1}^T\texttt{src2}\f$ are
|
| 149 |
+
solved instead of the original system
|
| 150 |
+
\f$\texttt{src1}\cdot\texttt{dst}=\texttt{src2}\f$ */
|
| 151 |
+
DECOMP_NORMAL = 16
|
| 152 |
+
};
|
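The DECOMP_NORMAL note above combines with the other flags through cv::solve, which is declared in core.hpp rather than in this header. A minimal, illustrative sketch follows; the wrapper name solveLeastSquares is made up and only the flag semantics come from the enum above.

#include <opencv2/core.hpp>

// Illustrative only: least-squares solve of an over-determined system A*x = b
// by switching to the normal equations A^T*A*x = A^T*b, as described above.
static bool solveLeastSquares(const cv::Mat& A, const cv::Mat& b, cv::Mat& x)
{
    // A^T*A is symmetric positive semi-definite, so Cholesky pairs naturally
    // with DECOMP_NORMAL; cv::solve returns false if the system is singular.
    return cv::solve(A, b, x, cv::DECOMP_CHOLESKY | cv::DECOMP_NORMAL);
}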
| 153 |
+
|
| 154 |
+
/** norm types
|
| 155 |
+
|
| 156 |
+
src1 and src2 denote input arrays.
|
| 157 |
+
*/
|
| 158 |
+
|
| 159 |
+
enum NormTypes {
|
| 160 |
+
/**
|
| 161 |
+
\f[
|
| 162 |
+
norm = \forkthree
|
| 163 |
+
{\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
|
| 164 |
+
{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
|
| 165 |
+
{\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_INF}\) }
|
| 166 |
+
\f]
|
| 167 |
+
*/
|
| 168 |
+
NORM_INF = 1,
|
| 169 |
+
/**
|
| 170 |
+
\f[
|
| 171 |
+
norm = \forkthree
|
| 172 |
+
{\| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\)}
|
| 173 |
+
{ \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
|
| 174 |
+
{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L1}\) }
|
| 175 |
+
\f]*/
|
| 176 |
+
NORM_L1 = 2,
|
| 177 |
+
/**
|
| 178 |
+
\f[
|
| 179 |
+
norm = \forkthree
|
| 180 |
+
{ \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }
|
| 181 |
+
{ \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }
|
| 182 |
+
{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2}\) }
|
| 183 |
+
\f]
|
| 184 |
+
*/
|
| 185 |
+
NORM_L2 = 4,
|
| 186 |
+
/**
|
| 187 |
+
\f[
|
| 188 |
+
norm = \forkthree
|
| 189 |
+
{ \| \texttt{src1} \| _{L_2} ^{2} = \sum_I \texttt{src1}(I)^2} {if \(\texttt{normType} = \texttt{NORM_L2SQR}\)}
|
| 190 |
+
{ \| \texttt{src1} - \texttt{src2} \| _{L_2} ^{2} = \sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2 }{if \(\texttt{normType} = \texttt{NORM_L2SQR}\) }
|
| 191 |
+
{ \left(\frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}}\right)^2 }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2SQR}\) }
|
| 192 |
+
\f]
|
| 193 |
+
*/
|
| 194 |
+
NORM_L2SQR = 5,
|
| 195 |
+
/**
|
| 196 |
+
In the case of one input array, calculates the Hamming distance of the array from zero,
|
| 197 |
+
In the case of two input arrays, calculates the Hamming distance between the arrays.
|
| 198 |
+
*/
|
| 199 |
+
NORM_HAMMING = 6,
|
| 200 |
+
/**
|
| 201 |
+
Similar to NORM_HAMMING, but in the calculation, each two bits of the input sequence will
|
| 202 |
+
be added and treated as a single bit to be used in the same calculation as NORM_HAMMING.
|
| 203 |
+
*/
|
| 204 |
+
NORM_HAMMING2 = 7,
|
| 205 |
+
NORM_TYPE_MASK = 7, //!< bit-mask which can be used to separate norm type from norm flags
|
| 206 |
+
NORM_RELATIVE = 8, //!< flag
|
| 207 |
+
NORM_MINMAX = 32 //!< flag
|
| 208 |
+
};
|
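As an illustration of how NORM_RELATIVE combines with a norm type, cv::norm (declared in core.hpp) accepts these values OR-ed together; relativeError below is a hypothetical helper, not OpenCV API.

#include <opencv2/core.hpp>

// Illustrative only: ||approx - reference||_L2 / ||reference||_L2,
// matching the NORM_RELATIVE | NORM_L2 case in the formulas above.
static double relativeError(const cv::Mat& approx, const cv::Mat& reference)
{
    return cv::norm(approx, reference, cv::NORM_L2 | cv::NORM_RELATIVE);
}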
| 209 |
+
|
| 210 |
+
//! comparison types
|
| 211 |
+
enum CmpTypes { CMP_EQ = 0, //!< src1 is equal to src2.
|
| 212 |
+
CMP_GT = 1, //!< src1 is greater than src2.
|
| 213 |
+
CMP_GE = 2, //!< src1 is greater than or equal to src2.
|
| 214 |
+
CMP_LT = 3, //!< src1 is less than src2.
|
| 215 |
+
CMP_LE = 4, //!< src1 is less than or equal to src2.
|
| 216 |
+
CMP_NE = 5 //!< src1 is unequal to src2.
|
| 217 |
+
};
|
| 218 |
+
|
| 219 |
+
//! generalized matrix multiplication flags
|
| 220 |
+
enum GemmFlags { GEMM_1_T = 1, //!< transposes src1
|
| 221 |
+
GEMM_2_T = 2, //!< transposes src2
|
| 222 |
+
GEMM_3_T = 4 //!< transposes src3
|
| 223 |
+
};
|
| 224 |
+
|
| 225 |
+
enum DftFlags {
|
| 226 |
+
/** performs an inverse 1D or 2D transform instead of the default forward
|
| 227 |
+
transform. */
|
| 228 |
+
DFT_INVERSE = 1,
|
| 229 |
+
/** scales the result: divide it by the number of array elements. Normally, it is
|
| 230 |
+
combined with DFT_INVERSE. */
|
| 231 |
+
DFT_SCALE = 2,
|
| 232 |
+
/** performs a forward or inverse transform of every individual row of the input
|
| 233 |
+
matrix; this flag enables you to transform multiple vectors simultaneously and can be used to
|
| 234 |
+
decrease the overhead (which is sometimes several times larger than the processing itself) to
|
| 235 |
+
perform 3D and higher-dimensional transformations and so forth.*/
|
| 236 |
+
DFT_ROWS = 4,
|
| 237 |
+
/** performs a forward transformation of 1D or 2D real array; the result,
|
| 238 |
+
though being a complex array, has complex-conjugate symmetry (*CCS*, see the function
|
| 239 |
+
description below for details), and such an array can be packed into a real array of the same
|
| 240 |
+
size as input, which is the fastest option and which is what the function does by default;
|
| 241 |
+
however, you may wish to get a full complex array (for simpler spectrum analysis, and so on) -
|
| 242 |
+
pass the flag to enable the function to produce a full-size complex output array. */
|
| 243 |
+
DFT_COMPLEX_OUTPUT = 16,
|
| 244 |
+
/** performs an inverse transformation of a 1D or 2D complex array; the
|
| 245 |
+
result is normally a complex array of the same size, however, if the input array has
|
| 246 |
+
conjugate-complex symmetry (for example, it is a result of forward transformation with
|
| 247 |
+
DFT_COMPLEX_OUTPUT flag), the output is a real array; while the function itself does not
|
| 248 |
+
check whether the input is symmetrical or not, you can pass the flag and then the function
|
| 249 |
+
will assume the symmetry and produce the real output array (note that when the input is packed
|
| 250 |
+
into a real array and inverse transformation is executed, the function treats the input as a
|
| 251 |
+
packed complex-conjugate symmetrical array, and the output will also be a real array). */
|
| 252 |
+
DFT_REAL_OUTPUT = 32,
|
| 253 |
+
/** specifies that input is complex input. If this flag is set, the input must have 2 channels.
|
| 254 |
+
On the other hand, for backwards compatibility reason, if input has 2 channels, input is
|
| 255 |
+
already considered complex. */
|
| 256 |
+
DFT_COMPLEX_INPUT = 64,
|
| 257 |
+
/** performs an inverse 1D or 2D transform instead of the default forward transform. */
|
| 258 |
+
DCT_INVERSE = DFT_INVERSE,
|
| 259 |
+
/** performs a forward or inverse transform of every individual row of the input
|
| 260 |
+
matrix. This flag enables you to transform multiple vectors simultaneously and can be used to
|
| 261 |
+
decrease the overhead (which is sometimes several times larger than the processing itself) to
|
| 262 |
+
perform 3D and higher-dimensional transforms and so forth.*/
|
| 263 |
+
DCT_ROWS = DFT_ROWS
|
| 264 |
+
};
|
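A short illustrative sketch of the DFT flags above as consumed by cv::dft (declared in core.hpp); spectrumRoundTrip is a made-up helper name.

#include <opencv2/core.hpp>

// Illustrative only: forward transform of a real image into a full complex
// spectrum, then a scaled inverse transform back to a real array.
static void spectrumRoundTrip(const cv::Mat& realInput, cv::Mat& spectrum, cv::Mat& restored)
{
    cv::dft(realInput, spectrum, cv::DFT_COMPLEX_OUTPUT);
    cv::dft(spectrum, restored, cv::DFT_INVERSE | cv::DFT_SCALE | cv::DFT_REAL_OUTPUT);
}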
| 265 |
+
|
| 266 |
+
//! Various border types, image boundaries are denoted with `|`
|
| 267 |
+
//! @see borderInterpolate, copyMakeBorder
|
| 268 |
+
enum BorderTypes {
|
| 269 |
+
BORDER_CONSTANT = 0, //!< `iiiiii|abcdefgh|iiiiiii` with some specified `i`
|
| 270 |
+
BORDER_REPLICATE = 1, //!< `aaaaaa|abcdefgh|hhhhhhh`
|
| 271 |
+
BORDER_REFLECT = 2, //!< `fedcba|abcdefgh|hgfedcb`
|
| 272 |
+
BORDER_WRAP = 3, //!< `cdefgh|abcdefgh|abcdefg`
|
| 273 |
+
BORDER_REFLECT_101 = 4, //!< `gfedcb|abcdefgh|gfedcba`
|
| 274 |
+
BORDER_TRANSPARENT = 5, //!< `uvwxyz|abcdefgh|ijklmno` - Treats outliers as transparent.
|
| 275 |
+
|
| 276 |
+
BORDER_REFLECT101 = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101
|
| 277 |
+
BORDER_DEFAULT = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101
|
| 278 |
+
BORDER_ISOLATED = 16 //!< Interpolation restricted within the ROI boundaries.
|
| 279 |
+
};
|
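To make the border codes above concrete, here is an illustrative call to cv::copyMakeBorder (declared in core.hpp); padExample is a made-up helper.

#include <opencv2/core.hpp>

// Illustrative only: pad an image by two pixels on each side.
static void padExample(const cv::Mat& src, cv::Mat& dst)
{
    // replicate the outermost pixels: aaaaaa|abcdefgh|hhhhhhh
    cv::copyMakeBorder(src, dst, 2, 2, 2, 2, cv::BORDER_REPLICATE);

    // or a constant zero border: iiiiii|abcdefgh|iiiiiii with i = 0
    cv::copyMakeBorder(src, dst, 2, 2, 2, 2, cv::BORDER_CONSTANT, cv::Scalar::all(0));
}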
| 280 |
+
|
| 281 |
+
//! @} core_array
|
| 282 |
+
|
| 283 |
+
//! @addtogroup core_utils
|
| 284 |
+
//! @{
|
| 285 |
+
|
| 286 |
+
/*! @brief Signals an error and raises the exception.
|
| 287 |
+
|
| 288 |
+
By default the function prints information about the error to stderr,
|
| 289 |
+
then it either stops if setBreakOnError() had been called before or raises the exception.
|
| 290 |
+
It is possible to alternate error processing by using redirectError().
|
| 291 |
+
@param code - error code (Error::Code)
|
| 292 |
+
@param err - error description
|
| 293 |
+
@param func - function name. Available only when the compiler supports getting it
|
| 294 |
+
@param file - source file name where the error has occurred
|
| 295 |
+
@param line - line number in the source file where the error has occurred
|
| 296 |
+
@see CV_Error, CV_Error_, CV_Assert, CV_DbgAssert
|
| 297 |
+
*/
|
| 298 |
+
CV_EXPORTS CV_NORETURN void error(int code, const String& err, const char* func, const char* file, int line);
|
| 299 |
+
|
| 300 |
+
/*! @brief Signals an error and terminate application.
|
| 301 |
+
|
| 302 |
+
By default the function prints information about the error to stderr, then it terminates application
|
| 303 |
+
with std::terminate. The function is designed for invariants check in functions and methods with
|
| 304 |
+
noexcept attribute.
|
| 305 |
+
@param code - error code (Error::Code)
|
| 306 |
+
@param err - error description
|
| 307 |
+
@param func - function name. Available only when the compiler supports getting it
|
| 308 |
+
@param file - source file name where the error has occurred
|
| 309 |
+
@param line - line number in the source file where the error has occurred
|
| 310 |
+
@see CV_AssertTerminate
|
| 311 |
+
*/
|
| 312 |
+
CV_EXPORTS CV_NORETURN void terminate(int code, const String& err, const char* func, const char* file, int line) CV_NOEXCEPT;
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
#ifdef CV_STATIC_ANALYSIS
|
| 316 |
+
|
| 317 |
+
// In practice, some macro are not processed correctly (noreturn is not detected).
|
| 318 |
+
// We need to use simplified definition for them.
|
| 319 |
+
#define CV_Error(code, msg) do { (void)(code); (void)(msg); abort(); } while (0)
|
| 320 |
+
#define CV_Error_(code, args) do { (void)(code); (void)(cv::format args); abort(); } while (0)
|
| 321 |
+
#define CV_Assert( expr ) do { if (!(expr)) abort(); } while (0)
|
| 322 |
+
|
| 323 |
+
#else // CV_STATIC_ANALYSIS
|
| 324 |
+
|
| 325 |
+
/** @brief Call the error handler.
|
| 326 |
+
|
| 327 |
+
Currently, the error handler prints the error code and the error message to the standard
|
| 328 |
+
error stream `stderr`. In the Debug configuration, it then provokes memory access violation, so that
|
| 329 |
+
the execution stack and all the parameters can be analyzed by the debugger. In the Release
|
| 330 |
+
configuration, the exception is thrown.
|
| 331 |
+
|
| 332 |
+
@param code one of Error::Code
|
| 333 |
+
@param msg error message
|
| 334 |
+
*/
|
| 335 |
+
#define CV_Error( code, msg ) cv::error( code, msg, CV_Func, __FILE__, __LINE__ )
|
| 336 |
+
|
| 337 |
+
/** @brief Call the error handler.
|
| 338 |
+
|
| 339 |
+
This macro can be used to construct an error message on-fly to include some dynamic information,
|
| 340 |
+
for example:
|
| 341 |
+
@code
|
| 342 |
+
// note the extra parentheses around the formatted text message
|
| 343 |
+
CV_Error_(Error::StsOutOfRange,
|
| 344 |
+
("the value at (%d, %d)=%g is out of range", badPt.x, badPt.y, badValue));
|
| 345 |
+
@endcode
|
| 346 |
+
@param code one of Error::Code
|
| 347 |
+
@param args printf-like formatted error message in parentheses
|
| 348 |
+
*/
|
| 349 |
+
#define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ )
|
| 350 |
+
|
| 351 |
+
/** @brief Checks a condition at runtime and throws exception if it fails
|
| 352 |
+
|
| 353 |
+
The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros
|
| 354 |
+
raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release
|
| 355 |
+
configurations while CV_DbgAssert is only retained in the Debug configuration.
|
| 356 |
+
CV_AssertTerminate is analog of CV_Assert for invariants check in functions with noexcept attribute.
|
| 357 |
+
It does not throw exception, but terminates the application.
|
| 358 |
+
*/
|
| 359 |
+
#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0)
|
| 360 |
+
#define CV_AssertTerminate( expr ) do { if(!!(expr)) ; else cv::terminate( #expr, CV_Func, __FILE__, __LINE__ ); } while(0)
|
| 361 |
+
|
| 362 |
+
#endif // CV_STATIC_ANALYSIS
|
| 363 |
+
|
| 364 |
+
//! @cond IGNORED
|
| 365 |
+
#if !defined(__OPENCV_BUILD) // TODO: backward compatibility only
|
| 366 |
+
#ifndef CV_ErrorNoReturn
|
| 367 |
+
#define CV_ErrorNoReturn CV_Error
|
| 368 |
+
#endif
|
| 369 |
+
#ifndef CV_ErrorNoReturn_
|
| 370 |
+
#define CV_ErrorNoReturn_ CV_Error_
|
| 371 |
+
#endif
|
| 372 |
+
#endif
|
| 373 |
+
|
| 374 |
+
#define CV_Assert_1 CV_Assert
|
| 375 |
+
#define CV_Assert_2( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_1( __VA_ARGS__ ))
|
| 376 |
+
#define CV_Assert_3( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_2( __VA_ARGS__ ))
|
| 377 |
+
#define CV_Assert_4( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_3( __VA_ARGS__ ))
|
| 378 |
+
#define CV_Assert_5( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_4( __VA_ARGS__ ))
|
| 379 |
+
#define CV_Assert_6( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_5( __VA_ARGS__ ))
|
| 380 |
+
#define CV_Assert_7( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_6( __VA_ARGS__ ))
|
| 381 |
+
#define CV_Assert_8( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_7( __VA_ARGS__ ))
|
| 382 |
+
#define CV_Assert_9( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_8( __VA_ARGS__ ))
|
| 383 |
+
#define CV_Assert_10( expr, ... ) CV_Assert_1(expr); __CV_EXPAND(CV_Assert_9( __VA_ARGS__ ))
|
| 384 |
+
|
| 385 |
+
#define CV_Assert_N(...) do { __CV_EXPAND(__CV_CAT(CV_Assert_, __CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__)); } while(0)
|
| 386 |
+
|
| 387 |
+
//! @endcond
|
| 388 |
+
|
| 389 |
+
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
|
| 390 |
+
# define CV_DbgAssert(expr) CV_Assert(expr)
|
| 391 |
+
#else
|
| 392 |
+
/** replaced with CV_Assert(expr) in Debug configuration */
|
| 393 |
+
# define CV_DbgAssert(expr)
|
| 394 |
+
#endif
|
| 395 |
+
|
| 396 |
+
/*
|
| 397 |
+
* Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
|
| 398 |
+
* bit count of A exclusive XOR'ed with B
|
| 399 |
+
*/
|
| 400 |
+
struct CV_EXPORTS Hamming
|
| 401 |
+
{
|
| 402 |
+
static const NormTypes normType = NORM_HAMMING;
|
| 403 |
+
typedef unsigned char ValueType;
|
| 404 |
+
typedef int ResultType;
|
| 405 |
+
|
| 406 |
+
/** this will count the bits in a ^ b
|
| 407 |
+
*/
|
| 408 |
+
ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const;
|
| 409 |
+
};
|
| 410 |
+
|
| 411 |
+
typedef Hamming HammingLUT;
|
| 412 |
+
|
| 413 |
+
/////////////////////////////////// inline norms ////////////////////////////////////
|
| 414 |
+
|
| 415 |
+
template<typename _Tp> inline _Tp cv_abs(_Tp x) { return std::abs(x); }
|
| 416 |
+
inline int cv_abs(uchar x) { return x; }
|
| 417 |
+
inline int cv_abs(schar x) { return std::abs(x); }
|
| 418 |
+
inline int cv_abs(ushort x) { return x; }
|
| 419 |
+
inline int cv_abs(short x) { return std::abs(x); }
|
| 420 |
+
|
| 421 |
+
template<typename _Tp, typename _AccTp> static inline
|
| 422 |
+
_AccTp normL2Sqr(const _Tp* a, int n)
|
| 423 |
+
{
|
| 424 |
+
_AccTp s = 0;
|
| 425 |
+
int i=0;
|
| 426 |
+
#if CV_ENABLE_UNROLLED
|
| 427 |
+
for( ; i <= n - 4; i += 4 )
|
| 428 |
+
{
|
| 429 |
+
_AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3];
|
| 430 |
+
s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
|
| 431 |
+
}
|
| 432 |
+
#endif
|
| 433 |
+
for( ; i < n; i++ )
|
| 434 |
+
{
|
| 435 |
+
_AccTp v = a[i];
|
| 436 |
+
s += v*v;
|
| 437 |
+
}
|
| 438 |
+
return s;
|
| 439 |
+
}
|
| 440 |
+
|
| 441 |
+
template<typename _Tp, typename _AccTp> static inline
|
| 442 |
+
_AccTp normL1(const _Tp* a, int n)
|
| 443 |
+
{
|
| 444 |
+
_AccTp s = 0;
|
| 445 |
+
int i = 0;
|
| 446 |
+
#if CV_ENABLE_UNROLLED
|
| 447 |
+
for(; i <= n - 4; i += 4 )
|
| 448 |
+
{
|
| 449 |
+
s += (_AccTp)cv_abs(a[i]) + (_AccTp)cv_abs(a[i+1]) +
|
| 450 |
+
(_AccTp)cv_abs(a[i+2]) + (_AccTp)cv_abs(a[i+3]);
|
| 451 |
+
}
|
| 452 |
+
#endif
|
| 453 |
+
for( ; i < n; i++ )
|
| 454 |
+
s += cv_abs(a[i]);
|
| 455 |
+
return s;
|
| 456 |
+
}
|
| 457 |
+
|
| 458 |
+
template<typename _Tp, typename _AccTp> static inline
|
| 459 |
+
_AccTp normInf(const _Tp* a, int n)
|
| 460 |
+
{
|
| 461 |
+
_AccTp s = 0;
|
| 462 |
+
for( int i = 0; i < n; i++ )
|
| 463 |
+
s = std::max(s, (_AccTp)cv_abs(a[i]));
|
| 464 |
+
return s;
|
| 465 |
+
}
|
| 466 |
+
|
| 467 |
+
template<typename _Tp, typename _AccTp> static inline
|
| 468 |
+
_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n)
|
| 469 |
+
{
|
| 470 |
+
_AccTp s = 0;
|
| 471 |
+
int i= 0;
|
| 472 |
+
#if CV_ENABLE_UNROLLED
|
| 473 |
+
for(; i <= n - 4; i += 4 )
|
| 474 |
+
{
|
| 475 |
+
_AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
|
| 476 |
+
s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
|
| 477 |
+
}
|
| 478 |
+
#endif
|
| 479 |
+
for( ; i < n; i++ )
|
| 480 |
+
{
|
| 481 |
+
_AccTp v = _AccTp(a[i] - b[i]);
|
| 482 |
+
s += v*v;
|
| 483 |
+
}
|
| 484 |
+
return s;
|
| 485 |
+
}
|
| 486 |
+
|
| 487 |
+
static inline float normL2Sqr(const float* a, const float* b, int n)
|
| 488 |
+
{
|
| 489 |
+
float s = 0.f;
|
| 490 |
+
for( int i = 0; i < n; i++ )
|
| 491 |
+
{
|
| 492 |
+
float v = a[i] - b[i];
|
| 493 |
+
s += v*v;
|
| 494 |
+
}
|
| 495 |
+
return s;
|
| 496 |
+
}
|
| 497 |
+
|
| 498 |
+
template<typename _Tp, typename _AccTp> static inline
|
| 499 |
+
_AccTp normL1(const _Tp* a, const _Tp* b, int n)
|
| 500 |
+
{
|
| 501 |
+
_AccTp s = 0;
|
| 502 |
+
int i= 0;
|
| 503 |
+
#if CV_ENABLE_UNROLLED
|
| 504 |
+
for(; i <= n - 4; i += 4 )
|
| 505 |
+
{
|
| 506 |
+
_AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
|
| 507 |
+
s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3);
|
| 508 |
+
}
|
| 509 |
+
#endif
|
| 510 |
+
for( ; i < n; i++ )
|
| 511 |
+
{
|
| 512 |
+
_AccTp v = _AccTp(a[i] - b[i]);
|
| 513 |
+
s += std::abs(v);
|
| 514 |
+
}
|
| 515 |
+
return s;
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
inline float normL1(const float* a, const float* b, int n)
|
| 519 |
+
{
|
| 520 |
+
float s = 0.f;
|
| 521 |
+
for( int i = 0; i < n; i++ )
|
| 522 |
+
{
|
| 523 |
+
s += std::abs(a[i] - b[i]);
|
| 524 |
+
}
|
| 525 |
+
return s;
|
| 526 |
+
}
|
| 527 |
+
|
| 528 |
+
inline int normL1(const uchar* a, const uchar* b, int n)
|
| 529 |
+
{
|
| 530 |
+
int s = 0;
|
| 531 |
+
for( int i = 0; i < n; i++ )
|
| 532 |
+
{
|
| 533 |
+
s += std::abs(a[i] - b[i]);
|
| 534 |
+
}
|
| 535 |
+
return s;
|
| 536 |
+
}
|
| 537 |
+
|
| 538 |
+
template<typename _Tp, typename _AccTp> static inline
|
| 539 |
+
_AccTp normInf(const _Tp* a, const _Tp* b, int n)
|
| 540 |
+
{
|
| 541 |
+
_AccTp s = 0;
|
| 542 |
+
for( int i = 0; i < n; i++ )
|
| 543 |
+
{
|
| 544 |
+
_AccTp v0 = a[i] - b[i];
|
| 545 |
+
s = std::max(s, std::abs(v0));
|
| 546 |
+
}
|
| 547 |
+
return s;
|
| 548 |
+
}
|
| 549 |
+
|
| 550 |
+
/** @brief Computes the cube root of an argument.
|
| 551 |
+
|
| 552 |
+
The function cubeRoot computes \f$\sqrt[3]{\texttt{val}}\f$. Negative arguments are handled correctly.
|
| 553 |
+
NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for
|
| 554 |
+
single-precision data.
|
| 555 |
+
@param val A function argument.
|
| 556 |
+
*/
|
| 557 |
+
CV_EXPORTS_W float cubeRoot(float val);
|
| 558 |
+
|
| 559 |
+
/** @overload
|
| 560 |
+
|
| 561 |
+
cubeRoot with argument of `double` type calls `std::cbrt(double)`
|
| 562 |
+
*/
|
| 563 |
+
static inline
|
| 564 |
+
double cubeRoot(double val)
|
| 565 |
+
{
|
| 566 |
+
return std::cbrt(val);
|
| 567 |
+
}
|
| 568 |
+
|
| 569 |
+
/** @brief Calculates the angle of a 2D vector in degrees.
|
| 570 |
+
|
| 571 |
+
The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured
|
| 572 |
+
in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees.
|
| 573 |
+
@param x x-coordinate of the vector.
|
| 574 |
+
@param y y-coordinate of the vector.
|
| 575 |
+
*/
|
| 576 |
+
CV_EXPORTS_W float fastAtan2(float y, float x);
|
| 577 |
+
|
| 578 |
+
/** proxy for hal::LU */
|
| 579 |
+
CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);
|
| 580 |
+
/** proxy for hal::LU */
|
| 581 |
+
CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);
|
| 582 |
+
/** proxy for hal::Cholesky */
|
| 583 |
+
CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);
|
| 584 |
+
/** proxy for hal::Cholesky */
|
| 585 |
+
CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);
|
| 586 |
+
|
| 587 |
+
////////////////// forward declarations for important OpenCV types //////////////////
|
| 588 |
+
|
| 589 |
+
//! @cond IGNORED
|
| 590 |
+
|
| 591 |
+
template<typename _Tp, int cn> class Vec;
|
| 592 |
+
template<typename _Tp, int m, int n> class Matx;
|
| 593 |
+
|
| 594 |
+
template<typename _Tp> class Complex;
|
| 595 |
+
template<typename _Tp> class Point_;
|
| 596 |
+
template<typename _Tp> class Point3_;
|
| 597 |
+
template<typename _Tp> class Size_;
|
| 598 |
+
template<typename _Tp> class Rect_;
|
| 599 |
+
template<typename _Tp> class Scalar_;
|
| 600 |
+
|
| 601 |
+
class CV_EXPORTS RotatedRect;
|
| 602 |
+
class CV_EXPORTS Range;
|
| 603 |
+
class CV_EXPORTS TermCriteria;
|
| 604 |
+
class CV_EXPORTS KeyPoint;
|
| 605 |
+
class CV_EXPORTS DMatch;
|
| 606 |
+
class CV_EXPORTS RNG;
|
| 607 |
+
|
| 608 |
+
class CV_EXPORTS Mat;
|
| 609 |
+
class CV_EXPORTS MatExpr;
|
| 610 |
+
|
| 611 |
+
class CV_EXPORTS UMat;
|
| 612 |
+
|
| 613 |
+
class CV_EXPORTS SparseMat;
|
| 614 |
+
typedef Mat MatND;
|
| 615 |
+
|
| 616 |
+
template<typename _Tp> class Mat_;
|
| 617 |
+
template<typename _Tp> class SparseMat_;
|
| 618 |
+
|
| 619 |
+
class CV_EXPORTS MatConstIterator;
|
| 620 |
+
class CV_EXPORTS SparseMatIterator;
|
| 621 |
+
class CV_EXPORTS SparseMatConstIterator;
|
| 622 |
+
template<typename _Tp> class MatIterator_;
|
| 623 |
+
template<typename _Tp> class MatConstIterator_;
|
| 624 |
+
template<typename _Tp> class SparseMatIterator_;
|
| 625 |
+
template<typename _Tp> class SparseMatConstIterator_;
|
| 626 |
+
|
| 627 |
+
namespace ogl
|
| 628 |
+
{
|
| 629 |
+
class CV_EXPORTS Buffer;
|
| 630 |
+
class CV_EXPORTS Texture2D;
|
| 631 |
+
class CV_EXPORTS Arrays;
|
| 632 |
+
}
|
| 633 |
+
|
| 634 |
+
namespace cuda
|
| 635 |
+
{
|
| 636 |
+
class CV_EXPORTS GpuMat;
|
| 637 |
+
class CV_EXPORTS HostMem;
|
| 638 |
+
class CV_EXPORTS Stream;
|
| 639 |
+
class CV_EXPORTS Event;
|
| 640 |
+
}
|
| 641 |
+
|
| 642 |
+
namespace cudev
|
| 643 |
+
{
|
| 644 |
+
template <typename _Tp> class GpuMat_;
|
| 645 |
+
}
|
| 646 |
+
|
| 647 |
+
namespace ipp
|
| 648 |
+
{
|
| 649 |
+
CV_EXPORTS unsigned long long getIppFeatures();
|
| 650 |
+
CV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL,
|
| 651 |
+
int line = 0);
|
| 652 |
+
CV_EXPORTS int getIppStatus();
|
| 653 |
+
CV_EXPORTS String getIppErrorLocation();
|
| 654 |
+
CV_EXPORTS_W bool useIPP();
|
| 655 |
+
CV_EXPORTS_W void setUseIPP(bool flag);
|
| 656 |
+
CV_EXPORTS_W String getIppVersion();
|
| 657 |
+
|
| 658 |
+
// IPP Not-Exact mode. This function may force use of IPP then both IPP and OpenCV provide proper results
|
| 659 |
+
// but have internal accuracy differences which have too much direct or indirect impact on accuracy tests.
|
| 660 |
+
CV_EXPORTS_W bool useIPP_NotExact();
|
| 661 |
+
CV_EXPORTS_W void setUseIPP_NotExact(bool flag);
|
| 662 |
+
#ifndef DISABLE_OPENCV_3_COMPATIBILITY
|
| 663 |
+
static inline bool useIPP_NE() { return useIPP_NotExact(); }
|
| 664 |
+
static inline void setUseIPP_NE(bool flag) { setUseIPP_NotExact(flag); }
|
| 665 |
+
#endif
|
| 666 |
+
|
| 667 |
+
} // ipp
|
| 668 |
+
|
| 669 |
+
//! @endcond
|
| 670 |
+
|
| 671 |
+
//! @} core_utils
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
} // cv
|
| 677 |
+
|
| 678 |
+
#include "opencv2/core/neon_utils.hpp"
|
| 679 |
+
#include "opencv2/core/vsx_utils.hpp"
|
| 680 |
+
#include "opencv2/core/check.hpp"
|
| 681 |
+
|
| 682 |
+
#endif //OPENCV_CORE_BASE_HPP
|
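Before moving to the next file, a small illustrative sketch of the error macros and inline norm helpers declared in base.hpp above; checkDescriptor and its validation policy are made up, only CV_Assert, CV_Error_ and normL2Sqr come from the header.

#include <opencv2/core.hpp>
#include <vector>

// Illustrative only: validate a descriptor and return its squared L2 norm.
static float checkDescriptor(const std::vector<float>& desc, int expectedSize)
{
    // CV_Assert raises an error with code cv::Error::StsAssert when the condition is false
    CV_Assert(!desc.empty());

    if ((int)desc.size() != expectedSize)
    {
        // note the extra parentheses around the formatted message (see the CV_Error_ docs above)
        CV_Error_(cv::Error::StsVecLengthErr,
                  ("descriptor has %d elements, expected %d", (int)desc.size(), expectedSize));
    }

    // inline helper from this header: sum of squares over the raw pointer
    return cv::normL2Sqr<float, float>(desc.data(), (int)desc.size());
}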
3rdparty/opencv/include/opencv2/core/bindings_utils.hpp
ADDED
|
@@ -0,0 +1,357 @@
|
| 1 |
+
// This file is part of OpenCV project.
|
| 2 |
+
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
| 3 |
+
// of this distribution and at http://opencv.org/license.html.
|
| 4 |
+
|
| 5 |
+
#ifndef OPENCV_CORE_BINDINGS_UTILS_HPP
|
| 6 |
+
#define OPENCV_CORE_BINDINGS_UTILS_HPP
|
| 7 |
+
|
| 8 |
+
#include <opencv2/core/async.hpp>
|
| 9 |
+
#include <opencv2/core/detail/async_promise.hpp>
|
| 10 |
+
#include <opencv2/core/utils/logger.hpp>
|
| 11 |
+
|
| 12 |
+
#include <stdexcept>
|
| 13 |
+
|
| 14 |
+
namespace cv { namespace utils {
|
| 15 |
+
//! @addtogroup core_utils
|
| 16 |
+
//! @{
|
| 17 |
+
|
| 18 |
+
CV_EXPORTS_W String dumpInputArray(InputArray argument);
|
| 19 |
+
|
| 20 |
+
CV_EXPORTS_W String dumpInputArrayOfArrays(InputArrayOfArrays argument);
|
| 21 |
+
|
| 22 |
+
CV_EXPORTS_W String dumpInputOutputArray(InputOutputArray argument);
|
| 23 |
+
|
| 24 |
+
CV_EXPORTS_W String dumpInputOutputArrayOfArrays(InputOutputArrayOfArrays argument);
|
| 25 |
+
|
| 26 |
+
CV_WRAP static inline
|
| 27 |
+
String dumpBool(bool argument)
|
| 28 |
+
{
|
| 29 |
+
return (argument) ? String("Bool: True") : String("Bool: False");
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
CV_WRAP static inline
|
| 33 |
+
String dumpInt(int argument)
|
| 34 |
+
{
|
| 35 |
+
return cv::format("Int: %d", argument);
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
CV_WRAP static inline
|
| 39 |
+
String dumpInt64(int64 argument)
|
| 40 |
+
{
|
| 41 |
+
std::ostringstream oss("Int64: ", std::ios::ate);
|
| 42 |
+
oss << argument;
|
| 43 |
+
return oss.str();
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
CV_WRAP static inline
|
| 47 |
+
String dumpSizeT(size_t argument)
|
| 48 |
+
{
|
| 49 |
+
std::ostringstream oss("size_t: ", std::ios::ate);
|
| 50 |
+
oss << argument;
|
| 51 |
+
return oss.str();
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
CV_WRAP static inline
|
| 55 |
+
String dumpFloat(float argument)
|
| 56 |
+
{
|
| 57 |
+
return cv::format("Float: %.2f", argument);
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
CV_WRAP static inline
|
| 61 |
+
String dumpDouble(double argument)
|
| 62 |
+
{
|
| 63 |
+
return cv::format("Double: %.2f", argument);
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
CV_WRAP static inline
|
| 67 |
+
String dumpCString(const char* argument)
|
| 68 |
+
{
|
| 69 |
+
return cv::format("String: %s", argument);
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
CV_WRAP static inline
|
| 73 |
+
String dumpString(const String& argument)
|
| 74 |
+
{
|
| 75 |
+
return cv::format("String: %s", argument.c_str());
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
CV_WRAP static inline
|
| 79 |
+
String dumpRect(const Rect& argument)
|
| 80 |
+
{
|
| 81 |
+
return format("rect: (x=%d, y=%d, w=%d, h=%d)", argument.x, argument.y,
|
| 82 |
+
argument.width, argument.height);
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
CV_WRAP static inline
|
| 86 |
+
String dumpTermCriteria(const TermCriteria& argument)
|
| 87 |
+
{
|
| 88 |
+
return format("term_criteria: (type=%d, max_count=%d, epsilon=%lf",
|
| 89 |
+
argument.type, argument.maxCount, argument.epsilon);
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
CV_WRAP static inline
|
| 93 |
+
String dumpRotatedRect(const RotatedRect& argument)
|
| 94 |
+
{
|
| 95 |
+
return format("rotated_rect: (c_x=%f, c_y=%f, w=%f, h=%f, a=%f)",
|
| 96 |
+
argument.center.x, argument.center.y, argument.size.width,
|
| 97 |
+
argument.size.height, argument.angle);
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
CV_WRAP static inline
|
| 101 |
+
String dumpRange(const Range& argument)
|
| 102 |
+
{
|
| 103 |
+
if (argument == Range::all())
|
| 104 |
+
{
|
| 105 |
+
return "range: all";
|
| 106 |
+
}
|
| 107 |
+
else
|
| 108 |
+
{
|
| 109 |
+
return format("range: (s=%d, e=%d)", argument.start, argument.end);
|
| 110 |
+
}
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
CV_EXPORTS_W String dumpVectorOfInt(const std::vector<int>& vec);
|
| 114 |
+
|
| 115 |
+
CV_EXPORTS_W String dumpVectorOfDouble(const std::vector<double>& vec);
|
| 116 |
+
|
| 117 |
+
CV_EXPORTS_W String dumpVectorOfRect(const std::vector<Rect>& vec);
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
//! @cond IGNORED
|
| 121 |
+
|
| 122 |
+
CV_WRAP static inline
|
| 123 |
+
String testOverloadResolution(int value, const Point& point = Point(42, 24))
|
| 124 |
+
{
|
| 125 |
+
return format("overload (int=%d, point=(x=%d, y=%d))", value, point.x,
|
| 126 |
+
point.y);
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
CV_WRAP static inline
|
| 130 |
+
String testOverloadResolution(const Rect& rect)
|
| 131 |
+
{
|
| 132 |
+
return format("overload (rect=(x=%d, y=%d, w=%d, h=%d))", rect.x, rect.y,
|
| 133 |
+
rect.width, rect.height);
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
CV_WRAP static inline
|
| 137 |
+
RotatedRect testRotatedRect(float x, float y, float w, float h, float angle)
|
| 138 |
+
{
|
| 139 |
+
return RotatedRect(Point2f(x, y), Size2f(w, h), angle);
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
CV_WRAP static inline
|
| 143 |
+
std::vector<RotatedRect> testRotatedRectVector(float x, float y, float w, float h, float angle)
|
| 144 |
+
{
|
| 145 |
+
std::vector<RotatedRect> result;
|
| 146 |
+
for (int i = 0; i < 10; i++)
|
| 147 |
+
result.push_back(RotatedRect(Point2f(x + i, y + 2 * i), Size2f(w, h), angle + 10 * i));
|
| 148 |
+
return result;
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
CV_WRAP static inline
|
| 152 |
+
int testOverwriteNativeMethod(int argument)
|
| 153 |
+
{
|
| 154 |
+
return argument;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
CV_WRAP static inline
|
| 158 |
+
String testReservedKeywordConversion(int positional_argument, int lambda = 2, int from = 3)
|
| 159 |
+
{
|
| 160 |
+
return format("arg=%d, lambda=%d, from=%d", positional_argument, lambda, from);
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
CV_WRAP static inline
|
| 164 |
+
void generateVectorOfRect(size_t len, CV_OUT std::vector<Rect>& vec)
|
| 165 |
+
{
|
| 166 |
+
vec.resize(len);
|
| 167 |
+
if (len > 0)
|
| 168 |
+
{
|
| 169 |
+
RNG rng(12345);
|
| 170 |
+
Mat tmp(static_cast<int>(len), 1, CV_32SC4);
|
| 171 |
+
rng.fill(tmp, RNG::UNIFORM, 10, 20);
|
| 172 |
+
tmp.copyTo(vec);
|
| 173 |
+
}
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
CV_WRAP static inline
|
| 177 |
+
void generateVectorOfInt(size_t len, CV_OUT std::vector<int>& vec)
|
| 178 |
+
{
|
| 179 |
+
vec.resize(len);
|
| 180 |
+
if (len > 0)
|
| 181 |
+
{
|
| 182 |
+
RNG rng(554433);
|
| 183 |
+
Mat tmp(static_cast<int>(len), 1, CV_32SC1);
|
| 184 |
+
rng.fill(tmp, RNG::UNIFORM, -10, 10);
|
| 185 |
+
tmp.copyTo(vec);
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
CV_WRAP static inline
|
| 190 |
+
void generateVectorOfMat(size_t len, int rows, int cols, int dtype, CV_OUT std::vector<Mat>& vec)
|
| 191 |
+
{
|
| 192 |
+
vec.resize(len);
|
| 193 |
+
if (len > 0)
|
| 194 |
+
{
|
| 195 |
+
RNG rng(65431);
|
| 196 |
+
for (size_t i = 0; i < len; ++i)
|
| 197 |
+
{
|
| 198 |
+
vec[i].create(rows, cols, dtype);
|
| 199 |
+
rng.fill(vec[i], RNG::UNIFORM, 0, 10);
|
| 200 |
+
}
|
| 201 |
+
}
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
CV_WRAP static inline
|
| 205 |
+
void testRaiseGeneralException()
|
| 206 |
+
{
|
| 207 |
+
throw std::runtime_error("exception text");
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
CV_WRAP static inline
|
| 211 |
+
AsyncArray testAsyncArray(InputArray argument)
|
| 212 |
+
{
|
| 213 |
+
AsyncPromise p;
|
| 214 |
+
p.setValue(argument);
|
| 215 |
+
return p.getArrayResult();
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
CV_WRAP static inline
|
| 219 |
+
AsyncArray testAsyncException()
|
| 220 |
+
{
|
| 221 |
+
AsyncPromise p;
|
| 222 |
+
try
|
| 223 |
+
{
|
| 224 |
+
CV_Error(Error::StsOk, "Test: Generated async error");
|
| 225 |
+
}
|
| 226 |
+
catch (const cv::Exception& e)
|
| 227 |
+
{
|
| 228 |
+
p.setException(e);
|
| 229 |
+
}
|
| 230 |
+
return p.getArrayResult();
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
CV_WRAP static inline
|
| 234 |
+
String dumpVec2i(const cv::Vec2i value = cv::Vec2i(42, 24)) {
|
| 235 |
+
return format("Vec2i(%d, %d)", value[0], value[1]);
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
struct CV_EXPORTS_W_SIMPLE ClassWithKeywordProperties {
|
| 239 |
+
CV_PROP_RW int lambda;
|
| 240 |
+
CV_PROP int except;
|
| 241 |
+
|
| 242 |
+
CV_WRAP explicit ClassWithKeywordProperties(int lambda_arg = 24, int except_arg = 42)
|
| 243 |
+
{
|
| 244 |
+
lambda = lambda_arg;
|
| 245 |
+
except = except_arg;
|
| 246 |
+
}
|
| 247 |
+
};
|
| 248 |
+
|
| 249 |
+
struct CV_EXPORTS_W_PARAMS FunctionParams
|
| 250 |
+
{
|
| 251 |
+
CV_PROP_RW int lambda = -1;
|
| 252 |
+
CV_PROP_RW float sigma = 0.0f;
|
| 253 |
+
|
| 254 |
+
FunctionParams& setLambda(int value) CV_NOEXCEPT
|
| 255 |
+
{
|
| 256 |
+
lambda = value;
|
| 257 |
+
return *this;
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
FunctionParams& setSigma(float value) CV_NOEXCEPT
|
| 261 |
+
{
|
| 262 |
+
sigma = value;
|
| 263 |
+
return *this;
|
| 264 |
+
}
|
| 265 |
+
};
|
| 266 |
+
|
| 267 |
+
CV_WRAP static inline String
|
| 268 |
+
copyMatAndDumpNamedArguments(InputArray src, OutputArray dst,
|
| 269 |
+
const FunctionParams& params = FunctionParams())
|
| 270 |
+
{
|
| 271 |
+
src.copyTo(dst);
|
| 272 |
+
return format("lambda=%d, sigma=%.1f", params.lambda,
|
| 273 |
+
params.sigma);
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
namespace nested {
|
| 277 |
+
CV_WRAP static inline bool testEchoBooleanFunction(bool flag) {
|
| 278 |
+
return flag;
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
class CV_EXPORTS_W CV_WRAP_AS(ExportClassName) OriginalClassName
|
| 282 |
+
{
|
| 283 |
+
public:
|
| 284 |
+
struct CV_EXPORTS_W_SIMPLE Params
|
| 285 |
+
{
|
| 286 |
+
CV_PROP_RW int int_value;
|
| 287 |
+
CV_PROP_RW float float_value;
|
| 288 |
+
|
| 289 |
+
CV_WRAP explicit Params(int int_param = 123, float float_param = 3.5f)
|
| 290 |
+
{
|
| 291 |
+
int_value = int_param;
|
| 292 |
+
float_value = float_param;
|
| 293 |
+
}
|
| 294 |
+
};
|
| 295 |
+
|
| 296 |
+
explicit OriginalClassName(const OriginalClassName::Params& params = OriginalClassName::Params())
|
| 297 |
+
{
|
| 298 |
+
params_ = params;
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
CV_WRAP int getIntParam() const
|
| 302 |
+
{
|
| 303 |
+
return params_.int_value;
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
CV_WRAP float getFloatParam() const
|
| 307 |
+
{
|
| 308 |
+
return params_.float_value;
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
CV_WRAP static std::string originalName()
|
| 312 |
+
{
|
| 313 |
+
return "OriginalClassName";
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
CV_WRAP static Ptr<OriginalClassName>
|
| 317 |
+
create(const OriginalClassName::Params& params = OriginalClassName::Params())
|
| 318 |
+
{
|
| 319 |
+
return makePtr<OriginalClassName>(params);
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
private:
|
| 323 |
+
OriginalClassName::Params params_;
|
| 324 |
+
};
|
| 325 |
+
|
| 326 |
+
typedef OriginalClassName::Params OriginalClassName_Params;
|
| 327 |
+
} // namespace nested
|
| 328 |
+
|
| 329 |
+
//! @endcond IGNORED
|
| 330 |
+
|
| 331 |
+
namespace fs {
|
| 332 |
+
CV_EXPORTS_W cv::String getCacheDirectoryForDownloads();
|
| 333 |
+
} // namespace fs
|
| 334 |
+
|
| 335 |
+
//! @} // core_utils
|
| 336 |
+
} // namespace cv::utils
|
| 337 |
+
|
| 338 |
+
//! @cond IGNORED
|
| 339 |
+
|
| 340 |
+
CV_WRAP static inline
|
| 341 |
+
int setLogLevel(int level)
|
| 342 |
+
{
|
| 343 |
+
// NB: Binding generators doesn't work with enums properly yet, so we define separate overload here
|
| 344 |
+
return cv::utils::logging::setLogLevel((cv::utils::logging::LogLevel)level);
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
CV_WRAP static inline
|
| 348 |
+
int getLogLevel()
|
| 349 |
+
{
|
| 350 |
+
return cv::utils::logging::getLogLevel();
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
//! @endcond IGNORED
|
| 354 |
+
|
| 355 |
+
} // namespaces cv / utils
|
| 356 |
+
|
| 357 |
+
#endif // OPENCV_CORE_BINDINGS_UTILS_HPP
|
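The FunctionParams setters above return *this, which is the header's named-argument pattern for wrapped calls. An illustrative standalone sketch follows; the matrix and parameter values are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/core/bindings_utils.hpp>
#include <iostream>

int main()
{
    cv::Mat src = cv::Mat::eye(3, 3, CV_32F), dst;

    // chained setters stand in for keyword arguments
    cv::String info = cv::utils::copyMatAndDumpNamedArguments(
        src, dst, cv::utils::FunctionParams().setLambda(7).setSigma(0.5f));

    std::cout << info << "\n";                                   // lambda=7, sigma=0.5
    std::cout << cv::utils::dumpRect(cv::Rect(1, 2, 3, 4)) << "\n";
    return 0;
}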
3rdparty/opencv/include/opencv2/core/bufferpool.hpp
ADDED
|
@@ -0,0 +1,40 @@
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Advanced Micro Devices, Inc., all rights reserved.

#ifndef OPENCV_CORE_BUFFER_POOL_HPP
#define OPENCV_CORE_BUFFER_POOL_HPP

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4265)
#endif

namespace cv
{

//! @addtogroup core_opencl
//! @{

class BufferPoolController
{
protected:
    ~BufferPoolController() { }
public:
    virtual size_t getReservedSize() const = 0;
    virtual size_t getMaxReservedSize() const = 0;
    virtual void setMaxReservedSize(size_t size) = 0;
    virtual void freeAllReservedBuffers() = 0;
};

//! @}

}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif // OPENCV_CORE_BUFFER_POOL_HPP

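BufferPoolController above is a pure interface; concrete controllers come from OpenCV's allocators. The stub below is only an illustration of the contract and is not part of OpenCV.

#include <opencv2/core/bufferpool.hpp>
#include <cstddef>

// Illustrative only: a do-nothing controller that just tracks a size cap.
class DummyPoolController : public cv::BufferPoolController
{
public:
    size_t getReservedSize() const override { return reserved_; }
    size_t getMaxReservedSize() const override { return maxReserved_; }
    void setMaxReservedSize(size_t size) override { maxReserved_ = size; }
    void freeAllReservedBuffers() override { reserved_ = 0; }

private:
    size_t reserved_ = 0;
    size_t maxReserved_ = 0;
};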
3rdparty/opencv/include/opencv2/core/check.hpp
ADDED
|
@@ -0,0 +1,173 @@
|
| 1 |
+
// This file is part of OpenCV project.
|
| 2 |
+
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
| 3 |
+
// of this distribution and at http://opencv.org/license.html.
|
| 4 |
+
|
| 5 |
+
#ifndef OPENCV_CORE_CHECK_HPP
|
| 6 |
+
#define OPENCV_CORE_CHECK_HPP
|
| 7 |
+
|
| 8 |
+
#include <opencv2/core/base.hpp>
|
| 9 |
+
|
| 10 |
+
namespace cv {
|
| 11 |
+
|
| 12 |
+
/** Returns string of cv::Mat depth value: CV_8U -> "CV_8U" or "<invalid depth>" */
|
| 13 |
+
CV_EXPORTS const char* depthToString(int depth);
|
| 14 |
+
|
| 15 |
+
/** Returns string of cv::Mat depth value: CV_8UC3 -> "CV_8UC3" or "<invalid type>" */
|
| 16 |
+
CV_EXPORTS String typeToString(int type);
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
//! @cond IGNORED
|
| 20 |
+
namespace detail {
|
| 21 |
+
|
| 22 |
+
/** Returns string of cv::Mat depth value: CV_8U -> "CV_8U" or NULL */
|
| 23 |
+
CV_EXPORTS const char* depthToString_(int depth);
|
| 24 |
+
|
| 25 |
+
/** Returns string of cv::Mat depth value: CV_8UC3 -> "CV_8UC3" or cv::String() */
|
| 26 |
+
CV_EXPORTS cv::String typeToString_(int type);
|
| 27 |
+
|
| 28 |
+
enum TestOp {
|
| 29 |
+
TEST_CUSTOM = 0,
|
| 30 |
+
TEST_EQ = 1,
|
| 31 |
+
TEST_NE = 2,
|
| 32 |
+
TEST_LE = 3,
|
| 33 |
+
TEST_LT = 4,
|
| 34 |
+
TEST_GE = 5,
|
| 35 |
+
TEST_GT = 6,
|
| 36 |
+
CV__LAST_TEST_OP
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct CheckContext {
|
| 40 |
+
const char* func;
|
| 41 |
+
const char* file;
|
| 42 |
+
int line;
|
| 43 |
+
enum TestOp testOp;
|
| 44 |
+
const char* message;
|
| 45 |
+
const char* p1_str;
|
| 46 |
+
const char* p2_str;
|
| 47 |
+
};
|
| 48 |
+
|
| 49 |
+
#ifndef CV__CHECK_FILENAME
|
| 50 |
+
# define CV__CHECK_FILENAME __FILE__
|
| 51 |
+
#endif
|
| 52 |
+
|
| 53 |
+
#ifndef CV__CHECK_FUNCTION
|
| 54 |
+
# if defined _MSC_VER
|
| 55 |
+
# define CV__CHECK_FUNCTION __FUNCSIG__
|
| 56 |
+
# elif defined __GNUC__
|
| 57 |
+
# define CV__CHECK_FUNCTION __PRETTY_FUNCTION__
|
| 58 |
+
# else
|
| 59 |
+
# define CV__CHECK_FUNCTION "<unknown>"
|
| 60 |
+
# endif
|
| 61 |
+
#endif
|
| 62 |
+
|
| 63 |
+
#define CV__CHECK_LOCATION_VARNAME(id) CVAUX_CONCAT(CVAUX_CONCAT(__cv_check_, id), __LINE__)
|
| 64 |
+
#define CV__DEFINE_CHECK_CONTEXT(id, message, testOp, p1_str, p2_str) \
|
| 65 |
+
static const cv::detail::CheckContext CV__CHECK_LOCATION_VARNAME(id) = \
|
| 66 |
+
{ CV__CHECK_FUNCTION, CV__CHECK_FILENAME, __LINE__, testOp, "" message, "" p1_str, "" p2_str }
|
| 67 |
+
|
| 68 |
+
CV_EXPORTS void CV_NORETURN check_failed_auto(const bool v1, const bool v2, const CheckContext& ctx);
|
| 69 |
+
CV_EXPORTS void CV_NORETURN check_failed_auto(const int v1, const int v2, const CheckContext& ctx);
|
| 70 |
+
CV_EXPORTS void CV_NORETURN check_failed_auto(const size_t v1, const size_t v2, const CheckContext& ctx);
|
| 71 |
+
CV_EXPORTS void CV_NORETURN check_failed_auto(const float v1, const float v2, const CheckContext& ctx);
|
| 72 |
+
CV_EXPORTS void CV_NORETURN check_failed_auto(const double v1, const double v2, const CheckContext& ctx);
|
| 73 |
+
CV_EXPORTS void CV_NORETURN check_failed_auto(const Size_<int> v1, const Size_<int> v2, const CheckContext& ctx);
|
| 74 |
+
CV_EXPORTS void CV_NORETURN check_failed_MatDepth(const int v1, const int v2, const CheckContext& ctx);
|
| 75 |
+
CV_EXPORTS void CV_NORETURN check_failed_MatType(const int v1, const int v2, const CheckContext& ctx);
|
| 76 |
+
CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v1, const int v2, const CheckContext& ctx);
|
| 77 |
+
|
| 78 |
+
CV_EXPORTS void CV_NORETURN check_failed_true(const bool v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_false(const bool v, const CheckContext& ctx);

CV_EXPORTS void CV_NORETURN check_failed_auto(const int v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const size_t v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const float v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const double v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const Size_<int> v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_auto(const std::string& v1, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatDepth(const int v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatType(const int v, const CheckContext& ctx);
CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v, const CheckContext& ctx);


#define CV__TEST_EQ(v1, v2) ((v1) == (v2))
#define CV__TEST_NE(v1, v2) ((v1) != (v2))
#define CV__TEST_LE(v1, v2) ((v1) <= (v2))
#define CV__TEST_LT(v1, v2) ((v1) < (v2))
#define CV__TEST_GE(v1, v2) ((v1) >= (v2))
#define CV__TEST_GT(v1, v2) ((v1) > (v2))

#define CV__CHECK(id, op, type, v1, v2, v1_str, v2_str, msg_str) do { \
    if(CV__TEST_##op((v1), (v2))) ; else { \
        CV__DEFINE_CHECK_CONTEXT(id, msg_str, cv::detail::TEST_ ## op, v1_str, v2_str); \
        cv::detail::check_failed_ ## type((v1), (v2), CV__CHECK_LOCATION_VARNAME(id)); \
    } \
} while (0)

#define CV__CHECK_CUSTOM_TEST(id, type, v, test_expr, v_str, test_expr_str, msg_str) do { \
    if(!!(test_expr)) ; else { \
        CV__DEFINE_CHECK_CONTEXT(id, msg_str, cv::detail::TEST_CUSTOM, v_str, test_expr_str); \
        cv::detail::check_failed_ ## type((v), CV__CHECK_LOCATION_VARNAME(id)); \
    } \
} while (0)

} // namespace
//! @endcond


/// Supported values of these types: int, float, double
#define CV_CheckEQ(v1, v2, msg)  CV__CHECK(_, EQ, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckNE(v1, v2, msg)  CV__CHECK(_, NE, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckLE(v1, v2, msg)  CV__CHECK(_, LE, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckLT(v1, v2, msg)  CV__CHECK(_, LT, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckGE(v1, v2, msg)  CV__CHECK(_, GE, auto, v1, v2, #v1, #v2, msg)
#define CV_CheckGT(v1, v2, msg)  CV__CHECK(_, GT, auto, v1, v2, #v1, #v2, msg)

/// Check with additional "decoding" of type values in error message
#define CV_CheckTypeEQ(t1, t2, msg)  CV__CHECK(_, EQ, MatType, t1, t2, #t1, #t2, msg)
/// Check with additional "decoding" of depth values in error message
#define CV_CheckDepthEQ(d1, d2, msg)  CV__CHECK(_, EQ, MatDepth, d1, d2, #d1, #d2, msg)

#define CV_CheckChannelsEQ(c1, c2, msg)  CV__CHECK(_, EQ, MatChannels, c1, c2, #c1, #c2, msg)

/// Example: type == CV_8UC1 || type == CV_8UC3
#define CV_CheckType(t, test_expr, msg)  CV__CHECK_CUSTOM_TEST(_, MatType, t, (test_expr), #t, #test_expr, msg)

/// Example: depth == CV_32F || depth == CV_64F
#define CV_CheckDepth(t, test_expr, msg)  CV__CHECK_CUSTOM_TEST(_, MatDepth, t, (test_expr), #t, #test_expr, msg)

/// Example: channel == 1 || channel == 3
#define CV_CheckChannels(t, test_expr, msg)  CV__CHECK_CUSTOM_TEST(_, MatChannels, t, (test_expr), #t, #test_expr, msg)

/// Example: v == A || v == B
#define CV_Check(v, test_expr, msg)  CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg)

/// Example: v == true
#define CV_CheckTrue(v, msg)  CV__CHECK_CUSTOM_TEST(_, true, v, v, #v, "", msg)

/// Example: v == false
#define CV_CheckFalse(v, msg)  CV__CHECK_CUSTOM_TEST(_, false, v, (!(v)), #v, "", msg)

/// Some complex conditions: CV_Check(src2, src2.empty() || (src2.type() == src1.type() && src2.size() == src1.size()), "src2 should have same size/type as src1")
// TODO define pretty-printers

#ifndef NDEBUG
#define CV_DbgCheck(v, test_expr, msg)  CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg)
#define CV_DbgCheckEQ(v1, v2, msg)  CV__CHECK(_, EQ, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckNE(v1, v2, msg)  CV__CHECK(_, NE, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckLE(v1, v2, msg)  CV__CHECK(_, LE, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckLT(v1, v2, msg)  CV__CHECK(_, LT, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckGE(v1, v2, msg)  CV__CHECK(_, GE, auto, v1, v2, #v1, #v2, msg)
#define CV_DbgCheckGT(v1, v2, msg)  CV__CHECK(_, GT, auto, v1, v2, #v1, #v2, msg)
#else
#define CV_DbgCheck(v, test_expr, msg)  do { } while (0)
#define CV_DbgCheckEQ(v1, v2, msg)  do { } while (0)
#define CV_DbgCheckNE(v1, v2, msg)  do { } while (0)
#define CV_DbgCheckLE(v1, v2, msg)  do { } while (0)
#define CV_DbgCheckLT(v1, v2, msg)  do { } while (0)
#define CV_DbgCheckGE(v1, v2, msg)  do { } while (0)
#define CV_DbgCheckGT(v1, v2, msg)  do { } while (0)
#endif

} // namespace

#endif // OPENCV_CORE_CHECK_HPP
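For reference, a minimal usage sketch of the CV_Check* macros above (the helper function processU8 is hypothetical and assumes only that opencv2/core/check.hpp is on the include path); a failed check throws cv::Exception with the stringified expression, the decoded type name and the user message:

#include <opencv2/core.hpp>
#include <opencv2/core/check.hpp>

// Hypothetical helper used only to illustrate the check macros.
static void processU8(const cv::Mat& src)
{
    // On failure these throw cv::Exception; CV_CheckTypeEQ additionally decodes
    // the numeric type into a readable name such as "CV_8UC1" in the message.
    CV_CheckTypeEQ(src.type(), CV_8UC1, "processU8 expects a single-channel 8-bit image");
    CV_CheckGT(src.rows, 0, "input must not be empty");

    // Custom predicate form: any boolean expression over the checked value.
    CV_CheckDepth(src.depth(), src.depth() == CV_8U || src.depth() == CV_16U, "unsupported depth");
}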
3rdparty/opencv/include/opencv2/core/core.hpp
ADDED
|
@@ -0,0 +1,48 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif

#include "opencv2/core.hpp"
3rdparty/opencv/include/opencv2/core/core_c.h
ADDED
|
The diff for this file is too large to render.
3rdparty/opencv/include/opencv2/core/cuda.hpp
ADDED
|
@@ -0,0 +1,1339 @@
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
| 16 |
+
// Third party copyrights are property of their respective owners.
|
| 17 |
+
//
|
| 18 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 19 |
+
// are permitted provided that the following conditions are met:
|
| 20 |
+
//
|
| 21 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 22 |
+
// this list of conditions and the following disclaimer.
|
| 23 |
+
//
|
| 24 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 25 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 26 |
+
// and/or other materials provided with the distribution.
|
| 27 |
+
//
|
| 28 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 29 |
+
// derived from this software without specific prior written permission.
|
| 30 |
+
//
|
| 31 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 32 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 33 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 34 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 35 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 36 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 37 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 38 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 39 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 40 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 41 |
+
//
|
| 42 |
+
//M*/
|
| 43 |
+
|
| 44 |
+
#ifndef OPENCV_CORE_CUDA_HPP
|
| 45 |
+
#define OPENCV_CORE_CUDA_HPP
|
| 46 |
+
|
| 47 |
+
#ifndef __cplusplus
|
| 48 |
+
# error cuda.hpp header must be compiled as C++
|
| 49 |
+
#endif
|
| 50 |
+
|
| 51 |
+
#include "opencv2/core.hpp"
|
| 52 |
+
#include "opencv2/core/cuda_types.hpp"
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
@defgroup cuda CUDA-accelerated Computer Vision
|
| 56 |
+
@{
|
| 57 |
+
@defgroup cudacore Core part
|
| 58 |
+
@{
|
| 59 |
+
@defgroup cudacore_init Initialization and Information
|
| 60 |
+
@defgroup cudacore_struct Data Structures
|
| 61 |
+
@}
|
| 62 |
+
@}
|
| 63 |
+
*/
|
| 64 |
+
|
| 65 |
+
namespace cv { namespace cuda {
|
| 66 |
+
|
| 67 |
+
//! @addtogroup cudacore_struct
|
| 68 |
+
//! @{
|
| 69 |
+
|
| 70 |
+
//===================================================================================
|
| 71 |
+
// GpuMat
|
| 72 |
+
//===================================================================================
|
| 73 |
+
|
| 74 |
+
/** @brief Base storage class for GPU memory with reference counting.
|
| 75 |
+
|
| 76 |
+
Its interface matches the Mat interface with the following limitations:
|
| 77 |
+
|
| 78 |
+
- no arbitrary dimensions support (only 2D)
|
| 79 |
+
- no functions that return references to their data (because references on GPU are not valid for
|
| 80 |
+
CPU)
|
| 81 |
+
- no expression templates technique support
|
| 82 |
+
|
| 83 |
+
Beware that the latter limitation may lead to overloaded matrix operators that cause memory
|
| 84 |
+
allocations. The GpuMat class is convertible to cuda::PtrStepSz and cuda::PtrStep so it can be
|
| 85 |
+
passed directly to the kernel.
|
| 86 |
+
|
| 87 |
+
@note In contrast with Mat, in most cases GpuMat::isContinuous() == false . This means that rows are
|
| 88 |
+
aligned to a size depending on the hardware. Single-row GpuMat is always a continuous matrix.
|
| 89 |
+
|
| 90 |
+
@note You are not recommended to leave static or global GpuMat variables allocated, that is, to rely
|
| 91 |
+
on its destructor. The destruction order of such variables and CUDA context is undefined. GPU memory
|
| 92 |
+
release function returns error if the CUDA context has been destroyed before.
|
| 93 |
+
|
| 94 |
+
Some member functions are described as a "Blocking Call" while some are described as a
|
| 95 |
+
"Non-Blocking Call". Blocking functions are synchronous to host. It is guaranteed that the GPU
|
| 96 |
+
operation is finished when the function returns. However, non-blocking functions are asynchronous to
|
| 97 |
+
host. Those functions may return even if the GPU operation is not finished.
|
| 98 |
+
|
| 99 |
+
Compared to their blocking counterpart, non-blocking functions accept Stream as an additional
|
| 100 |
+
argument. If a non-default stream is passed, the GPU operation may overlap with operations in other
|
| 101 |
+
streams.
|
| 102 |
+
|
| 103 |
+
@sa Mat
|
| 104 |
+
*/
|
| 105 |
+
class CV_EXPORTS_W GpuMat
|
| 106 |
+
{
|
| 107 |
+
public:
|
| 108 |
+
class CV_EXPORTS_W Allocator
|
| 109 |
+
{
|
| 110 |
+
public:
|
| 111 |
+
virtual ~Allocator() {}
|
| 112 |
+
|
| 113 |
+
// allocator must fill data, step and refcount fields
|
| 114 |
+
virtual bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize) = 0;
|
| 115 |
+
virtual void free(GpuMat* mat) = 0;
|
| 116 |
+
};
|
| 117 |
+
|
| 118 |
+
//! default allocator
|
| 119 |
+
CV_WRAP static GpuMat::Allocator* defaultAllocator();
|
| 120 |
+
CV_WRAP static void setDefaultAllocator(GpuMat::Allocator* allocator);
|
| 121 |
+
CV_WRAP static GpuMat::Allocator* getStdAllocator();
|
| 122 |
+
|
| 123 |
+
//! default constructor
|
| 124 |
+
CV_WRAP explicit GpuMat(GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
|
| 125 |
+
|
| 126 |
+
//! constructs GpuMat of the specified size and type
|
| 127 |
+
CV_WRAP GpuMat(int rows, int cols, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
|
| 128 |
+
CV_WRAP GpuMat(Size size, int type, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
|
| 129 |
+
|
| 130 |
+
//! constructs GpuMat and fills it with the specified value _s
|
| 131 |
+
CV_WRAP GpuMat(int rows, int cols, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
|
| 132 |
+
CV_WRAP GpuMat(Size size, int type, Scalar s, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
|
| 133 |
+
|
| 134 |
+
//! copy constructor
|
| 135 |
+
CV_WRAP GpuMat(const GpuMat& m);
|
| 136 |
+
|
| 137 |
+
//! constructor for GpuMat headers pointing to user-allocated data
|
| 138 |
+
GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
|
| 139 |
+
GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);
|
| 140 |
+
|
| 141 |
+
//! creates a GpuMat header for a part of the bigger matrix
|
| 142 |
+
CV_WRAP GpuMat(const GpuMat& m, Range rowRange, Range colRange);
|
| 143 |
+
CV_WRAP GpuMat(const GpuMat& m, Rect roi);
|
| 144 |
+
|
| 145 |
+
//! builds GpuMat from host memory (Blocking call)
|
| 146 |
+
CV_WRAP explicit GpuMat(InputArray arr, GpuMat::Allocator* allocator = GpuMat::defaultAllocator());
|
| 147 |
+
|
| 148 |
+
//! destructor - calls release()
|
| 149 |
+
~GpuMat();
|
| 150 |
+
|
| 151 |
+
//! assignment operators
|
| 152 |
+
GpuMat& operator =(const GpuMat& m);
|
| 153 |
+
|
| 154 |
+
//! allocates new GpuMat data unless the GpuMat already has specified size and type
|
| 155 |
+
CV_WRAP void create(int rows, int cols, int type);
|
| 156 |
+
CV_WRAP void create(Size size, int type);
|
| 157 |
+
|
| 158 |
+
//! decreases reference counter, deallocate the data when reference counter reaches 0
|
| 159 |
+
CV_WRAP void release();
|
| 160 |
+
|
| 161 |
+
//! swaps with other smart pointer
|
| 162 |
+
CV_WRAP void swap(GpuMat& mat);
|
| 163 |
+
|
| 164 |
+
/** @brief Performs data upload to GpuMat (Blocking call)
|
| 165 |
+
|
| 166 |
+
This function copies data from host memory to device memory. As being a blocking call, it is
|
| 167 |
+
guaranteed that the copy operation is finished when this function returns.
|
| 168 |
+
*/
|
| 169 |
+
CV_WRAP void upload(InputArray arr);
|
| 170 |
+
|
| 171 |
+
/** @brief Performs data upload to GpuMat (Non-Blocking call)
|
| 172 |
+
|
| 173 |
+
This function copies data from host memory to device memory. As being a non-blocking call, this
|
| 174 |
+
function may return even if the copy operation is not finished.
|
| 175 |
+
|
| 176 |
+
The copy operation may be overlapped with operations in other non-default streams if \p stream is
|
| 177 |
+
not the default stream and \p dst is HostMem allocated with HostMem::PAGE_LOCKED option.
|
| 178 |
+
*/
|
| 179 |
+
CV_WRAP void upload(InputArray arr, Stream& stream);
|
| 180 |
+
|
| 181 |
+
/** @brief Performs data download from GpuMat (Blocking call)
|
| 182 |
+
|
| 183 |
+
This function copies data from device memory to host memory. As being a blocking call, it is
|
| 184 |
+
guaranteed that the copy operation is finished when this function returns.
|
| 185 |
+
*/
|
| 186 |
+
CV_WRAP void download(OutputArray dst) const;
|
| 187 |
+
|
| 188 |
+
/** @brief Performs data download from GpuMat (Non-Blocking call)
|
| 189 |
+
|
| 190 |
+
This function copies data from device memory to host memory. As being a non-blocking call, this
|
| 191 |
+
function may return even if the copy operation is not finished.
|
| 192 |
+
|
| 193 |
+
The copy operation may be overlapped with operations in other non-default streams if \p stream is
|
| 194 |
+
not the default stream and \p dst is HostMem allocated with HostMem::PAGE_LOCKED option.
|
| 195 |
+
*/
|
| 196 |
+
CV_WRAP void download(OutputArray dst, Stream& stream) const;
|
| 197 |
+
|
| 198 |
+
//! returns deep copy of the GpuMat, i.e. the data is copied
|
| 199 |
+
CV_WRAP GpuMat clone() const;
|
| 200 |
+
|
| 201 |
+
//! copies the GpuMat content to device memory (Blocking call)
|
| 202 |
+
void copyTo(OutputArray dst) const;
|
| 203 |
+
//! bindings overload which copies the GpuMat content to device memory (Blocking call)
|
| 204 |
+
CV_WRAP void copyTo(CV_OUT GpuMat& dst) const {
|
| 205 |
+
copyTo(static_cast<OutputArray>(dst));
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
//! copies the GpuMat content to device memory (Non-Blocking call)
|
| 209 |
+
void copyTo(OutputArray dst, Stream& stream) const;
|
| 210 |
+
//! bindings overload which copies the GpuMat content to device memory (Non-Blocking call)
|
| 211 |
+
CV_WRAP void copyTo(CV_OUT GpuMat& dst, Stream& stream) const {
|
| 212 |
+
copyTo(static_cast<OutputArray>(dst), stream);
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
//! copies those GpuMat elements to "m" that are marked with non-zero mask elements (Blocking call)
|
| 216 |
+
void copyTo(OutputArray dst, InputArray mask) const;
|
| 217 |
+
//! bindings overload which copies those GpuMat elements to "m" that are marked with non-zero mask elements (Blocking call)
|
| 218 |
+
CV_WRAP void copyTo(CV_OUT GpuMat& dst, GpuMat& mask) const {
|
| 219 |
+
copyTo(static_cast<OutputArray>(dst), static_cast<InputArray>(mask));
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
//! copies those GpuMat elements to "m" that are marked with non-zero mask elements (Non-Blocking call)
|
| 223 |
+
void copyTo(OutputArray dst, InputArray mask, Stream& stream) const;
|
| 224 |
+
//! bindings overload which copies those GpuMat elements to "m" that are marked with non-zero mask elements (Non-Blocking call)
|
| 225 |
+
CV_WRAP void copyTo(CV_OUT GpuMat& dst, GpuMat& mask, Stream& stream) const {
|
| 226 |
+
copyTo(static_cast<OutputArray>(dst), static_cast<InputArray>(mask), stream);
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
//! sets some of the GpuMat elements to s (Blocking call)
|
| 230 |
+
CV_WRAP GpuMat& setTo(Scalar s);
|
| 231 |
+
|
| 232 |
+
//! sets some of the GpuMat elements to s (Non-Blocking call)
|
| 233 |
+
CV_WRAP GpuMat& setTo(Scalar s, Stream& stream);
|
| 234 |
+
|
| 235 |
+
//! sets some of the GpuMat elements to s, according to the mask (Blocking call)
|
| 236 |
+
CV_WRAP GpuMat& setTo(Scalar s, InputArray mask);
|
| 237 |
+
|
| 238 |
+
//! sets some of the GpuMat elements to s, according to the mask (Non-Blocking call)
|
| 239 |
+
CV_WRAP GpuMat& setTo(Scalar s, InputArray mask, Stream& stream);
|
| 240 |
+
|
| 241 |
+
//! converts GpuMat to another datatype (Blocking call)
|
| 242 |
+
void convertTo(OutputArray dst, int rtype) const;
|
| 243 |
+
|
| 244 |
+
//! converts GpuMat to another datatype (Non-Blocking call)
|
| 245 |
+
void convertTo(OutputArray dst, int rtype, Stream& stream) const;
|
| 246 |
+
//! bindings overload which converts GpuMat to another datatype (Non-Blocking call)
|
| 247 |
+
CV_WRAP void convertTo(CV_OUT GpuMat& dst, int rtype, Stream& stream) const {
|
| 248 |
+
convertTo(static_cast<OutputArray>(dst), rtype, stream);
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
//! converts GpuMat to another datatype with scaling (Blocking call)
|
| 252 |
+
void convertTo(OutputArray dst, int rtype, double alpha, double beta = 0.0) const;
|
| 253 |
+
//! bindings overload which converts GpuMat to another datatype with scaling(Blocking call)
|
| 254 |
+
CV_WRAP void convertTo(CV_OUT GpuMat& dst, int rtype, double alpha = 1.0, double beta = 0.0) const {
|
| 255 |
+
convertTo(static_cast<OutputArray>(dst), rtype, alpha, beta);
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
//! converts GpuMat to another datatype with scaling (Non-Blocking call)
|
| 259 |
+
void convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const;
|
| 260 |
+
|
| 261 |
+
//! converts GpuMat to another datatype with scaling (Non-Blocking call)
|
| 262 |
+
void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const;
|
| 263 |
+
//! bindings overload which converts GpuMat to another datatype with scaling (Non-Blocking call)
|
| 264 |
+
CV_WRAP void convertTo(CV_OUT GpuMat& dst, int rtype, double alpha, double beta, Stream& stream) const {
|
| 265 |
+
convertTo(static_cast<OutputArray>(dst), rtype, alpha, beta, stream);
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
CV_WRAP void assignTo(GpuMat& m, int type = -1) const;
|
| 269 |
+
|
| 270 |
+
//! returns pointer to y-th row
|
| 271 |
+
uchar* ptr(int y = 0);
|
| 272 |
+
const uchar* ptr(int y = 0) const;
|
| 273 |
+
|
| 274 |
+
//! template version of the above method
|
| 275 |
+
template<typename _Tp> _Tp* ptr(int y = 0);
|
| 276 |
+
template<typename _Tp> const _Tp* ptr(int y = 0) const;
|
| 277 |
+
|
| 278 |
+
template <typename _Tp> operator PtrStepSz<_Tp>() const;
|
| 279 |
+
template <typename _Tp> operator PtrStep<_Tp>() const;
|
| 280 |
+
|
| 281 |
+
//! returns a new GpuMat header for the specified row
|
| 282 |
+
CV_WRAP GpuMat row(int y) const;
|
| 283 |
+
|
| 284 |
+
//! returns a new GpuMat header for the specified column
|
| 285 |
+
CV_WRAP GpuMat col(int x) const;
|
| 286 |
+
|
| 287 |
+
//! ... for the specified row span
|
| 288 |
+
CV_WRAP GpuMat rowRange(int startrow, int endrow) const;
|
| 289 |
+
CV_WRAP GpuMat rowRange(Range r) const;
|
| 290 |
+
|
| 291 |
+
//! ... for the specified column span
|
| 292 |
+
CV_WRAP GpuMat colRange(int startcol, int endcol) const;
|
| 293 |
+
CV_WRAP GpuMat colRange(Range r) const;
|
| 294 |
+
|
| 295 |
+
//! extracts a rectangular sub-GpuMat (this is a generalized form of row, rowRange etc.)
|
| 296 |
+
GpuMat operator ()(Range rowRange, Range colRange) const;
|
| 297 |
+
GpuMat operator ()(Rect roi) const;
|
| 298 |
+
|
| 299 |
+
//! creates alternative GpuMat header for the same data, with different
|
| 300 |
+
//! number of channels and/or different number of rows
|
| 301 |
+
CV_WRAP GpuMat reshape(int cn, int rows = 0) const;
|
| 302 |
+
|
| 303 |
+
//! locates GpuMat header within a parent GpuMat
|
| 304 |
+
CV_WRAP void locateROI(Size& wholeSize, Point& ofs) const;
|
| 305 |
+
|
| 306 |
+
//! moves/resizes the current GpuMat ROI inside the parent GpuMat
|
| 307 |
+
CV_WRAP GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);
|
| 308 |
+
|
| 309 |
+
//! returns true iff the GpuMat data is continuous
|
| 310 |
+
//! (i.e. when there are no gaps between successive rows)
|
| 311 |
+
CV_WRAP bool isContinuous() const;
|
| 312 |
+
|
| 313 |
+
//! returns element size in bytes
|
| 314 |
+
CV_WRAP size_t elemSize() const;
|
| 315 |
+
|
| 316 |
+
//! returns the size of element channel in bytes
|
| 317 |
+
CV_WRAP size_t elemSize1() const;
|
| 318 |
+
|
| 319 |
+
//! returns element type
|
| 320 |
+
CV_WRAP int type() const;
|
| 321 |
+
|
| 322 |
+
//! returns element type
|
| 323 |
+
CV_WRAP int depth() const;
|
| 324 |
+
|
| 325 |
+
//! returns number of channels
|
| 326 |
+
CV_WRAP int channels() const;
|
| 327 |
+
|
| 328 |
+
//! returns step/elemSize1()
|
| 329 |
+
CV_WRAP size_t step1() const;
|
| 330 |
+
|
| 331 |
+
//! returns GpuMat size : width == number of columns, height == number of rows
|
| 332 |
+
CV_WRAP Size size() const;
|
| 333 |
+
|
| 334 |
+
//! returns true if GpuMat data is NULL
|
| 335 |
+
CV_WRAP bool empty() const;
|
| 336 |
+
|
| 337 |
+
// returns pointer to cuda memory
|
| 338 |
+
CV_WRAP void* cudaPtr() const;
|
| 339 |
+
|
| 340 |
+
//! internal use method: updates the continuity flag
|
| 341 |
+
CV_WRAP void updateContinuityFlag();
|
| 342 |
+
|
| 343 |
+
/*! includes several bit-fields:
|
| 344 |
+
- the magic signature
|
| 345 |
+
- continuity flag
|
| 346 |
+
- depth
|
| 347 |
+
- number of channels
|
| 348 |
+
*/
|
| 349 |
+
int flags;
|
| 350 |
+
|
| 351 |
+
//! the number of rows and columns
|
| 352 |
+
int rows, cols;
|
| 353 |
+
|
| 354 |
+
//! a distance between successive rows in bytes; includes the gap if any
|
| 355 |
+
CV_PROP size_t step;
|
| 356 |
+
|
| 357 |
+
//! pointer to the data
|
| 358 |
+
uchar* data;
|
| 359 |
+
|
| 360 |
+
//! pointer to the reference counter;
|
| 361 |
+
//! when GpuMat points to user-allocated data, the pointer is NULL
|
| 362 |
+
int* refcount;
|
| 363 |
+
|
| 364 |
+
//! helper fields used in locateROI and adjustROI
|
| 365 |
+
uchar* datastart;
|
| 366 |
+
const uchar* dataend;
|
| 367 |
+
|
| 368 |
+
//! allocator
|
| 369 |
+
Allocator* allocator;
|
| 370 |
+
};
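To make the blocking vs. non-blocking distinction documented above concrete, here is a minimal host-side sketch (transferExample is a hypothetical helper; it assumes OpenCV was built with the CUDA module and a CUDA-capable device is present):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void transferExample(const cv::Mat& hostSrc)
{
    cv::cuda::GpuMat d_src;

    // Blocking upload: the copy is guaranteed to be finished when upload() returns.
    d_src.upload(hostSrc);

    // Non-blocking variant: with a non-default Stream the call may return
    // before the copy has completed; synchronize before touching the result.
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_async;
    d_async.upload(hostSrc, stream);

    cv::Mat hostDst;
    d_async.download(hostDst, stream);   // enqueued on the same stream
    stream.waitForCompletion();          // hostDst is valid only after this returns
}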
|
| 371 |
+
|
| 372 |
+
struct CV_EXPORTS_W GpuData
|
| 373 |
+
{
|
| 374 |
+
explicit GpuData(size_t _size);
|
| 375 |
+
~GpuData();
|
| 376 |
+
|
| 377 |
+
GpuData(const GpuData&) = delete;
|
| 378 |
+
GpuData& operator=(const GpuData&) = delete;
|
| 379 |
+
|
| 380 |
+
GpuData(GpuData&&) = delete;
|
| 381 |
+
GpuData& operator=(GpuData&&) = delete;
|
| 382 |
+
|
| 383 |
+
uchar* data;
|
| 384 |
+
size_t size;
|
| 385 |
+
};
|
| 386 |
+
|
| 387 |
+
class CV_EXPORTS_W GpuMatND
|
| 388 |
+
{
|
| 389 |
+
public:
|
| 390 |
+
using SizeArray = std::vector<int>;
|
| 391 |
+
using StepArray = std::vector<size_t>;
|
| 392 |
+
using IndexArray = std::vector<int>;
|
| 393 |
+
|
| 394 |
+
//! destructor
|
| 395 |
+
~GpuMatND();
|
| 396 |
+
|
| 397 |
+
//! default constructor
|
| 398 |
+
GpuMatND();
|
| 399 |
+
|
| 400 |
+
/** @overload
|
| 401 |
+
@param size Array of integers specifying an n-dimensional array shape.
|
| 402 |
+
@param type Array type. Use CV_8UC1, ..., CV_16FC4 to create 1-4 channel matrices, or
|
| 403 |
+
CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.
|
| 404 |
+
*/
|
| 405 |
+
GpuMatND(SizeArray size, int type);
|
| 406 |
+
|
| 407 |
+
/** @overload
|
| 408 |
+
@param size Array of integers specifying an n-dimensional array shape.
|
| 409 |
+
@param type Array type. Use CV_8UC1, ..., CV_16FC4 to create 1-4 channel matrices, or
|
| 410 |
+
CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices.
|
| 411 |
+
@param data Pointer to the user data. Matrix constructors that take data and step parameters do not
|
| 412 |
+
allocate matrix data. Instead, they just initialize the matrix header that points to the specified
|
| 413 |
+
data, which means that no data is copied. This operation is very efficient and can be used to
|
| 414 |
+
process external data using OpenCV functions. The external data is not automatically deallocated, so
|
| 415 |
+
you should take care of it.
|
| 416 |
+
@param step Array of _size.size() or _size.size()-1 steps in case of a multi-dimensional array
|
| 417 |
+
(if specified, the last step must be equal to the element size, otherwise it will be added as such).
|
| 418 |
+
If not specified, the matrix is assumed to be continuous.
|
| 419 |
+
*/
|
| 420 |
+
GpuMatND(SizeArray size, int type, void* data, StepArray step = StepArray());
|
| 421 |
+
|
| 422 |
+
/** @brief Allocates GPU memory.
|
| 423 |
+
Suppose there is some GPU memory already allocated. In that case, this method may choose to reuse that
|
| 424 |
+
GPU memory under the specific condition: it must be of the same size and type, not externally allocated,
|
| 425 |
+
the GPU memory is continuous (i.e., isContinuous() is true), and is not a sub-matrix of another GpuMatND
|
| 426 |
+
(i.e., isSubmatrix() is false). In other words, this method guarantees that the GPU memory allocated by
|
| 427 |
+
this method is always continuous and is not a sub-region of another GpuMatND.
|
| 428 |
+
*/
|
| 429 |
+
void create(SizeArray size, int type);
|
| 430 |
+
|
| 431 |
+
void release();
|
| 432 |
+
|
| 433 |
+
void swap(GpuMatND& m) noexcept;
|
| 434 |
+
|
| 435 |
+
/** @brief Creates a full copy of the array and the underlying data.
|
| 436 |
+
The method creates a full copy of the array. It mimics the behavior of Mat::clone(), i.e.
|
| 437 |
+
the original step is not taken into account. So, the array copy is a continuous array
|
| 438 |
+
occupying total()\*elemSize() bytes.
|
| 439 |
+
*/
|
| 440 |
+
GpuMatND clone() const;
|
| 441 |
+
|
| 442 |
+
/** @overload
|
| 443 |
+
This overload is non-blocking, so it may return even if the copy operation is not finished.
|
| 444 |
+
*/
|
| 445 |
+
GpuMatND clone(Stream& stream) const;
|
| 446 |
+
|
| 447 |
+
/** @brief Extracts a sub-matrix.
|
| 448 |
+
The operator makes a new header for the specified sub-array of \*this.
|
| 449 |
+
The operator is an O(1) operation, that is, no matrix data is copied.
|
| 450 |
+
@param ranges Array of selected ranges along each dimension.
|
| 451 |
+
*/
|
| 452 |
+
GpuMatND operator()(const std::vector<Range>& ranges) const;
|
| 453 |
+
|
| 454 |
+
/** @brief Creates a GpuMat header for a 2D plane part of an n-dim matrix.
|
| 455 |
+
@note The returned GpuMat is constructed with the constructor for user-allocated data.
|
| 456 |
+
That is, it does not perform reference counting.
|
| 457 |
+
@note This function does not increment this GpuMatND's reference counter.
|
| 458 |
+
*/
|
| 459 |
+
GpuMat createGpuMatHeader(IndexArray idx, Range rowRange, Range colRange) const;
|
| 460 |
+
|
| 461 |
+
/** @overload
|
| 462 |
+
Creates a GpuMat header if this GpuMatND is effectively 2D.
|
| 463 |
+
@note The returned GpuMat is constructed with the constructor for user-allocated data.
|
| 464 |
+
That is, It does not perform reference counting.
|
| 465 |
+
@note This function does not increment this GpuMatND's reference counter.
|
| 466 |
+
*/
|
| 467 |
+
GpuMat createGpuMatHeader() const;
|
| 468 |
+
|
| 469 |
+
/** @brief Extracts a 2D plane part of an n-dim matrix.
|
| 470 |
+
It differs from createGpuMatHeader(IndexArray, Range, Range) in that it clones a part of this
|
| 471 |
+
GpuMatND to the returned GpuMat.
|
| 472 |
+
@note This operator does not increment this GpuMatND's reference counter;
|
| 473 |
+
*/
|
| 474 |
+
GpuMat operator()(IndexArray idx, Range rowRange, Range colRange) const;
|
| 475 |
+
|
| 476 |
+
/** @brief Extracts a 2D plane part of an n-dim matrix if this GpuMatND is effectively 2D.
|
| 477 |
+
It differs from createGpuMatHeader() in that it clones a part of this GpuMatND.
|
| 478 |
+
@note This operator does not increment this GpuMatND's reference counter;
|
| 479 |
+
*/
|
| 480 |
+
operator GpuMat() const;
|
| 481 |
+
|
| 482 |
+
GpuMatND(const GpuMatND&) = default;
|
| 483 |
+
GpuMatND& operator=(const GpuMatND&) = default;
|
| 484 |
+
|
| 485 |
+
#if defined(__GNUC__) && __GNUC__ < 5
|
| 486 |
+
// error: function '...' defaulted on its first declaration with an exception-specification
|
| 487 |
+
// that differs from the implicit declaration '...'
|
| 488 |
+
|
| 489 |
+
GpuMatND(GpuMatND&&) = default;
|
| 490 |
+
GpuMatND& operator=(GpuMatND&&) = default;
|
| 491 |
+
#else
|
| 492 |
+
GpuMatND(GpuMatND&&) noexcept = default;
|
| 493 |
+
GpuMatND& operator=(GpuMatND&&) noexcept = default;
|
| 494 |
+
#endif
|
| 495 |
+
|
| 496 |
+
void upload(InputArray src);
|
| 497 |
+
void upload(InputArray src, Stream& stream);
|
| 498 |
+
void download(OutputArray dst) const;
|
| 499 |
+
void download(OutputArray dst, Stream& stream) const;
|
| 500 |
+
|
| 501 |
+
//! returns true iff the GpuMatND data is continuous
|
| 502 |
+
//! (i.e. when there are no gaps between successive rows)
|
| 503 |
+
bool isContinuous() const;
|
| 504 |
+
|
| 505 |
+
//! returns true if the matrix is a sub-matrix of another matrix
|
| 506 |
+
bool isSubmatrix() const;
|
| 507 |
+
|
| 508 |
+
//! returns element size in bytes
|
| 509 |
+
size_t elemSize() const;
|
| 510 |
+
|
| 511 |
+
//! returns the size of element channel in bytes
|
| 512 |
+
size_t elemSize1() const;
|
| 513 |
+
|
| 514 |
+
//! returns true if data is null
|
| 515 |
+
bool empty() const;
|
| 516 |
+
|
| 517 |
+
//! returns true if not empty and points to external(user-allocated) gpu memory
|
| 518 |
+
bool external() const;
|
| 519 |
+
|
| 520 |
+
//! returns pointer to the first byte of the GPU memory
|
| 521 |
+
uchar* getDevicePtr() const;
|
| 522 |
+
|
| 523 |
+
//! returns the total number of array elements
|
| 524 |
+
size_t total() const;
|
| 525 |
+
|
| 526 |
+
//! returns the size of underlying memory in bytes
|
| 527 |
+
size_t totalMemSize() const;
|
| 528 |
+
|
| 529 |
+
//! returns element type
|
| 530 |
+
int type() const;
|
| 531 |
+
|
| 532 |
+
private:
|
| 533 |
+
//! internal use
|
| 534 |
+
void setFields(SizeArray size, int type, StepArray step = StepArray());
|
| 535 |
+
|
| 536 |
+
public:
|
| 537 |
+
/*! includes several bit-fields:
|
| 538 |
+
- the magic signature
|
| 539 |
+
- continuity flag
|
| 540 |
+
- depth
|
| 541 |
+
- number of channels
|
| 542 |
+
*/
|
| 543 |
+
int flags;
|
| 544 |
+
|
| 545 |
+
//! matrix dimensionality
|
| 546 |
+
int dims;
|
| 547 |
+
|
| 548 |
+
//! shape of this array
|
| 549 |
+
SizeArray size;
|
| 550 |
+
|
| 551 |
+
/*! step values
|
| 552 |
+
Their semantics is identical to the semantics of step for Mat.
|
| 553 |
+
*/
|
| 554 |
+
StepArray step;
|
| 555 |
+
|
| 556 |
+
private:
|
| 557 |
+
/*! internal use
|
| 558 |
+
If this GpuMatND holds external memory, this is empty.
|
| 559 |
+
*/
|
| 560 |
+
std::shared_ptr<GpuData> data_;
|
| 561 |
+
|
| 562 |
+
/*! internal use
|
| 563 |
+
If this GpuMatND manages memory with reference counting, this value is
|
| 564 |
+
always equal to data_->data. If this GpuMatND holds external memory,
|
| 565 |
+
data_ is empty and data points to the external memory.
|
| 566 |
+
*/
|
| 567 |
+
uchar* data;
|
| 568 |
+
|
| 569 |
+
/*! internal use
|
| 570 |
+
If this GpuMatND is a sub-matrix of a larger matrix, this value is the
|
| 571 |
+
difference of the first byte between the sub-matrix and the whole matrix.
|
| 572 |
+
*/
|
| 573 |
+
size_t offset;
|
| 574 |
+
};
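A short, hedged sketch of driving GpuMatND from the host (the shape and plane index below are illustrative only; a CUDA-enabled build is assumed):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void gpuMatNDExample()
{
    // A 3-dimensional single-channel float array on the device (shape 2 x 64 x 64).
    cv::cuda::GpuMatND nd({2, 64, 64}, CV_32FC1);

    // View the first 64x64 plane as a regular GpuMat header: no data copy,
    // and no reference counting on the returned header.
    cv::cuda::GpuMat plane = nd.createGpuMatHeader({0}, cv::Range::all(), cv::Range::all());

    plane.setTo(cv::Scalar(1.0f));   // writes into the GpuMatND's memory
}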
|
| 575 |
+
|
| 576 |
+
/** @brief Creates a continuous matrix.
|
| 577 |
+
|
| 578 |
+
@param rows Row count.
|
| 579 |
+
@param cols Column count.
|
| 580 |
+
@param type Type of the matrix.
|
| 581 |
+
@param arr Destination matrix. This parameter changes only if it has a proper type and area (
|
| 582 |
+
\f$\texttt{rows} \times \texttt{cols}\f$ ).
|
| 583 |
+
|
| 584 |
+
Matrix is called continuous if its elements are stored continuously, that is, without gaps at the
|
| 585 |
+
end of each row.
|
| 586 |
+
*/
|
| 587 |
+
CV_EXPORTS_W void createContinuous(int rows, int cols, int type, OutputArray arr);
|
| 588 |
+
|
| 589 |
+
/** @brief Ensures that the size of a matrix is big enough and the matrix has a proper type.
|
| 590 |
+
|
| 591 |
+
@param rows Minimum desired number of rows.
|
| 592 |
+
@param cols Minimum desired number of columns.
|
| 593 |
+
@param type Desired matrix type.
|
| 594 |
+
@param arr Destination matrix.
|
| 595 |
+
|
| 596 |
+
The function does not reallocate memory if the matrix has proper attributes already.
|
| 597 |
+
*/
|
| 598 |
+
CV_EXPORTS_W void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr);
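For orientation, a hedged sketch of the usual reuse pattern built on ensureSizeIsEnough (the helper name reuseBuffer is hypothetical):

#include <opencv2/core/cuda.hpp>

void reuseBuffer(const cv::cuda::GpuMat& d_src, cv::cuda::GpuMat& d_buf)
{
    // Allocates d_buf only when its current size/type do not already match;
    // createContinuous additionally guarantees rows packed without padding.
    cv::cuda::ensureSizeIsEnough(d_src.rows, d_src.cols, d_src.type(), d_buf);
    d_src.copyTo(d_buf);
}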
|
| 599 |
+
|
| 600 |
+
/** @brief Bindings overload to create a GpuMat from existing GPU memory.
|
| 601 |
+
@param rows Row count.
|
| 602 |
+
@param cols Column count.
|
| 603 |
+
@param type Type of the matrix.
|
| 604 |
+
@param cudaMemoryAddress Address of the allocated GPU memory on the device. This does not allocate matrix data. Instead, it just initializes the matrix header that points to the specified \a cudaMemoryAddress, which means that no data is copied. This operation is very efficient and can be used to process external data using OpenCV functions. The external data is not automatically deallocated, so you should take care of it.
|
| 605 |
+
@param step Number of bytes each matrix row occupies. The value should include the padding bytes at the end of each row, if any. If the parameter is missing (set to Mat::AUTO_STEP ), no padding is assumed and the actual step is calculated as cols*elemSize(). See GpuMat::elemSize.
|
| 606 |
+
@note Overload for generation of bindings only, not exported or intended for use internally from C++.
|
| 607 |
+
*/
|
| 608 |
+
CV_EXPORTS_W GpuMat inline createGpuMatFromCudaMemory(int rows, int cols, int type, size_t cudaMemoryAddress, size_t step = Mat::AUTO_STEP) {
|
| 609 |
+
return GpuMat(rows, cols, type, reinterpret_cast<void*>(cudaMemoryAddress), step);
|
| 610 |
+
}
|
| 611 |
+
|
| 612 |
+
/** @overload
|
| 613 |
+
@param size 2D array size: Size(cols, rows). In the Size() constructor, the number of rows and the number of columns go in the reverse order.
|
| 614 |
+
@param type Type of the matrix.
|
| 615 |
+
@param cudaMemoryAddress Address of the allocated GPU memory on the device. This does not allocate matrix data. Instead, it just initializes the matrix header that points to the specified \a cudaMemoryAddress, which means that no data is copied. This operation is very efficient and can be used to process external data using OpenCV functions. The external data is not automatically deallocated, so you should take care of it.
|
| 616 |
+
@param step Number of bytes each matrix row occupies. The value should include the padding bytes at the end of each row, if any. If the parameter is missing (set to Mat::AUTO_STEP ), no padding is assumed and the actual step is calculated as cols*elemSize(). See GpuMat::elemSize.
|
| 617 |
+
@note Overload for generation of bindings only, not exported or intended for use internally from C++.
|
| 618 |
+
*/
|
| 619 |
+
CV_EXPORTS_W inline GpuMat createGpuMatFromCudaMemory(Size size, int type, size_t cudaMemoryAddress, size_t step = Mat::AUTO_STEP) {
|
| 620 |
+
return GpuMat(size, type, reinterpret_cast<void*>(cudaMemoryAddress), step);
|
| 621 |
+
}
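Because both overloads above are documented as bindings-only, C++ code that wraps externally allocated device memory would normally use the user-data GpuMat constructor directly. A hedged sketch, assuming the CUDA runtime API is available (wrapRawDeviceBuffer is a hypothetical helper, and the caller remains responsible for freeing devPtr):

#include <cuda_runtime.h>
#include <opencv2/core/cuda.hpp>

cv::cuda::GpuMat wrapRawDeviceBuffer(int rows, int cols)
{
    void* devPtr = nullptr;
    size_t pitch = 0;
    // Pitched allocation so each row starts at a well-aligned address.
    cudaMallocPitch(&devPtr, &pitch, cols * sizeof(unsigned char), rows);

    // Header only: the GpuMat does not own devPtr and will not free it.
    return cv::cuda::GpuMat(rows, cols, CV_8UC1, devPtr, pitch);
}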
|
| 622 |
+
|
| 623 |
+
/** @brief BufferPool for use with CUDA streams
|
| 624 |
+
|
| 625 |
+
BufferPool utilizes Stream's allocator to create new buffers for GpuMat's. It is
|
| 626 |
+
only useful when enabled with #setBufferPoolUsage.
|
| 627 |
+
|
| 628 |
+
@code
|
| 629 |
+
setBufferPoolUsage(true);
|
| 630 |
+
@endcode
|
| 631 |
+
|
| 632 |
+
@note #setBufferPoolUsage must be called \em before any Stream declaration.
|
| 633 |
+
|
| 634 |
+
Users may specify custom allocator for Stream and may implement their own stream based
|
| 635 |
+
functions utilizing the same underlying GPU memory management.
|
| 636 |
+
|
| 637 |
+
If custom allocator is not specified, BufferPool utilizes StackAllocator by
|
| 638 |
+
default. StackAllocator allocates a chunk of GPU device memory beforehand,
|
| 639 |
+
and when GpuMat is declared later on, it is given the pre-allocated memory.
|
| 640 |
+
This kind of strategy reduces the number of calls for memory allocating APIs
|
| 641 |
+
such as cudaMalloc or cudaMallocPitch.
|
| 642 |
+
|
| 643 |
+
Below is an example that utilizes BufferPool with StackAllocator:
|
| 644 |
+
|
| 645 |
+
@code
|
| 646 |
+
#include <opencv2/opencv.hpp>
|
| 647 |
+
|
| 648 |
+
using namespace cv;
|
| 649 |
+
using namespace cv::cuda;
|
| 650 |
+
|
| 651 |
+
int main()
|
| 652 |
+
{
|
| 653 |
+
setBufferPoolUsage(true); // Tell OpenCV that we are going to utilize BufferPool
|
| 654 |
+
setBufferPoolConfig(getDevice(), 1024 * 1024 * 64, 2); // Allocate 64 MB, 2 stacks (default is 10 MB, 5 stacks)
|
| 655 |
+
|
| 656 |
+
Stream stream1, stream2; // Each stream uses 1 stack
|
| 657 |
+
BufferPool pool1(stream1), pool2(stream2);
|
| 658 |
+
|
| 659 |
+
GpuMat d_src1 = pool1.getBuffer(4096, 4096, CV_8UC1); // 16MB
|
| 660 |
+
GpuMat d_dst1 = pool1.getBuffer(4096, 4096, CV_8UC3); // 48MB, pool1 is now full
|
| 661 |
+
|
| 662 |
+
GpuMat d_src2 = pool2.getBuffer(1024, 1024, CV_8UC1); // 1MB
|
| 663 |
+
GpuMat d_dst2 = pool2.getBuffer(1024, 1024, CV_8UC3); // 3MB
|
| 664 |
+
|
| 665 |
+
cvtColor(d_src1, d_dst1, cv::COLOR_GRAY2BGR, 0, stream1);
|
| 666 |
+
cvtColor(d_src2, d_dst2, cv::COLOR_GRAY2BGR, 0, stream2);
|
| 667 |
+
}
|
| 668 |
+
@endcode
|
| 669 |
+
|
| 670 |
+
If we allocate another GpuMat on pool1 in the above example, it will be carried out by
|
| 671 |
+
the DefaultAllocator since the stack for pool1 is full.
|
| 672 |
+
|
| 673 |
+
@code
|
| 674 |
+
GpuMat d_add1 = pool1.getBuffer(1024, 1024, CV_8UC1); // Stack for pool1 is full, memory is allocated with DefaultAllocator
|
| 675 |
+
@endcode
|
| 676 |
+
|
| 677 |
+
If a third stream is declared in the above example, allocating with #getBuffer
|
| 678 |
+
within that stream will also be carried out by the DefaultAllocator because we've run out of
|
| 679 |
+
stacks.
|
| 680 |
+
|
| 681 |
+
@code
|
| 682 |
+
Stream stream3; // Only 2 stacks were allocated, we've run out of stacks
|
| 683 |
+
BufferPool pool3(stream3);
|
| 684 |
+
GpuMat d_src3 = pool3.getBuffer(1024, 1024, CV_8UC1); // Memory is allocated with DefaultAllocator
|
| 685 |
+
@endcode
|
| 686 |
+
|
| 687 |
+
@warning When utilizing StackAllocator, deallocation order is important.
|
| 688 |
+
|
| 689 |
+
Just like a stack, deallocation must be done in LIFO order. Below is an example of
|
| 690 |
+
erroneous usage that violates LIFO rule. If OpenCV is compiled in Debug mode, this
|
| 691 |
+
sample code will emit CV_Assert error.
|
| 692 |
+
|
| 693 |
+
@code
|
| 694 |
+
int main()
|
| 695 |
+
{
|
| 696 |
+
setBufferPoolUsage(true); // Tell OpenCV that we are going to utilize BufferPool
|
| 697 |
+
Stream stream; // A default size (10 MB) stack is allocated to this stream
|
| 698 |
+
BufferPool pool(stream);
|
| 699 |
+
|
| 700 |
+
GpuMat mat1 = pool.getBuffer(1024, 1024, CV_8UC1); // Allocate mat1 (1MB)
|
| 701 |
+
GpuMat mat2 = pool.getBuffer(1024, 1024, CV_8UC1); // Allocate mat2 (1MB)
|
| 702 |
+
|
| 703 |
+
mat1.release(); // erroneous usage : mat2 must be deallocated before mat1
|
| 704 |
+
}
|
| 705 |
+
@endcode
|
| 706 |
+
|
| 707 |
+
Since C++ local variables are destroyed in the reverse order of construction,
|
| 708 |
+
the code sample below satisfies the LIFO rule. Local GpuMat's are deallocated
|
| 709 |
+
and the corresponding memory is automatically returned to the pool for later usage.
|
| 710 |
+
|
| 711 |
+
@code
|
| 712 |
+
int main()
|
| 713 |
+
{
|
| 714 |
+
setBufferPoolUsage(true); // Tell OpenCV that we are going to utilize BufferPool
|
| 715 |
+
setBufferPoolConfig(getDevice(), 1024 * 1024 * 64, 2); // Allocate 64 MB, 2 stacks (default is 10 MB, 5 stacks)
|
| 716 |
+
|
| 717 |
+
Stream stream1, stream2; // Each stream uses 1 stack
|
| 718 |
+
BufferPool pool1(stream1), pool2(stream2);
|
| 719 |
+
|
| 720 |
+
for (int i = 0; i < 10; i++)
|
| 721 |
+
{
|
| 722 |
+
GpuMat d_src1 = pool1.getBuffer(4096, 4096, CV_8UC1); // 16MB
|
| 723 |
+
GpuMat d_dst1 = pool1.getBuffer(4096, 4096, CV_8UC3); // 48MB, pool1 is now full
|
| 724 |
+
|
| 725 |
+
GpuMat d_src2 = pool2.getBuffer(1024, 1024, CV_8UC1); // 1MB
|
| 726 |
+
GpuMat d_dst2 = pool2.getBuffer(1024, 1024, CV_8UC3); // 3MB
|
| 727 |
+
|
| 728 |
+
d_src1.setTo(Scalar(i), stream1);
|
| 729 |
+
d_src2.setTo(Scalar(i), stream2);
|
| 730 |
+
|
| 731 |
+
cvtColor(d_src1, d_dst1, cv::COLOR_GRAY2BGR, 0, stream1);
|
| 732 |
+
cvtColor(d_src2, d_dst2, cv::COLOR_GRAY2BGR, 0, stream2);
|
| 733 |
+
// The order of destruction of the local variables is:
|
| 734 |
+
// d_dst2 => d_src2 => d_dst1 => d_src1
|
| 735 |
+
// LIFO rule is satisfied, this code runs without error
|
| 736 |
+
}
|
| 737 |
+
}
|
| 738 |
+
@endcode
|
| 739 |
+
*/
|
| 740 |
+
class CV_EXPORTS_W BufferPool
|
| 741 |
+
{
|
| 742 |
+
public:
|
| 743 |
+
|
| 744 |
+
//! Gets the BufferPool for the given stream.
|
| 745 |
+
CV_WRAP explicit BufferPool(Stream& stream);
|
| 746 |
+
|
| 747 |
+
//! Allocates a new GpuMat of given size and type.
|
| 748 |
+
CV_WRAP GpuMat getBuffer(int rows, int cols, int type);
|
| 749 |
+
|
| 750 |
+
// WARNING: unreachable code using Ninja
|
| 751 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 752 |
+
#pragma warning(push)
|
| 753 |
+
#pragma warning(disable: 4702)
|
| 754 |
+
#endif
|
| 755 |
+
//! Allocates a new GpuMat of given size and type.
|
| 756 |
+
CV_WRAP GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); }
|
| 757 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 758 |
+
#pragma warning(pop)
|
| 759 |
+
#endif
|
| 760 |
+
|
| 761 |
+
//! Returns the allocator associated with the stream.
|
| 762 |
+
CV_WRAP Ptr<GpuMat::Allocator> getAllocator() const { return allocator_; }
|
| 763 |
+
|
| 764 |
+
private:
|
| 765 |
+
Ptr<GpuMat::Allocator> allocator_;
|
| 766 |
+
};
|
| 767 |
+
|
| 768 |
+
//! BufferPool management (must be called before Stream creation)
|
| 769 |
+
CV_EXPORTS_W void setBufferPoolUsage(bool on);
|
| 770 |
+
CV_EXPORTS_W void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
|
| 771 |
+
|
| 772 |
+
//===================================================================================
|
| 773 |
+
// HostMem
|
| 774 |
+
//===================================================================================
|
| 775 |
+
|
| 776 |
+
/** @brief Class with reference counting wrapping special memory type allocation functions from CUDA.
|
| 777 |
+
|
| 778 |
+
Its interface is also Mat-like but with additional memory type parameters.
|
| 779 |
+
|
| 780 |
+
- **PAGE_LOCKED** sets a page locked memory type used commonly for fast and asynchronous
|
| 781 |
+
uploading/downloading data from/to GPU.
|
| 782 |
+
- **SHARED** specifies a zero copy memory allocation that enables mapping the host memory to GPU
|
| 783 |
+
address space, if supported.
|
| 784 |
+
- **WRITE_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are
|
| 785 |
+
used to supply GPU with data when GPU only reads it. The advantage is a better CPU cache
|
| 786 |
+
utilization.
|
| 787 |
+
|
| 788 |
+
@note Allocation size of such memory types is usually limited. For more details, see *CUDA 2.2
|
| 789 |
+
Pinned Memory APIs* document or *CUDA C Programming Guide*.
|
| 790 |
+
*/
|
| 791 |
+
class CV_EXPORTS_W HostMem
|
| 792 |
+
{
|
| 793 |
+
public:
|
| 794 |
+
enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 };
|
| 795 |
+
|
| 796 |
+
static MatAllocator* getAllocator(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
|
| 797 |
+
|
| 798 |
+
CV_WRAP explicit HostMem(HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
|
| 799 |
+
|
| 800 |
+
HostMem(const HostMem& m);
|
| 801 |
+
|
| 802 |
+
CV_WRAP HostMem(int rows, int cols, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
|
| 803 |
+
CV_WRAP HostMem(Size size, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
|
| 804 |
+
|
| 805 |
+
//! creates from host memory, copying the data
|
| 806 |
+
CV_WRAP explicit HostMem(InputArray arr, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED);
|
| 807 |
+
|
| 808 |
+
~HostMem();
|
| 809 |
+
|
| 810 |
+
HostMem& operator =(const HostMem& m);
|
| 811 |
+
|
| 812 |
+
//! swaps with other smart pointer
|
| 813 |
+
CV_WRAP void swap(HostMem& b);
|
| 814 |
+
|
| 815 |
+
//! returns deep copy of the matrix, i.e. the data is copied
|
| 816 |
+
CV_WRAP HostMem clone() const;
|
| 817 |
+
|
| 818 |
+
//! allocates new matrix data unless the matrix already has specified size and type.
|
| 819 |
+
CV_WRAP void create(int rows, int cols, int type);
|
| 820 |
+
void create(Size size, int type);
|
| 821 |
+
|
| 822 |
+
//! creates alternative HostMem header for the same data, with different
|
| 823 |
+
//! number of channels and/or different number of rows
|
| 824 |
+
CV_WRAP HostMem reshape(int cn, int rows = 0) const;
|
| 825 |
+
|
| 826 |
+
//! decrements the reference counter and releases the memory if needed.
|
| 827 |
+
void release();
|
| 828 |
+
|
| 829 |
+
//! returns matrix header with disabled reference counting for HostMem data.
|
| 830 |
+
CV_WRAP Mat createMatHeader() const;
|
| 831 |
+
|
| 832 |
+
/** @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting
|
| 833 |
+
for it.
|
| 834 |
+
|
| 835 |
+
This can be done only if memory was allocated with the SHARED flag and if it is supported by the
|
| 836 |
+
hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which
|
| 837 |
+
eliminates an extra copy.
|
| 838 |
+
*/
|
| 839 |
+
GpuMat createGpuMatHeader() const;
|
| 840 |
+
|
| 841 |
+
// Please see cv::Mat for descriptions
|
| 842 |
+
CV_WRAP bool isContinuous() const;
|
| 843 |
+
CV_WRAP size_t elemSize() const;
|
| 844 |
+
CV_WRAP size_t elemSize1() const;
|
| 845 |
+
CV_WRAP int type() const;
|
| 846 |
+
CV_WRAP int depth() const;
|
| 847 |
+
CV_WRAP int channels() const;
|
| 848 |
+
CV_WRAP size_t step1() const;
|
| 849 |
+
CV_WRAP Size size() const;
|
| 850 |
+
CV_WRAP bool empty() const;
|
| 851 |
+
|
| 852 |
+
// Please see cv::Mat for descriptions
|
| 853 |
+
int flags;
|
| 854 |
+
int rows, cols;
|
| 855 |
+
CV_PROP size_t step;
|
| 856 |
+
|
| 857 |
+
uchar* data;
|
| 858 |
+
int* refcount;
|
| 859 |
+
|
| 860 |
+
uchar* datastart;
|
| 861 |
+
const uchar* dataend;
|
| 862 |
+
|
| 863 |
+
AllocType alloc_type;
|
| 864 |
+
};
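As a usage illustration (not part of the header itself), the following hedged sketch allocates page-locked host memory, wraps it as a regular cv::Mat via createMatHeader(), and uploads it to the GPU asynchronously. The image size, type, and the single-stream setup are illustrative assumptions.

// Illustrative sketch only (assumptions: 480x640 CV_8UC1 data, one asynchronous stream).
#include <opencv2/core/cuda.hpp>

void uploadFromPinnedMemory()
{
    cv::cuda::HostMem pinned(480, 640, CV_8UC1, cv::cuda::HostMem::PAGE_LOCKED);
    cv::Mat header = pinned.createMatHeader();   // regular Mat view over the pinned buffer
    header.setTo(cv::Scalar(0));                 // fill on the CPU side

    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_img;
    d_img.upload(header, stream);                // asynchronous copy from pinned memory
    stream.waitForCompletion();
}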
|
| 865 |
+
|
| 866 |
+
/** @brief Page-locks the memory of matrix and maps it for the device(s).
|
| 867 |
+
|
| 868 |
+
@param m Input matrix.
|
| 869 |
+
*/
|
| 870 |
+
CV_EXPORTS_W void registerPageLocked(Mat& m);
|
| 871 |
+
|
| 872 |
+
/** @brief Unmaps the memory of matrix and makes it pageable again.
|
| 873 |
+
|
| 874 |
+
@param m Input matrix.
|
| 875 |
+
*/
|
| 876 |
+
CV_EXPORTS_W void unregisterPageLocked(Mat& m);
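For illustration only, a short hedged sketch of how registerPageLocked/unregisterPageLocked might be used around an existing cv::Mat to speed up repeated asynchronous uploads (the loop count and image size are made-up assumptions):

#include <opencv2/core/cuda.hpp>

void pinExistingMat()
{
    cv::Mat frame(480, 640, CV_8UC3);             // ordinary pageable allocation
    cv::cuda::registerPageLocked(frame);          // pin it for faster DMA transfers

    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_frame;
    for (int i = 0; i < 10; ++i)
        d_frame.upload(frame, stream);            // asynchronous uploads from pinned memory
    stream.waitForCompletion();

    cv::cuda::unregisterPageLocked(frame);        // make the memory pageable again
}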
|
| 877 |
+
|
| 878 |
+
//===================================================================================
|
| 879 |
+
// Stream
|
| 880 |
+
//===================================================================================
|
| 881 |
+
|
| 882 |
+
/** @brief This class encapsulates a queue of asynchronous calls.
|
| 883 |
+
|
| 884 |
+
@note Currently, you may face problems if an operation is enqueued twice with different data. Some
|
| 885 |
+
functions use constant GPU memory, and the next call may update that memory before the previous one
|
| 886 |
+
has finished. Calling different operations asynchronously is safe, however, because each operation
|
| 887 |
+
has its own constant buffer. Memory copy/upload/download/set operations to the buffers you hold are
|
| 888 |
+
also safe.
|
| 889 |
+
|
| 890 |
+
@note The Stream class is not thread-safe. Please use different Stream objects for different CPU threads.
|
| 891 |
+
|
| 892 |
+
@code
|
| 893 |
+
void thread1()
|
| 894 |
+
{
|
| 895 |
+
cv::cuda::Stream stream1;
|
| 896 |
+
cv::cuda::func1(..., stream1);
|
| 897 |
+
}
|
| 898 |
+
|
| 899 |
+
void thread2()
|
| 900 |
+
{
|
| 901 |
+
cv::cuda::Stream stream2;
|
| 902 |
+
cv::cuda::func2(..., stream2);
|
| 903 |
+
}
|
| 904 |
+
@endcode
|
| 905 |
+
|
| 906 |
+
@note By default, all CUDA routines are launched in the Stream::Null() object if the stream is not specified by the user.
|
| 907 |
+
In a multi-threaded environment, stream objects must be passed explicitly (see the previous note).
|
| 908 |
+
*/
|
| 909 |
+
class CV_EXPORTS_W Stream
|
| 910 |
+
{
|
| 911 |
+
typedef void (Stream::*bool_type)() const;
|
| 912 |
+
void this_type_does_not_support_comparisons() const {}
|
| 913 |
+
|
| 914 |
+
public:
|
| 915 |
+
typedef void (*StreamCallback)(int status, void* userData);
|
| 916 |
+
|
| 917 |
+
//! creates a new asynchronous stream
|
| 918 |
+
CV_WRAP Stream();
|
| 919 |
+
|
| 920 |
+
//! creates a new asynchronous stream with custom allocator
|
| 921 |
+
CV_WRAP Stream(const Ptr<GpuMat::Allocator>& allocator);
|
| 922 |
+
|
| 923 |
+
/** @brief creates a new Stream using the cudaFlags argument to determine the behaviors of the stream
|
| 924 |
+
|
| 925 |
+
@note The cudaFlags parameter is passed to the underlying api cudaStreamCreateWithFlags() and
|
| 926 |
+
supports the same parameter values.
|
| 927 |
+
@code
|
| 928 |
+
// creates an OpenCV cuda::Stream that manages an asynchronous, non-blocking,
|
| 929 |
+
// non-default CUDA stream
|
| 930 |
+
cv::cuda::Stream cvStream(cudaStreamNonBlocking);
|
| 931 |
+
@endcode
|
| 932 |
+
*/
|
| 933 |
+
CV_WRAP Stream(const size_t cudaFlags);
|
| 934 |
+
|
| 935 |
+
/** @brief Returns true if the current stream queue is finished. Otherwise, it returns false.
|
| 936 |
+
*/
|
| 937 |
+
CV_WRAP bool queryIfComplete() const;
|
| 938 |
+
|
| 939 |
+
/** @brief Blocks the current CPU thread until all operations in the stream are complete.
|
| 940 |
+
*/
|
| 941 |
+
CV_WRAP void waitForCompletion();
|
| 942 |
+
|
| 943 |
+
/** @brief Makes a compute stream wait on an event.
|
| 944 |
+
*/
|
| 945 |
+
CV_WRAP void waitEvent(const Event& event);
|
| 946 |
+
|
| 947 |
+
/** @brief Adds a callback to be called on the host after all currently enqueued items in the stream have
|
| 948 |
+
completed.
|
| 949 |
+
|
| 950 |
+
@note Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization
|
| 951 |
+
that may depend on outstanding device work or other callbacks that are not mandated to run earlier.
|
| 952 |
+
Callbacks without a mandated order (in independent streams) execute in undefined order and may be
|
| 953 |
+
serialized.
|
| 954 |
+
*/
|
| 955 |
+
void enqueueHostCallback(StreamCallback callback, void* userData);
|
| 956 |
+
|
| 957 |
+
//! returns the Stream object for the default CUDA stream
|
| 958 |
+
CV_WRAP static Stream& Null();
|
| 959 |
+
|
| 960 |
+
//! returns true if stream object is not default (!= 0)
|
| 961 |
+
operator bool_type() const;
|
| 962 |
+
|
| 963 |
+
//! returns a pointer to the underlying CUDA stream
|
| 964 |
+
CV_WRAP void* cudaPtr() const;
|
| 965 |
+
|
| 966 |
+
class Impl;
|
| 967 |
+
|
| 968 |
+
private:
|
| 969 |
+
Ptr<Impl> impl_;
|
| 970 |
+
Stream(const Ptr<Impl>& impl);
|
| 971 |
+
|
| 972 |
+
friend struct StreamAccessor;
|
| 973 |
+
friend class BufferPool;
|
| 974 |
+
friend class DefaultDeviceInitializer;
|
| 975 |
+
};
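A hedged usage sketch of the Stream class (the sizes and the conversion parameters are illustrative assumptions): two dependent operations and a download are queued on one stream, and the CPU waits once at the end.

#include <opencv2/core/cuda.hpp>

void asyncPipeline(const cv::Mat& input, cv::Mat& output)
{
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_src, d_dst;

    d_src.upload(input, stream);                          // async host -> device copy
    d_src.convertTo(d_dst, CV_32F, 1.0 / 255.0, stream);  // async conversion on the GPU
    d_dst.download(output, stream);                       // async device -> host copy

    // ... unrelated CPU work can run here ...
    stream.waitForCompletion();                           // block until the queue is finished
}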
|
| 976 |
+
|
| 977 |
+
|
| 978 |
+
/** @brief Bindings overload to create a Stream object from the address stored in an existing CUDA Runtime API stream pointer (cudaStream_t).
|
| 979 |
+
@param cudaStreamMemoryAddress Memory address stored in a CUDA Runtime API stream pointer (cudaStream_t). The created Stream object does not perform any allocation or deallocation and simply wraps existing raw CUDA Runtime API stream pointer.
|
| 980 |
+
@note Overload for generation of bindings only, not exported or intended for use internally from C++.
|
| 981 |
+
*/
|
| 982 |
+
CV_EXPORTS_W Stream wrapStream(size_t cudaStreamMemoryAddress);
|
| 983 |
+
|
| 984 |
+
class CV_EXPORTS_W Event
|
| 985 |
+
{
|
| 986 |
+
public:
|
| 987 |
+
enum CreateFlags
|
| 988 |
+
{
|
| 989 |
+
DEFAULT = 0x00, /**< Default event flag */
|
| 990 |
+
BLOCKING_SYNC = 0x01, /**< Event uses blocking synchronization */
|
| 991 |
+
DISABLE_TIMING = 0x02, /**< Event will not record timing data */
|
| 992 |
+
INTERPROCESS = 0x04 /**< Event is suitable for interprocess use. DisableTiming must be set */
|
| 993 |
+
};
|
| 994 |
+
|
| 995 |
+
CV_WRAP explicit Event(const Event::CreateFlags flags = Event::CreateFlags::DEFAULT);
|
| 996 |
+
|
| 997 |
+
//! records an event
|
| 998 |
+
CV_WRAP void record(Stream& stream = Stream::Null());
|
| 999 |
+
|
| 1000 |
+
//! queries an event's status
|
| 1001 |
+
CV_WRAP bool queryIfComplete() const;
|
| 1002 |
+
|
| 1003 |
+
//! waits for an event to complete
|
| 1004 |
+
CV_WRAP void waitForCompletion();
|
| 1005 |
+
|
| 1006 |
+
//! computes the elapsed time between events
|
| 1007 |
+
CV_WRAP static float elapsedTime(const Event& start, const Event& end);
|
| 1008 |
+
|
| 1009 |
+
class Impl;
|
| 1010 |
+
|
| 1011 |
+
private:
|
| 1012 |
+
Ptr<Impl> impl_;
|
| 1013 |
+
Event(const Ptr<Impl>& impl);
|
| 1014 |
+
|
| 1015 |
+
friend struct EventAccessor;
|
| 1016 |
+
};
|
| 1017 |
+
CV_ENUM_FLAGS(Event::CreateFlags)
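The sketch below, an illustration rather than part of the API reference, times an asynchronous upload with a pair of events (the input matrix is assumed to come from the caller):

#include <opencv2/core/cuda.hpp>
#include <iostream>

void timeUpload(const cv::Mat& host)
{
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_mat;
    cv::cuda::Event start, stop;

    start.record(stream);
    d_mat.upload(host, stream);
    stop.record(stream);

    stop.waitForCompletion();   // make sure the 'stop' event has actually been reached
    std::cout << cv::cuda::Event::elapsedTime(start, stop) << " ms" << std::endl;
}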
|
| 1018 |
+
|
| 1019 |
+
//! @} cudacore_struct
|
| 1020 |
+
|
| 1021 |
+
//===================================================================================
|
| 1022 |
+
// Initialization & Info
|
| 1023 |
+
//===================================================================================
|
| 1024 |
+
|
| 1025 |
+
//! @addtogroup cudacore_init
|
| 1026 |
+
//! @{
|
| 1027 |
+
|
| 1028 |
+
/** @brief Returns the number of installed CUDA-enabled devices.
|
| 1029 |
+
|
| 1030 |
+
Use this function before any other CUDA function calls. If OpenCV is compiled without CUDA support,
|
| 1031 |
+
this function returns 0. If the CUDA driver is not installed, or is incompatible, this function
|
| 1032 |
+
returns -1.
|
| 1033 |
+
*/
|
| 1034 |
+
CV_EXPORTS_W int getCudaEnabledDeviceCount();
|
| 1035 |
+
|
| 1036 |
+
/** @brief Sets a device and initializes it for the current thread.
|
| 1037 |
+
|
| 1038 |
+
@param device System index of a CUDA device starting with 0.
|
| 1039 |
+
|
| 1040 |
+
If the call to this function is omitted, a default device is initialized at the first CUDA usage.
|
| 1041 |
+
*/
|
| 1042 |
+
CV_EXPORTS_W void setDevice(int device);
|
| 1043 |
+
|
| 1044 |
+
/** @brief Returns the current device index set by cuda::setDevice or initialized by default.
|
| 1045 |
+
*/
|
| 1046 |
+
CV_EXPORTS_W int getDevice();
|
| 1047 |
+
|
| 1048 |
+
/** @brief Explicitly destroys and cleans up all resources associated with the current device in the current
|
| 1049 |
+
process.
|
| 1050 |
+
|
| 1051 |
+
Any subsequent API call to this device will reinitialize the device.
|
| 1052 |
+
*/
|
| 1053 |
+
CV_EXPORTS_W void resetDevice();
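Taken together, these initialization routines are typically used as in the hedged sketch below (the preferred-device argument and the fallback policy are assumptions, not recommendations):

#include <opencv2/core/cuda.hpp>
#include <iostream>

bool initGpu(int preferredDevice = 0)
{
    int count = cv::cuda::getCudaEnabledDeviceCount();
    if (count <= 0)              // 0: built without CUDA support, -1: driver problem
        return false;

    cv::cuda::setDevice(preferredDevice < count ? preferredDevice : 0);
    std::cout << "Using CUDA device " << cv::cuda::getDevice() << std::endl;
    return true;
}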
|
| 1054 |
+
|
| 1055 |
+
/** @brief Enumeration providing CUDA computing features.
|
| 1056 |
+
*/
|
| 1057 |
+
enum FeatureSet
|
| 1058 |
+
{
|
| 1059 |
+
FEATURE_SET_COMPUTE_10 = 10,
|
| 1060 |
+
FEATURE_SET_COMPUTE_11 = 11,
|
| 1061 |
+
FEATURE_SET_COMPUTE_12 = 12,
|
| 1062 |
+
FEATURE_SET_COMPUTE_13 = 13,
|
| 1063 |
+
FEATURE_SET_COMPUTE_20 = 20,
|
| 1064 |
+
FEATURE_SET_COMPUTE_21 = 21,
|
| 1065 |
+
FEATURE_SET_COMPUTE_30 = 30,
|
| 1066 |
+
FEATURE_SET_COMPUTE_32 = 32,
|
| 1067 |
+
FEATURE_SET_COMPUTE_35 = 35,
|
| 1068 |
+
FEATURE_SET_COMPUTE_50 = 50,
|
| 1069 |
+
|
| 1070 |
+
GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
|
| 1071 |
+
SHARED_ATOMICS = FEATURE_SET_COMPUTE_12,
|
| 1072 |
+
NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13,
|
| 1073 |
+
WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30,
|
| 1074 |
+
DYNAMIC_PARALLELISM = FEATURE_SET_COMPUTE_35
|
| 1075 |
+
};
|
| 1076 |
+
|
| 1077 |
+
//! checks whether current device supports the given feature
|
| 1078 |
+
CV_EXPORTS bool deviceSupports(FeatureSet feature_set);
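For example (a minimal hedged sketch; guarding a double-precision code path this way is an assumption about the surrounding application, not a requirement):

#include <opencv2/core/cuda.hpp>

bool canUseDoublePrecision()
{
    // NATIVE_DOUBLE corresponds to compute capability 1.3 and newer.
    return cv::cuda::deviceSupports(cv::cuda::NATIVE_DOUBLE);
}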
|
| 1079 |
+
|
| 1080 |
+
/** @brief Class providing a set of static methods to check what NVIDIA\* card architecture the CUDA module was
|
| 1081 |
+
built for.
|
| 1082 |
+
|
| 1083 |
+
According to the CUDA C Programming Guide Version 3.2: "PTX code produced for some specific compute
|
| 1084 |
+
capability can always be compiled to binary code of greater or equal compute capability".
|
| 1085 |
+
*/
|
| 1086 |
+
class CV_EXPORTS_W TargetArchs
|
| 1087 |
+
{
|
| 1088 |
+
public:
|
| 1089 |
+
/** @brief The following method checks whether the module was built with the support of the given feature:
|
| 1090 |
+
|
| 1091 |
+
@param feature_set Features to be checked. See cv::cuda::FeatureSet.
|
| 1092 |
+
*/
|
| 1093 |
+
static bool builtWith(FeatureSet feature_set);
|
| 1094 |
+
|
| 1095 |
+
/** @brief There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA
|
| 1096 |
+
code for the given architecture(s):
|
| 1097 |
+
|
| 1098 |
+
@param major Major compute capability version.
|
| 1099 |
+
@param minor Minor compute capability version.
|
| 1100 |
+
*/
|
| 1101 |
+
CV_WRAP static bool has(int major, int minor);
|
| 1102 |
+
CV_WRAP static bool hasPtx(int major, int minor);
|
| 1103 |
+
CV_WRAP static bool hasBin(int major, int minor);
|
| 1104 |
+
|
| 1105 |
+
CV_WRAP static bool hasEqualOrLessPtx(int major, int minor);
|
| 1106 |
+
CV_WRAP static bool hasEqualOrGreater(int major, int minor);
|
| 1107 |
+
CV_WRAP static bool hasEqualOrGreaterPtx(int major, int minor);
|
| 1108 |
+
CV_WRAP static bool hasEqualOrGreaterBin(int major, int minor);
|
| 1109 |
+
};
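A short hedged sketch of TargetArchs in use (the hard-coded compute capability 3.0 is an arbitrary example value):

#include <opencv2/core/cuda.hpp>

bool builtForKeplerOrNewer()
{
    // True if the module carries PTX or binary code usable on compute capability >= 3.0.
    return cv::cuda::TargetArchs::hasEqualOrGreater(3, 0);
}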
|
| 1110 |
+
|
| 1111 |
+
/** @brief Class providing functionality for querying the specified GPU properties.
|
| 1112 |
+
*/
|
| 1113 |
+
class CV_EXPORTS_W DeviceInfo
|
| 1114 |
+
{
|
| 1115 |
+
public:
|
| 1116 |
+
//! creates DeviceInfo object for the current GPU
|
| 1117 |
+
CV_WRAP DeviceInfo();
|
| 1118 |
+
|
| 1119 |
+
/** @brief The constructors.
|
| 1120 |
+
|
| 1121 |
+
@param device_id System index of the CUDA device starting with 0.
|
| 1122 |
+
|
| 1123 |
+
Constructs the DeviceInfo object for the specified device. If the device_id parameter is omitted, it
|
| 1124 |
+
constructs an object for the current device.
|
| 1125 |
+
*/
|
| 1126 |
+
CV_WRAP DeviceInfo(int device_id);
|
| 1127 |
+
|
| 1128 |
+
/** @brief Returns system index of the CUDA device starting with 0.
|
| 1129 |
+
*/
|
| 1130 |
+
CV_WRAP int deviceID() const;
|
| 1131 |
+
|
| 1132 |
+
//! ASCII string identifying device
|
| 1133 |
+
const char* name() const;
|
| 1134 |
+
|
| 1135 |
+
//! global memory available on device in bytes
|
| 1136 |
+
CV_WRAP size_t totalGlobalMem() const;
|
| 1137 |
+
|
| 1138 |
+
//! shared memory available per block in bytes
|
| 1139 |
+
CV_WRAP size_t sharedMemPerBlock() const;
|
| 1140 |
+
|
| 1141 |
+
//! 32-bit registers available per block
|
| 1142 |
+
CV_WRAP int regsPerBlock() const;
|
| 1143 |
+
|
| 1144 |
+
//! warp size in threads
|
| 1145 |
+
CV_WRAP int warpSize() const;
|
| 1146 |
+
|
| 1147 |
+
//! maximum pitch in bytes allowed by memory copies
|
| 1148 |
+
CV_WRAP size_t memPitch() const;
|
| 1149 |
+
|
| 1150 |
+
//! maximum number of threads per block
|
| 1151 |
+
CV_WRAP int maxThreadsPerBlock() const;
|
| 1152 |
+
|
| 1153 |
+
//! maximum size of each dimension of a block
|
| 1154 |
+
CV_WRAP Vec3i maxThreadsDim() const;
|
| 1155 |
+
|
| 1156 |
+
//! maximum size of each dimension of a grid
|
| 1157 |
+
CV_WRAP Vec3i maxGridSize() const;
|
| 1158 |
+
|
| 1159 |
+
//! clock frequency in kilohertz
|
| 1160 |
+
CV_WRAP int clockRate() const;
|
| 1161 |
+
|
| 1162 |
+
//! constant memory available on device in bytes
|
| 1163 |
+
CV_WRAP size_t totalConstMem() const;
|
| 1164 |
+
|
| 1165 |
+
//! major compute capability
|
| 1166 |
+
CV_WRAP int majorVersion() const;
|
| 1167 |
+
|
| 1168 |
+
//! minor compute capability
|
| 1169 |
+
CV_WRAP int minorVersion() const;
|
| 1170 |
+
|
| 1171 |
+
//! alignment requirement for textures
|
| 1172 |
+
CV_WRAP size_t textureAlignment() const;
|
| 1173 |
+
|
| 1174 |
+
//! pitch alignment requirement for texture references bound to pitched memory
|
| 1175 |
+
CV_WRAP size_t texturePitchAlignment() const;
|
| 1176 |
+
|
| 1177 |
+
//! number of multiprocessors on device
|
| 1178 |
+
CV_WRAP int multiProcessorCount() const;
|
| 1179 |
+
|
| 1180 |
+
//! specifies whether there is a run-time limit on kernels
|
| 1181 |
+
CV_WRAP bool kernelExecTimeoutEnabled() const;
|
| 1182 |
+
|
| 1183 |
+
//! device is integrated as opposed to discrete
|
| 1184 |
+
CV_WRAP bool integrated() const;
|
| 1185 |
+
|
| 1186 |
+
//! device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer
|
| 1187 |
+
CV_WRAP bool canMapHostMemory() const;
|
| 1188 |
+
|
| 1189 |
+
enum ComputeMode
|
| 1190 |
+
{
|
| 1191 |
+
ComputeModeDefault, /**< default compute mode (Multiple threads can use cudaSetDevice with this device) */
|
| 1192 |
+
ComputeModeExclusive, /**< compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice with this device) */
|
| 1193 |
+
ComputeModeProhibited, /**< compute-prohibited mode (No threads can use cudaSetDevice with this device) */
|
| 1194 |
+
ComputeModeExclusiveProcess /**< compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice with this device) */
|
| 1195 |
+
};
|
| 1196 |
+
|
| 1197 |
+
//! compute mode
|
| 1198 |
+
CV_WRAP DeviceInfo::ComputeMode computeMode() const;
|
| 1199 |
+
|
| 1200 |
+
//! maximum 1D texture size
|
| 1201 |
+
CV_WRAP int maxTexture1D() const;
|
| 1202 |
+
|
| 1203 |
+
//! maximum 1D mipmapped texture size
|
| 1204 |
+
CV_WRAP int maxTexture1DMipmap() const;
|
| 1205 |
+
|
| 1206 |
+
//! maximum size for 1D textures bound to linear memory
|
| 1207 |
+
CV_WRAP int maxTexture1DLinear() const;
|
| 1208 |
+
|
| 1209 |
+
//! maximum 2D texture dimensions
|
| 1210 |
+
CV_WRAP Vec2i maxTexture2D() const;
|
| 1211 |
+
|
| 1212 |
+
//! maximum 2D mipmapped texture dimensions
|
| 1213 |
+
CV_WRAP Vec2i maxTexture2DMipmap() const;
|
| 1214 |
+
|
| 1215 |
+
//! maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory
|
| 1216 |
+
CV_WRAP Vec3i maxTexture2DLinear() const;
|
| 1217 |
+
|
| 1218 |
+
//! maximum 2D texture dimensions if texture gather operations have to be performed
|
| 1219 |
+
CV_WRAP Vec2i maxTexture2DGather() const;
|
| 1220 |
+
|
| 1221 |
+
//! maximum 3D texture dimensions
|
| 1222 |
+
CV_WRAP Vec3i maxTexture3D() const;
|
| 1223 |
+
|
| 1224 |
+
//! maximum Cubemap texture dimensions
|
| 1225 |
+
CV_WRAP int maxTextureCubemap() const;
|
| 1226 |
+
|
| 1227 |
+
//! maximum 1D layered texture dimensions
|
| 1228 |
+
CV_WRAP Vec2i maxTexture1DLayered() const;
|
| 1229 |
+
|
| 1230 |
+
//! maximum 2D layered texture dimensions
|
| 1231 |
+
CV_WRAP Vec3i maxTexture2DLayered() const;
|
| 1232 |
+
|
| 1233 |
+
//! maximum Cubemap layered texture dimensions
|
| 1234 |
+
CV_WRAP Vec2i maxTextureCubemapLayered() const;
|
| 1235 |
+
|
| 1236 |
+
//! maximum 1D surface size
|
| 1237 |
+
CV_WRAP int maxSurface1D() const;
|
| 1238 |
+
|
| 1239 |
+
//! maximum 2D surface dimensions
|
| 1240 |
+
CV_WRAP Vec2i maxSurface2D() const;
|
| 1241 |
+
|
| 1242 |
+
//! maximum 3D surface dimensions
|
| 1243 |
+
CV_WRAP Vec3i maxSurface3D() const;
|
| 1244 |
+
|
| 1245 |
+
//! maximum 1D layered surface dimensions
|
| 1246 |
+
CV_WRAP Vec2i maxSurface1DLayered() const;
|
| 1247 |
+
|
| 1248 |
+
//! maximum 2D layered surface dimensions
|
| 1249 |
+
CV_WRAP Vec3i maxSurface2DLayered() const;
|
| 1250 |
+
|
| 1251 |
+
//! maximum Cubemap surface dimensions
|
| 1252 |
+
CV_WRAP int maxSurfaceCubemap() const;
|
| 1253 |
+
|
| 1254 |
+
//! maximum Cubemap layered surface dimensions
|
| 1255 |
+
CV_WRAP Vec2i maxSurfaceCubemapLayered() const;
|
| 1256 |
+
|
| 1257 |
+
//! alignment requirements for surfaces
|
| 1258 |
+
CV_WRAP size_t surfaceAlignment() const;
|
| 1259 |
+
|
| 1260 |
+
//! device can possibly execute multiple kernels concurrently
|
| 1261 |
+
CV_WRAP bool concurrentKernels() const;
|
| 1262 |
+
|
| 1263 |
+
//! device has ECC support enabled
|
| 1264 |
+
CV_WRAP bool ECCEnabled() const;
|
| 1265 |
+
|
| 1266 |
+
//! PCI bus ID of the device
|
| 1267 |
+
CV_WRAP int pciBusID() const;
|
| 1268 |
+
|
| 1269 |
+
//! PCI device ID of the device
|
| 1270 |
+
CV_WRAP int pciDeviceID() const;
|
| 1271 |
+
|
| 1272 |
+
//! PCI domain ID of the device
|
| 1273 |
+
CV_WRAP int pciDomainID() const;
|
| 1274 |
+
|
| 1275 |
+
//! true if device is a Tesla device using TCC driver, false otherwise
|
| 1276 |
+
CV_WRAP bool tccDriver() const;
|
| 1277 |
+
|
| 1278 |
+
//! number of asynchronous engines
|
| 1279 |
+
CV_WRAP int asyncEngineCount() const;
|
| 1280 |
+
|
| 1281 |
+
//! device shares a unified address space with the host
|
| 1282 |
+
CV_WRAP bool unifiedAddressing() const;
|
| 1283 |
+
|
| 1284 |
+
//! peak memory clock frequency in kilohertz
|
| 1285 |
+
CV_WRAP int memoryClockRate() const;
|
| 1286 |
+
|
| 1287 |
+
//! global memory bus width in bits
|
| 1288 |
+
CV_WRAP int memoryBusWidth() const;
|
| 1289 |
+
|
| 1290 |
+
//! size of L2 cache in bytes
|
| 1291 |
+
CV_WRAP int l2CacheSize() const;
|
| 1292 |
+
|
| 1293 |
+
//! maximum resident threads per multiprocessor
|
| 1294 |
+
CV_WRAP int maxThreadsPerMultiProcessor() const;
|
| 1295 |
+
|
| 1296 |
+
//! gets free and total device memory
|
| 1297 |
+
CV_WRAP void queryMemory(size_t& totalMemory, size_t& freeMemory) const;
|
| 1298 |
+
CV_WRAP size_t freeMemory() const;
|
| 1299 |
+
CV_WRAP size_t totalMemory() const;
|
| 1300 |
+
|
| 1301 |
+
/** @brief Provides information on CUDA feature support.
|
| 1302 |
+
|
| 1303 |
+
@param feature_set Features to be checked. See cuda::FeatureSet.
|
| 1304 |
+
|
| 1305 |
+
This function returns true if the device has the specified CUDA feature. Otherwise, it returns false
|
| 1306 |
+
*/
|
| 1307 |
+
bool supports(FeatureSet feature_set) const;
|
| 1308 |
+
|
| 1309 |
+
/** @brief Checks the CUDA module and device compatibility.
|
| 1310 |
+
|
| 1311 |
+
This function returns true if the CUDA module can be run on the specified device. Otherwise, it
|
| 1312 |
+
returns false.
|
| 1313 |
+
*/
|
| 1314 |
+
CV_WRAP bool isCompatible() const;
|
| 1315 |
+
|
| 1316 |
+
private:
|
| 1317 |
+
int device_id_;
|
| 1318 |
+
};
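An illustrative sketch that enumerates all CUDA devices and prints a few of the properties above (the selection of properties printed is arbitrary):

#include <opencv2/core/cuda.hpp>
#include <iostream>

void listCudaDevices()
{
    int count = cv::cuda::getCudaEnabledDeviceCount();
    for (int i = 0; i < count; ++i)
    {
        cv::cuda::DeviceInfo info(i);
        std::cout << i << ": " << info.name()
                  << " cc " << info.majorVersion() << "." << info.minorVersion()
                  << ", " << info.totalGlobalMem() / (1024 * 1024) << " MB"
                  << (info.isCompatible() ? "" : " (not compatible with this OpenCV build)")
                  << std::endl;
    }
}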
|
| 1319 |
+
|
| 1320 |
+
CV_EXPORTS_W void printCudaDeviceInfo(int device);
|
| 1321 |
+
CV_EXPORTS_W void printShortCudaDeviceInfo(int device);
|
| 1322 |
+
|
| 1323 |
+
/** @brief Converts an array to half-precision floating point numbers.
|
| 1324 |
+
|
| 1325 |
+
@param _src input array.
|
| 1326 |
+
@param _dst output array.
|
| 1327 |
+
@param stream Stream for the asynchronous version.
|
| 1328 |
+
@sa convertFp16
|
| 1329 |
+
*/
|
| 1330 |
+
CV_EXPORTS void convertFp16(InputArray _src, OutputArray _dst, Stream& stream = Stream::Null());
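A minimal hedged sketch of a round trip through half precision; it assumes the FP16 result is held in a 16-bit GpuMat (as in other OpenCV convertFp16 overloads), and the input is assumed to be single-channel CV_32F:

#include <opencv2/core/cuda.hpp>

void fp16RoundTrip(const cv::cuda::GpuMat& src32f, cv::cuda::GpuMat& dst32f)
{
    CV_Assert(src32f.depth() == CV_32F);
    cv::cuda::GpuMat half;                 // 16-bit half-precision intermediate
    cv::cuda::convertFp16(src32f, half);   // FP32 -> FP16
    cv::cuda::convertFp16(half, dst32f);   // FP16 -> FP32
}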
|
| 1331 |
+
|
| 1332 |
+
//! @} cudacore_init
|
| 1333 |
+
|
| 1334 |
+
}} // namespace cv { namespace cuda {
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
#include "opencv2/core/cuda.inl.hpp"
|
| 1338 |
+
|
| 1339 |
+
#endif /* OPENCV_CORE_CUDA_HPP */
|
3rdparty/opencv/include/opencv2/core/cuda.inl.hpp
ADDED
|
@@ -0,0 +1,763 @@
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
| 16 |
+
// Third party copyrights are property of their respective owners.
|
| 17 |
+
//
|
| 18 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 19 |
+
// are permitted provided that the following conditions are met:
|
| 20 |
+
//
|
| 21 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 22 |
+
// this list of conditions and the following disclaimer.
|
| 23 |
+
//
|
| 24 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 25 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 26 |
+
// and/or other materials provided with the distribution.
|
| 27 |
+
//
|
| 28 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 29 |
+
// derived from this software without specific prior written permission.
|
| 30 |
+
//
|
| 31 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 32 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 33 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 34 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 35 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 36 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 37 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 38 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 39 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 40 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 41 |
+
//
|
| 42 |
+
//M*/
|
| 43 |
+
|
| 44 |
+
#ifndef OPENCV_CORE_CUDAINL_HPP
|
| 45 |
+
#define OPENCV_CORE_CUDAINL_HPP
|
| 46 |
+
|
| 47 |
+
#include "opencv2/core/cuda.hpp"
|
| 48 |
+
|
| 49 |
+
//! @cond IGNORED
|
| 50 |
+
|
| 51 |
+
namespace cv { namespace cuda {
|
| 52 |
+
|
| 53 |
+
//===================================================================================
|
| 54 |
+
// GpuMat
|
| 55 |
+
//===================================================================================
|
| 56 |
+
|
| 57 |
+
inline
|
| 58 |
+
GpuMat::GpuMat(Allocator* allocator_)
|
| 59 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
|
| 60 |
+
{}
|
| 61 |
+
|
| 62 |
+
inline
|
| 63 |
+
GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)
|
| 64 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
|
| 65 |
+
{
|
| 66 |
+
if (rows_ > 0 && cols_ > 0)
|
| 67 |
+
create(rows_, cols_, type_);
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
inline
|
| 71 |
+
GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)
|
| 72 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
|
| 73 |
+
{
|
| 74 |
+
if (size_.height > 0 && size_.width > 0)
|
| 75 |
+
create(size_.height, size_.width, type_);
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
// WARNING: unreachable code using Ninja
|
| 79 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 80 |
+
#pragma warning(push)
|
| 81 |
+
#pragma warning(disable: 4702)
|
| 82 |
+
#endif
|
| 83 |
+
inline
|
| 84 |
+
GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)
|
| 85 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
|
| 86 |
+
{
|
| 87 |
+
if (rows_ > 0 && cols_ > 0)
|
| 88 |
+
{
|
| 89 |
+
create(rows_, cols_, type_);
|
| 90 |
+
setTo(s_);
|
| 91 |
+
}
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
inline
|
| 95 |
+
GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)
|
| 96 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
|
| 97 |
+
{
|
| 98 |
+
if (size_.height > 0 && size_.width > 0)
|
| 99 |
+
{
|
| 100 |
+
create(size_.height, size_.width, type_);
|
| 101 |
+
setTo(s_);
|
| 102 |
+
}
|
| 103 |
+
}
|
| 104 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 105 |
+
#pragma warning(pop)
|
| 106 |
+
#endif
|
| 107 |
+
|
| 108 |
+
inline
|
| 109 |
+
GpuMat::GpuMat(const GpuMat& m)
|
| 110 |
+
: flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)
|
| 111 |
+
{
|
| 112 |
+
if (refcount)
|
| 113 |
+
CV_XADD(refcount, 1);
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
inline
|
| 117 |
+
GpuMat::GpuMat(InputArray arr, Allocator* allocator_) :
|
| 118 |
+
flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
|
| 119 |
+
{
|
| 120 |
+
upload(arr);
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
inline
|
| 124 |
+
GpuMat::~GpuMat()
|
| 125 |
+
{
|
| 126 |
+
release();
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
inline
|
| 130 |
+
GpuMat& GpuMat::operator =(const GpuMat& m)
|
| 131 |
+
{
|
| 132 |
+
if (this != &m)
|
| 133 |
+
{
|
| 134 |
+
GpuMat temp(m);
|
| 135 |
+
swap(temp);
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
return *this;
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
inline
|
| 142 |
+
void GpuMat::create(Size size_, int type_)
|
| 143 |
+
{
|
| 144 |
+
create(size_.height, size_.width, type_);
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
inline
|
| 148 |
+
void GpuMat::swap(GpuMat& b)
|
| 149 |
+
{
|
| 150 |
+
std::swap(flags, b.flags);
|
| 151 |
+
std::swap(rows, b.rows);
|
| 152 |
+
std::swap(cols, b.cols);
|
| 153 |
+
std::swap(step, b.step);
|
| 154 |
+
std::swap(data, b.data);
|
| 155 |
+
std::swap(datastart, b.datastart);
|
| 156 |
+
std::swap(dataend, b.dataend);
|
| 157 |
+
std::swap(refcount, b.refcount);
|
| 158 |
+
std::swap(allocator, b.allocator);
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
inline
|
| 162 |
+
GpuMat GpuMat::clone() const
|
| 163 |
+
{
|
| 164 |
+
GpuMat m;
|
| 165 |
+
copyTo(m);
|
| 166 |
+
return m;
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
// WARNING: unreachable code using Ninja
|
| 170 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 171 |
+
#pragma warning(push)
|
| 172 |
+
#pragma warning(disable: 4702)
|
| 173 |
+
#endif
|
| 174 |
+
inline
|
| 175 |
+
void GpuMat::copyTo(OutputArray dst, InputArray mask) const
|
| 176 |
+
{
|
| 177 |
+
copyTo(dst, mask, Stream::Null());
|
| 178 |
+
}
|
| 179 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 180 |
+
#pragma warning(pop)
|
| 181 |
+
#endif
|
| 182 |
+
|
| 183 |
+
inline
|
| 184 |
+
GpuMat& GpuMat::setTo(Scalar s)
|
| 185 |
+
{
|
| 186 |
+
return setTo(s, Stream::Null());
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
inline
|
| 190 |
+
GpuMat& GpuMat::setTo(Scalar s, InputArray mask)
|
| 191 |
+
{
|
| 192 |
+
return setTo(s, mask, Stream::Null());
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
// WARNING: unreachable code using Ninja
|
| 196 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 197 |
+
#pragma warning(push)
|
| 198 |
+
#pragma warning(disable: 4702)
|
| 199 |
+
#endif
|
| 200 |
+
inline
|
| 201 |
+
void GpuMat::convertTo(OutputArray dst, int rtype) const
|
| 202 |
+
{
|
| 203 |
+
convertTo(dst, rtype, Stream::Null());
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
inline
|
| 207 |
+
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const
|
| 208 |
+
{
|
| 209 |
+
convertTo(dst, rtype, alpha, beta, Stream::Null());
|
| 210 |
+
}
|
| 211 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 212 |
+
#pragma warning(pop)
|
| 213 |
+
#endif
|
| 214 |
+
|
| 215 |
+
inline
|
| 216 |
+
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const
|
| 217 |
+
{
|
| 218 |
+
convertTo(dst, rtype, alpha, 0.0, stream);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
inline
|
| 222 |
+
void GpuMat::assignTo(GpuMat& m, int _type) const
|
| 223 |
+
{
|
| 224 |
+
if (_type < 0)
|
| 225 |
+
m = *this;
|
| 226 |
+
else
|
| 227 |
+
convertTo(m, _type);
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
inline
|
| 231 |
+
uchar* GpuMat::ptr(int y)
|
| 232 |
+
{
|
| 233 |
+
CV_DbgAssert( (unsigned)y < (unsigned)rows );
|
| 234 |
+
return data + step * y;
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
inline
|
| 238 |
+
const uchar* GpuMat::ptr(int y) const
|
| 239 |
+
{
|
| 240 |
+
CV_DbgAssert( (unsigned)y < (unsigned)rows );
|
| 241 |
+
return data + step * y;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
template<typename _Tp> inline
|
| 245 |
+
_Tp* GpuMat::ptr(int y)
|
| 246 |
+
{
|
| 247 |
+
return (_Tp*)ptr(y);
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
template<typename _Tp> inline
|
| 251 |
+
const _Tp* GpuMat::ptr(int y) const
|
| 252 |
+
{
|
| 253 |
+
return (const _Tp*)ptr(y);
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
template <class T> inline
|
| 257 |
+
GpuMat::operator PtrStepSz<T>() const
|
| 258 |
+
{
|
| 259 |
+
return PtrStepSz<T>(rows, cols, (T*)data, step);
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
template <class T> inline
|
| 263 |
+
GpuMat::operator PtrStep<T>() const
|
| 264 |
+
{
|
| 265 |
+
return PtrStep<T>((T*)data, step);
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
inline
|
| 269 |
+
GpuMat GpuMat::row(int y) const
|
| 270 |
+
{
|
| 271 |
+
return GpuMat(*this, Range(y, y+1), Range::all());
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
inline
|
| 275 |
+
GpuMat GpuMat::col(int x) const
|
| 276 |
+
{
|
| 277 |
+
return GpuMat(*this, Range::all(), Range(x, x+1));
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
inline
|
| 281 |
+
GpuMat GpuMat::rowRange(int startrow, int endrow) const
|
| 282 |
+
{
|
| 283 |
+
return GpuMat(*this, Range(startrow, endrow), Range::all());
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
inline
|
| 287 |
+
GpuMat GpuMat::rowRange(Range r) const
|
| 288 |
+
{
|
| 289 |
+
return GpuMat(*this, r, Range::all());
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
inline
|
| 293 |
+
GpuMat GpuMat::colRange(int startcol, int endcol) const
|
| 294 |
+
{
|
| 295 |
+
return GpuMat(*this, Range::all(), Range(startcol, endcol));
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
inline
|
| 299 |
+
GpuMat GpuMat::colRange(Range r) const
|
| 300 |
+
{
|
| 301 |
+
return GpuMat(*this, Range::all(), r);
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
inline
|
| 305 |
+
GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const
|
| 306 |
+
{
|
| 307 |
+
return GpuMat(*this, rowRange_, colRange_);
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
inline
|
| 311 |
+
GpuMat GpuMat::operator ()(Rect roi) const
|
| 312 |
+
{
|
| 313 |
+
return GpuMat(*this, roi);
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
inline
|
| 317 |
+
bool GpuMat::isContinuous() const
|
| 318 |
+
{
|
| 319 |
+
return (flags & Mat::CONTINUOUS_FLAG) != 0;
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
inline
|
| 323 |
+
size_t GpuMat::elemSize() const
|
| 324 |
+
{
|
| 325 |
+
return CV_ELEM_SIZE(flags);
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
inline
|
| 329 |
+
size_t GpuMat::elemSize1() const
|
| 330 |
+
{
|
| 331 |
+
return CV_ELEM_SIZE1(flags);
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
inline
|
| 335 |
+
int GpuMat::type() const
|
| 336 |
+
{
|
| 337 |
+
return CV_MAT_TYPE(flags);
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
inline
|
| 341 |
+
int GpuMat::depth() const
|
| 342 |
+
{
|
| 343 |
+
return CV_MAT_DEPTH(flags);
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
inline
|
| 347 |
+
int GpuMat::channels() const
|
| 348 |
+
{
|
| 349 |
+
return CV_MAT_CN(flags);
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
inline
|
| 353 |
+
size_t GpuMat::step1() const
|
| 354 |
+
{
|
| 355 |
+
return step / elemSize1();
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
inline
|
| 359 |
+
Size GpuMat::size() const
|
| 360 |
+
{
|
| 361 |
+
return Size(cols, rows);
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
inline
|
| 365 |
+
bool GpuMat::empty() const
|
| 366 |
+
{
|
| 367 |
+
return data == 0;
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
inline
|
| 371 |
+
void* GpuMat::cudaPtr() const
|
| 372 |
+
{
|
| 373 |
+
return data;
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
static inline
|
| 377 |
+
GpuMat createContinuous(int rows, int cols, int type)
|
| 378 |
+
{
|
| 379 |
+
GpuMat m;
|
| 380 |
+
createContinuous(rows, cols, type, m);
|
| 381 |
+
return m;
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
static inline
|
| 385 |
+
void createContinuous(Size size, int type, OutputArray arr)
|
| 386 |
+
{
|
| 387 |
+
createContinuous(size.height, size.width, type, arr);
|
| 388 |
+
}
|
| 389 |
+
|
| 390 |
+
static inline
|
| 391 |
+
GpuMat createContinuous(Size size, int type)
|
| 392 |
+
{
|
| 393 |
+
GpuMat m;
|
| 394 |
+
createContinuous(size, type, m);
|
| 395 |
+
return m;
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
static inline
|
| 399 |
+
void ensureSizeIsEnough(Size size, int type, OutputArray arr)
|
| 400 |
+
{
|
| 401 |
+
ensureSizeIsEnough(size.height, size.width, type, arr);
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
static inline
|
| 405 |
+
void swap(GpuMat& a, GpuMat& b)
|
| 406 |
+
{
|
| 407 |
+
a.swap(b);
|
| 408 |
+
}
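These convenience wrappers might be used as in the following hedged sketch (the buffer size and type are assumptions chosen only for illustration):

#include <opencv2/core/cuda.hpp>

void reuseBuffers()
{
    // Continuous allocation: rows are packed without padding between them.
    cv::cuda::GpuMat buf = cv::cuda::createContinuous(480, 640, CV_8UC1);

    // Re-allocates only if the existing buffer is too small or of a different type.
    cv::cuda::ensureSizeIsEnough(cv::Size(640, 480), CV_8UC1, buf);

    cv::cuda::GpuMat other;
    cv::cuda::swap(buf, other);            // O(1) exchange of headers and data pointers
}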
|
| 409 |
+
|
| 410 |
+
//===================================================================================
|
| 411 |
+
// GpuMatND
|
| 412 |
+
//===================================================================================
|
| 413 |
+
|
| 414 |
+
inline
|
| 415 |
+
GpuMatND::GpuMatND() :
|
| 416 |
+
flags(0), dims(0), data(nullptr), offset(0)
|
| 417 |
+
{
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
inline
|
| 421 |
+
GpuMatND::GpuMatND(SizeArray _size, int _type) :
|
| 422 |
+
flags(0), dims(0), data(nullptr), offset(0)
|
| 423 |
+
{
|
| 424 |
+
create(std::move(_size), _type);
|
| 425 |
+
}
|
| 426 |
+
|
| 427 |
+
inline
|
| 428 |
+
void GpuMatND::swap(GpuMatND& m) noexcept
|
| 429 |
+
{
|
| 430 |
+
std::swap(*this, m);
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
inline
|
| 434 |
+
bool GpuMatND::isContinuous() const
|
| 435 |
+
{
|
| 436 |
+
return (flags & Mat::CONTINUOUS_FLAG) != 0;
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
inline
|
| 440 |
+
bool GpuMatND::isSubmatrix() const
|
| 441 |
+
{
|
| 442 |
+
return (flags & Mat::SUBMATRIX_FLAG) != 0;
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
inline
|
| 446 |
+
size_t GpuMatND::elemSize() const
|
| 447 |
+
{
|
| 448 |
+
return CV_ELEM_SIZE(flags);
|
| 449 |
+
}
|
| 450 |
+
|
| 451 |
+
inline
|
| 452 |
+
size_t GpuMatND::elemSize1() const
|
| 453 |
+
{
|
| 454 |
+
return CV_ELEM_SIZE1(flags);
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
inline
|
| 458 |
+
bool GpuMatND::empty() const
|
| 459 |
+
{
|
| 460 |
+
return data == nullptr;
|
| 461 |
+
}
|
| 462 |
+
|
| 463 |
+
inline
|
| 464 |
+
bool GpuMatND::external() const
|
| 465 |
+
{
|
| 466 |
+
return !empty() && data_.use_count() == 0;
|
| 467 |
+
}
|
| 468 |
+
|
| 469 |
+
inline
|
| 470 |
+
uchar* GpuMatND::getDevicePtr() const
|
| 471 |
+
{
|
| 472 |
+
return data + offset;
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
inline
|
| 476 |
+
size_t GpuMatND::total() const
|
| 477 |
+
{
|
| 478 |
+
size_t p = 1;
|
| 479 |
+
for(auto s : size)
|
| 480 |
+
p *= s;
|
| 481 |
+
return p;
|
| 482 |
+
}
|
| 483 |
+
|
| 484 |
+
inline
|
| 485 |
+
size_t GpuMatND::totalMemSize() const
|
| 486 |
+
{
|
| 487 |
+
return size[0] * step[0];
|
| 488 |
+
}
|
| 489 |
+
|
| 490 |
+
inline
|
| 491 |
+
int GpuMatND::type() const
|
| 492 |
+
{
|
| 493 |
+
return CV_MAT_TYPE(flags);
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
//===================================================================================
|
| 497 |
+
// HostMem
|
| 498 |
+
//===================================================================================
|
| 499 |
+
|
| 500 |
+
inline
|
| 501 |
+
HostMem::HostMem(AllocType alloc_type_)
|
| 502 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
|
| 503 |
+
{
|
| 504 |
+
}
|
| 505 |
+
|
| 506 |
+
inline
|
| 507 |
+
HostMem::HostMem(const HostMem& m)
|
| 508 |
+
: flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
|
| 509 |
+
{
|
| 510 |
+
if( refcount )
|
| 511 |
+
CV_XADD(refcount, 1);
|
| 512 |
+
}
|
| 513 |
+
|
| 514 |
+
inline
|
| 515 |
+
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
|
| 516 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
|
| 517 |
+
{
|
| 518 |
+
if (rows_ > 0 && cols_ > 0)
|
| 519 |
+
create(rows_, cols_, type_);
|
| 520 |
+
}
|
| 521 |
+
|
| 522 |
+
inline
|
| 523 |
+
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
|
| 524 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
|
| 525 |
+
{
|
| 526 |
+
if (size_.height > 0 && size_.width > 0)
|
| 527 |
+
create(size_.height, size_.width, type_);
|
| 528 |
+
}
|
| 529 |
+
|
| 530 |
+
inline
|
| 531 |
+
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
|
| 532 |
+
: flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
|
| 533 |
+
{
|
| 534 |
+
arr.getMat().copyTo(*this);
|
| 535 |
+
}
|
| 536 |
+
|
| 537 |
+
inline
|
| 538 |
+
HostMem::~HostMem()
|
| 539 |
+
{
|
| 540 |
+
release();
|
| 541 |
+
}
|
| 542 |
+
|
| 543 |
+
inline
|
| 544 |
+
HostMem& HostMem::operator =(const HostMem& m)
|
| 545 |
+
{
|
| 546 |
+
if (this != &m)
|
| 547 |
+
{
|
| 548 |
+
HostMem temp(m);
|
| 549 |
+
swap(temp);
|
| 550 |
+
}
|
| 551 |
+
|
| 552 |
+
return *this;
|
| 553 |
+
}
|
| 554 |
+
|
| 555 |
+
inline
|
| 556 |
+
void HostMem::swap(HostMem& b)
|
| 557 |
+
{
|
| 558 |
+
std::swap(flags, b.flags);
|
| 559 |
+
std::swap(rows, b.rows);
|
| 560 |
+
std::swap(cols, b.cols);
|
| 561 |
+
std::swap(step, b.step);
|
| 562 |
+
std::swap(data, b.data);
|
| 563 |
+
std::swap(datastart, b.datastart);
|
| 564 |
+
std::swap(dataend, b.dataend);
|
| 565 |
+
std::swap(refcount, b.refcount);
|
| 566 |
+
std::swap(alloc_type, b.alloc_type);
|
| 567 |
+
}
|
| 568 |
+
|
| 569 |
+
inline
|
| 570 |
+
HostMem HostMem::clone() const
|
| 571 |
+
{
|
| 572 |
+
HostMem m(size(), type(), alloc_type);
|
| 573 |
+
createMatHeader().copyTo(m);
|
| 574 |
+
return m;
|
| 575 |
+
}
|
| 576 |
+
|
| 577 |
+
inline
|
| 578 |
+
void HostMem::create(Size size_, int type_)
|
| 579 |
+
{
|
| 580 |
+
create(size_.height, size_.width, type_);
|
| 581 |
+
}
|
| 582 |
+
|
| 583 |
+
inline
|
| 584 |
+
Mat HostMem::createMatHeader() const
|
| 585 |
+
{
|
| 586 |
+
return Mat(size(), type(), data, step);
|
| 587 |
+
}
|
| 588 |
+
|
| 589 |
+
inline
|
| 590 |
+
bool HostMem::isContinuous() const
|
| 591 |
+
{
|
| 592 |
+
return (flags & Mat::CONTINUOUS_FLAG) != 0;
|
| 593 |
+
}
|
| 594 |
+
|
| 595 |
+
inline
|
| 596 |
+
size_t HostMem::elemSize() const
|
| 597 |
+
{
|
| 598 |
+
return CV_ELEM_SIZE(flags);
|
| 599 |
+
}
|
| 600 |
+
|
| 601 |
+
inline
|
| 602 |
+
size_t HostMem::elemSize1() const
|
| 603 |
+
{
|
| 604 |
+
return CV_ELEM_SIZE1(flags);
|
| 605 |
+
}
|
| 606 |
+
|
| 607 |
+
inline
|
| 608 |
+
int HostMem::type() const
|
| 609 |
+
{
|
| 610 |
+
return CV_MAT_TYPE(flags);
|
| 611 |
+
}
|
| 612 |
+
|
| 613 |
+
inline
|
| 614 |
+
int HostMem::depth() const
|
| 615 |
+
{
|
| 616 |
+
return CV_MAT_DEPTH(flags);
|
| 617 |
+
}
|
| 618 |
+
|
| 619 |
+
inline
|
| 620 |
+
int HostMem::channels() const
|
| 621 |
+
{
|
| 622 |
+
return CV_MAT_CN(flags);
|
| 623 |
+
}
|
| 624 |
+
|
| 625 |
+
inline
|
| 626 |
+
size_t HostMem::step1() const
|
| 627 |
+
{
|
| 628 |
+
return step / elemSize1();
|
| 629 |
+
}
|
| 630 |
+
|
| 631 |
+
inline
|
| 632 |
+
Size HostMem::size() const
|
| 633 |
+
{
|
| 634 |
+
return Size(cols, rows);
|
| 635 |
+
}
|
| 636 |
+
|
| 637 |
+
inline
|
| 638 |
+
bool HostMem::empty() const
|
| 639 |
+
{
|
| 640 |
+
return data == 0;
|
| 641 |
+
}
|
| 642 |
+
|
| 643 |
+
static inline
|
| 644 |
+
void swap(HostMem& a, HostMem& b)
|
| 645 |
+
{
|
| 646 |
+
a.swap(b);
|
| 647 |
+
}
|
| 648 |
+
|
| 649 |
+
//===================================================================================
|
| 650 |
+
// Stream
|
| 651 |
+
//===================================================================================
|
| 652 |
+
|
| 653 |
+
inline
|
| 654 |
+
Stream::Stream(const Ptr<Impl>& impl)
|
| 655 |
+
: impl_(impl)
|
| 656 |
+
{
|
| 657 |
+
}
|
| 658 |
+
|
| 659 |
+
//===================================================================================
|
| 660 |
+
// Event
|
| 661 |
+
//===================================================================================
|
| 662 |
+
|
| 663 |
+
inline
|
| 664 |
+
Event::Event(const Ptr<Impl>& impl)
|
| 665 |
+
: impl_(impl)
|
| 666 |
+
{
|
| 667 |
+
}
|
| 668 |
+
|
| 669 |
+
//===================================================================================
|
| 670 |
+
// Initialization & Info
|
| 671 |
+
//===================================================================================
|
| 672 |
+
|
| 673 |
+
// WARNING: unreachable code using Ninja
|
| 674 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 675 |
+
#pragma warning(push)
|
| 676 |
+
#pragma warning(disable: 4702)
|
| 677 |
+
#endif
|
| 678 |
+
inline
|
| 679 |
+
bool TargetArchs::has(int major, int minor)
|
| 680 |
+
{
|
| 681 |
+
return hasPtx(major, minor) || hasBin(major, minor);
|
| 682 |
+
}
|
| 683 |
+
|
| 684 |
+
inline
|
| 685 |
+
bool TargetArchs::hasEqualOrGreater(int major, int minor)
|
| 686 |
+
{
|
| 687 |
+
return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);
|
| 688 |
+
}
|
| 689 |
+
|
| 690 |
+
inline
|
| 691 |
+
DeviceInfo::DeviceInfo()
|
| 692 |
+
{
|
| 693 |
+
device_id_ = getDevice();
|
| 694 |
+
}
|
| 695 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 696 |
+
#pragma warning(pop)
|
| 697 |
+
#endif
|
| 698 |
+
|
| 699 |
+
inline
|
| 700 |
+
DeviceInfo::DeviceInfo(int device_id)
|
| 701 |
+
{
|
| 702 |
+
CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );
|
| 703 |
+
device_id_ = device_id;
|
| 704 |
+
}
|
| 705 |
+
|
| 706 |
+
// WARNING: unreachable code using Ninja
|
| 707 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 708 |
+
#pragma warning(push)
|
| 709 |
+
#pragma warning(disable: 4702)
|
| 710 |
+
#endif
|
| 711 |
+
inline
|
| 712 |
+
int DeviceInfo::deviceID() const
|
| 713 |
+
{
|
| 714 |
+
return device_id_;
|
| 715 |
+
}
|
| 716 |
+
|
| 717 |
+
inline
|
| 718 |
+
size_t DeviceInfo::freeMemory() const
|
| 719 |
+
{
|
| 720 |
+
size_t _totalMemory = 0, _freeMemory = 0;
|
| 721 |
+
queryMemory(_totalMemory, _freeMemory);
|
| 722 |
+
return _freeMemory;
|
| 723 |
+
}
|
| 724 |
+
|
| 725 |
+
inline
|
| 726 |
+
size_t DeviceInfo::totalMemory() const
|
| 727 |
+
{
|
| 728 |
+
size_t _totalMemory = 0, _freeMemory = 0;
|
| 729 |
+
queryMemory(_totalMemory, _freeMemory);
|
| 730 |
+
return _totalMemory;
|
| 731 |
+
}
|
| 732 |
+
|
| 733 |
+
inline
|
| 734 |
+
bool DeviceInfo::supports(FeatureSet feature_set) const
|
| 735 |
+
{
|
| 736 |
+
int version = majorVersion() * 10 + minorVersion();
|
| 737 |
+
return version >= feature_set;
|
| 738 |
+
}
|
| 739 |
+
#if defined _MSC_VER && _MSC_VER >= 1920
|
| 740 |
+
#pragma warning(pop)
|
| 741 |
+
#endif
|
| 742 |
+
|
| 743 |
+
|
| 744 |
+
}} // namespace cv { namespace cuda {
|
| 745 |
+
|
| 746 |
+
//===================================================================================
|
| 747 |
+
// Mat
|
| 748 |
+
//===================================================================================
|
| 749 |
+
|
| 750 |
+
namespace cv {
|
| 751 |
+
|
| 752 |
+
inline
|
| 753 |
+
Mat::Mat(const cuda::GpuMat& m)
|
| 754 |
+
: flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)
|
| 755 |
+
{
|
| 756 |
+
m.download(*this);
|
| 757 |
+
}
|
| 758 |
+
|
| 759 |
+
}
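The converting constructor above means a GpuMat can be brought back to the host either implicitly or, for finer control, with an explicit (possibly asynchronous) download, as this hedged sketch shows:

#include <opencv2/core/cuda.hpp>

void downloadExamples(const cv::cuda::GpuMat& d_img)
{
    cv::Mat blocking(d_img);        // implicit, synchronous download via Mat(const cuda::GpuMat&)

    cv::Mat async;
    cv::cuda::Stream stream;
    d_img.download(async, stream);  // explicit, asynchronous download
    stream.waitForCompletion();
}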
|
| 760 |
+
|
| 761 |
+
//! @endcond
|
| 762 |
+
|
| 763 |
+
#endif // OPENCV_CORE_CUDAINL_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/block.hpp
ADDED
|
@@ -0,0 +1,211 @@
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_DEVICE_BLOCK_HPP
#define OPENCV_CUDA_DEVICE_BLOCK_HPP

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    struct Block
    {
        static __device__ __forceinline__ unsigned int id()
        {
            return blockIdx.x;
        }

        static __device__ __forceinline__ unsigned int stride()
        {
            return blockDim.x * blockDim.y * blockDim.z;
        }

        static __device__ __forceinline__ void sync()
        {
            __syncthreads();
        }

        static __device__ __forceinline__ int flattenedThreadId()
        {
            return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
        }

        template<typename It, typename T>
        static __device__ __forceinline__ void fill(It beg, It end, const T& value)
        {
            int STRIDE = stride();
            It t = beg + flattenedThreadId();

            for(; t < end; t += STRIDE)
                *t = value;
        }

        template<typename OutIt, typename T>
        static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
        {
            int STRIDE = stride();
            int tid = flattenedThreadId();
            value += tid;

            for(OutIt t = beg + tid; t < end; t += STRIDE, value += STRIDE)
                *t = value;
        }

        template<typename InIt, typename OutIt>
        static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out)
        {
            int STRIDE = stride();
            InIt t = beg + flattenedThreadId();
            OutIt o = out + (t - beg);

            for(; t < end; t += STRIDE, o += STRIDE)
                *o = *t;
        }

        template<typename InIt, typename OutIt, class UnOp>
        static __device__ __forceinline__ void transform(InIt beg, InIt end, OutIt out, UnOp op)
        {
            int STRIDE = stride();
            InIt t = beg + flattenedThreadId();
            OutIt o = out + (t - beg);

            for(; t < end; t += STRIDE, o += STRIDE)
                *o = op(*t);
        }

        template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
        static __device__ __forceinline__ void transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
        {
            int STRIDE = stride();
            InIt1 t1 = beg1 + flattenedThreadId();
            InIt2 t2 = beg2 + flattenedThreadId();
            OutIt o = out + (t1 - beg1);

            for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)
                *o = op(*t1, *t2);
        }

        template<int CTA_SIZE, typename T, class BinOp>
        static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op)
        {
            int tid = flattenedThreadId();
            T val = buffer[tid];

            if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
            if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
            if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
            if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }

            if (tid < 32)
            {
                if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
                if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
                if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
                if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
                if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
                if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
            }
        }

        template<int CTA_SIZE, typename T, class BinOp>
        static __device__ __forceinline__ T reduce(volatile T* buffer, T init, BinOp op)
        {
            int tid = flattenedThreadId();
            T val = buffer[tid] = init;
            __syncthreads();

            if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
            if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
            if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
            if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }

            if (tid < 32)
            {
                if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
                if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
                if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
                if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
                if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
                if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
            }
            __syncthreads();
            return buffer[0];
        }

        template <typename T, class BinOp>
        static __device__ __forceinline__ void reduce_n(T* data, unsigned int n, BinOp op)
        {
            int ftid = flattenedThreadId();
            int sft = stride();

            if (sft < n)
            {
                for (unsigned int i = sft + ftid; i < n; i += sft)
                    data[ftid] = op(data[ftid], data[i]);

                __syncthreads();

                n = sft;
            }

            while (n > 1)
            {
                unsigned int half = n/2;

                if (ftid < half)
                    data[ftid] = op(data[ftid], data[n - ftid - 1]);

                __syncthreads();

                n = n - half;
            }
        }
    };
}}}

//! @endcond

#endif /* OPENCV_CUDA_DEVICE_BLOCK_HPP */
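
Note on usage: the Block helpers above are block-cooperative primitives, so every thread of the launched block must reach the same call. Below is a minimal sketch of a block-wide sum built on Block::reduce. It is illustrative only and not part of the vendored sources; the kernel name, the fixed 256-thread launch, and the include path (assuming 3rdparty/opencv/include is on the compiler's include path) are assumptions.

// Illustrative sketch only (not part of this diff).
// Assumes a (256,1,1) block; smem must hold at least CTA_SIZE elements.
#include <opencv2/core/cuda/block.hpp>

namespace dev = cv::cuda::device;

struct PlusF
{
    __device__ __forceinline__ float operator ()(float a, float b) const { return a + b; }
};

__global__ void blockSum(const float* src, float* perBlockSums, int n)
{
    __shared__ float smem[256];

    const int tid = dev::Block::flattenedThreadId();
    const int i = blockIdx.x * 256 + tid;
    const float v = (i < n) ? src[i] : 0.0f;

    // reduce<CTA_SIZE> stores v into smem[tid], folds the block down to smem[0], and returns it.
    const float sum = dev::Block::reduce<256>(smem, v, PlusF());

    if (tid == 0)
        perBlockSums[dev::Block::id()] = sum;
}
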
3rdparty/opencv/include/opencv2/core/cuda/border_interpolate.hpp
ADDED
@@ -0,0 +1,722 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_BORDER_INTERPOLATE_HPP
#define OPENCV_CUDA_BORDER_INTERPOLATE_HPP

#include "saturate_cast.hpp"
#include "vec_traits.hpp"
#include "vec_math.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    //////////////////////////////////////////////////////////////
    // BrdConstant

    template <typename D> struct BrdRowConstant
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdRowConstant(int width_, const D& val_ = VecTraits<D>::all(0)) : width(width_), val(val_) {}

        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
        {
            return x >= 0 ? saturate_cast<D>(data[x]) : val;
        }

        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
        {
            return x < width ? saturate_cast<D>(data[x]) : val;
        }

        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
        {
            return (x >= 0 && x < width) ? saturate_cast<D>(data[x]) : val;
        }

        int width;
        D val;
    };

    template <typename D> struct BrdColConstant
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdColConstant(int height_, const D& val_ = VecTraits<D>::all(0)) : height(height_), val(val_) {}

        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
        {
            return y >= 0 ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
        }

        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
        {
            return y < height ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
        }

        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
        {
            return (y >= 0 && y < height) ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
        }

        int height;
        D val;
    };

    template <typename D> struct BrdConstant
    {
        typedef D result_type;

        __host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits<D>::all(0)) : height(height_), width(width_), val(val_)
        {
        }

        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
        {
            return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(((const T*)((const uchar*)data + y * step))[x]) : val;
        }

        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
        {
            return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
        }

        int height;
        int width;
        D val;
    };

    //////////////////////////////////////////////////////////////
    // BrdReplicate

    template <typename D> struct BrdRowReplicate
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdRowReplicate(int width) : last_col(width - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdRowReplicate(int width, U) : last_col(width - 1) {}

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return ::max(x, 0);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return ::min(x, last_col);
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_low(idx_col_high(x));
        }

        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_low(x)]);
        }

        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_high(x)]);
        }

        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col(x)]);
        }

        int last_col;
    };

    template <typename D> struct BrdColReplicate
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdColReplicate(int height) : last_row(height - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdColReplicate(int height, U) : last_row(height - 1) {}

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return ::max(y, 0);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return ::min(y, last_row);
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_low(idx_row_high(y));
        }

        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const T*)((const char*)data + idx_row_low(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const T*)((const char*)data + idx_row_high(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const T*)((const char*)data + idx_row(y) * step));
        }

        int last_row;
    };

    template <typename D> struct BrdReplicate
    {
        typedef D result_type;

        __host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return ::max(y, 0);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return ::min(y, last_row);
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_low(idx_row_high(y));
        }

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return ::max(x, 0);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return ::min(x, last_col);
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_low(idx_col_high(x));
        }

        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
        {
            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
        }

        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
        {
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

        int last_row;
        int last_col;
    };

    //////////////////////////////////////////////////////////////
    // BrdReflect101

    template <typename D> struct BrdRowReflect101
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdRowReflect101(int width) : last_col(width - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdRowReflect101(int width, U) : last_col(width - 1) {}

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return ::abs(x) % (last_col + 1);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_low(idx_col_high(x));
        }

        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_low(x)]);
        }

        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_high(x)]);
        }

        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col(x)]);
        }

        int last_col;
    };

    template <typename D> struct BrdColReflect101
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdColReflect101(int height) : last_row(height - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdColReflect101(int height, U) : last_row(height - 1) {}

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return ::abs(y) % (last_row + 1);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_low(idx_row_high(y));
        }

        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
        }

        int last_row;
    };

    template <typename D> struct BrdReflect101
    {
        typedef D result_type;

        __host__ __device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return ::abs(y) % (last_row + 1);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_low(idx_row_high(y));
        }

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return ::abs(x) % (last_col + 1);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_low(idx_col_high(x));
        }

        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
        {
            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
        }

        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
        {
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

        int last_row;
        int last_col;
    };

    //////////////////////////////////////////////////////////////
    // BrdReflect

    template <typename D> struct BrdRowReflect
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {}

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return (::abs(x) - (x < 0)) % (last_col + 1);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1);
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_high(::abs(x) - (x < 0));
        }

        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_low(x)]);
        }

        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_high(x)]);
        }

        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col(x)]);
        }

        int last_col;
    };

    template <typename D> struct BrdColReflect
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {}

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return (::abs(y) - (y < 0)) % (last_row + 1);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1);
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_high(::abs(y) - (y < 0));
        }

        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
        }

        int last_row;
    };

    template <typename D> struct BrdReflect
    {
        typedef D result_type;

        __host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {}
        template <typename U> __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return (::abs(y) - (y < 0)) % (last_row + 1);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/;
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_low(idx_row_high(y));
        }

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return (::abs(x) - (x < 0)) % (last_col + 1);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return (last_col - ::abs(last_col - x) + (x > last_col));
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_low(idx_col_high(x));
        }

        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
        {
            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
        }

        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
        {
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

        int last_row;
        int last_col;
    };

    //////////////////////////////////////////////////////////////
    // BrdWrap

    template <typename D> struct BrdRowWrap
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdRowWrap(int width_) : width(width_) {}
        template <typename U> __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {}

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return (x < width) * x + (x >= width) * (x % width);
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_high(idx_col_low(x));
        }

        template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_low(x)]);
        }

        template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col_high(x)]);
        }

        template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
        {
            return saturate_cast<D>(data[idx_col(x)]);
        }

        int width;
    };

    template <typename D> struct BrdColWrap
    {
        typedef D result_type;

        explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {}
        template <typename U> __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {}

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return (y < height) * y + (y >= height) * (y % height);
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_high(idx_row_low(y));
        }

        template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
        }

        template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
        {
            return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
        }

        int height;
    };

    template <typename D> struct BrdWrap
    {
        typedef D result_type;

        __host__ __device__ __forceinline__ BrdWrap(int height_, int width_) :
            height(height_), width(width_)
        {
        }
        template <typename U>
        __host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) :
            height(height_), width(width_)
        {
        }

        __device__ __forceinline__ int idx_row_low(int y) const
        {
            return (y >= 0) ? y : (y - ((y - height + 1) / height) * height);
        }

        __device__ __forceinline__ int idx_row_high(int y) const
        {
            return (y < height) ? y : (y % height);
        }

        __device__ __forceinline__ int idx_row(int y) const
        {
            return idx_row_high(idx_row_low(y));
        }

        __device__ __forceinline__ int idx_col_low(int x) const
        {
            return (x >= 0) ? x : (x - ((x - width + 1) / width) * width);
        }

        __device__ __forceinline__ int idx_col_high(int x) const
        {
            return (x < width) ? x : (x % width);
        }

        __device__ __forceinline__ int idx_col(int x) const
        {
            return idx_col_high(idx_col_low(x));
        }

        template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
        {
            return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
        }

        template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
        {
            return saturate_cast<D>(src(idx_row(y), idx_col(x)));
        }

        int height;
        int width;
    };

    //////////////////////////////////////////////////////////////
    // BorderReader

    template <typename Ptr2D, typename B> struct BorderReader
    {
        typedef typename B::result_type elem_type;
        typedef typename Ptr2D::index_type index_type;

        __host__ __device__ __forceinline__ BorderReader(const Ptr2D& ptr_, const B& b_) : ptr(ptr_), b(b_) {}

        __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const
        {
            return b.at(y, x, ptr);
        }

        Ptr2D ptr;
        B b;
    };

    // under win32 there is some bug with templated types that passed as kernel parameters
    // with this specialization all works fine
    template <typename Ptr2D, typename D> struct BorderReader< Ptr2D, BrdConstant<D> >
    {
        typedef typename BrdConstant<D>::result_type elem_type;
        typedef typename Ptr2D::index_type index_type;

        __host__ __device__ __forceinline__ BorderReader(const Ptr2D& src_, const BrdConstant<D>& b) :
            src(src_), height(b.height), width(b.width), val(b.val)
        {
        }

        __device__ __forceinline__ D operator ()(index_type y, index_type x) const
        {
            return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
        }

        Ptr2D src;
        int height;
        int width;
        D val;
    };
}}} // namespace cv { namespace cuda { namespace cudev

//! @endcond

#endif // OPENCV_CUDA_BORDER_INTERPOLATE_HPP
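
For orientation, the border policies above are meant to be composed with a 2D pointer wrapper through BorderReader, which then behaves like a bounds-safe src(y, x). Below is a hedged sketch of that composition; it is illustrative only and not part of this diff. It assumes cv::cuda::PtrStep/PtrStepSz from opencv2/core/cuda_types.hpp (also vendored in this commit) and a made-up kernel name.

// Illustrative sketch only: clamp-to-edge reads via BrdReplicate + BorderReader.
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda_types.hpp>

__global__ void boxMax3x3(cv::cuda::PtrStepSz<unsigned char> src, cv::cuda::PtrStep<unsigned char> dst)
{
    using namespace cv::cuda::device;

    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= src.cols || y >= src.rows)
        return;

    // Out-of-range coordinates are clamped to the nearest valid row/column.
    BrdReplicate<unsigned char> brd(src.rows, src.cols);
    BorderReader< cv::cuda::PtrStep<unsigned char>, BrdReplicate<unsigned char> > safeSrc(src, brd);

    unsigned char m = 0;
    for (int dy = -1; dy <= 1; ++dy)
        for (int dx = -1; dx <= 1; ++dx)
        {
            const unsigned char v = safeSrc(y + dy, x + dx);
            if (v > m) m = v;
        }

    dst(y, x) = m;
}

The same reader shape works with any of the Brd* policies above, so a kernel can be written once against safeSrc and instantiated per border mode.
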
3rdparty/opencv/include/opencv2/core/cuda/color.hpp
ADDED
@@ -0,0 +1,309 @@
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_COLOR_HPP
|
| 44 |
+
#define OPENCV_CUDA_COLOR_HPP
|
| 45 |
+
|
| 46 |
+
#include "detail/color_detail.hpp"
|
| 47 |
+
|
| 48 |
+
/** @file
|
| 49 |
+
* @deprecated Use @ref cudev instead.
|
| 50 |
+
*/
|
| 51 |
+
|
| 52 |
+
//! @cond IGNORED
|
| 53 |
+
|
| 54 |
+
namespace cv { namespace cuda { namespace device
|
| 55 |
+
{
|
| 56 |
+
// All OPENCV_CUDA_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements
|
| 57 |
+
// template <typename T> class ColorSpace1_to_ColorSpace2_traits
|
| 58 |
+
// {
|
| 59 |
+
// typedef ... functor_type;
|
| 60 |
+
// static __host__ __device__ functor_type create_functor();
|
| 61 |
+
// };
|
| 62 |
+
|
| 63 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2)
|
| 64 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0)
|
| 65 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2)
|
| 66 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0)
|
| 67 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2)
|
| 68 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2)
|
| 69 |
+
|
| 70 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS
|
| 71 |
+
|
| 72 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5)
|
| 73 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6)
|
| 74 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5)
|
| 75 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6)
|
| 76 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5)
|
| 77 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6)
|
| 78 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5)
|
| 79 |
+
OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6)
|
| 80 |
+
|
| 81 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS
|
| 82 |
+
|
| 83 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5)
|
| 84 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6)
|
| 85 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5)
|
| 86 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6)
|
| 87 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5)
|
| 88 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6)
|
| 89 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5)
|
| 90 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6)
|
| 91 |
+
|
| 92 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS
|
| 93 |
+
|
| 94 |
+
OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3)
|
| 95 |
+
OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4)
|
| 96 |
+
|
| 97 |
+
#undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS
|
| 98 |
+
|
| 99 |
+
OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5)
|
| 100 |
+
OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6)
|
| 101 |
+
|
| 102 |
+
#undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS
|
| 103 |
+
|
| 104 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5)
|
| 105 |
+
OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6)
|
| 106 |
+
|
| 107 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS
|
| 108 |
+
|
| 109 |
+
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2)
|
| 110 |
+
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0)
|
| 111 |
+
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2)
|
| 112 |
+
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0)
|
| 113 |
+
|
| 114 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS
|
| 115 |
+
|
| 116 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2)
|
| 117 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2)
|
| 118 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2)
|
| 119 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2)
|
| 120 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0)
|
| 121 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0)
|
| 122 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0)
|
| 123 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0)
|
| 124 |
+
|
| 125 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS
|
| 126 |
+
|
| 127 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2)
|
| 128 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2)
|
| 129 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2)
|
| 130 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2)
|
| 131 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0)
|
| 132 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0)
|
| 133 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0)
|
| 134 |
+
OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0)
|
| 135 |
+
|
| 136 |
+
#undef OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS
|
| 137 |
+
|
| 138 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2)
|
| 139 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2)
|
| 140 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2)
|
| 141 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2)
|
| 142 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0)
|
| 143 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0)
|
| 144 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0)
|
| 145 |
+
OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0)
|
| 146 |
+
|
| 147 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS
|
| 148 |
+
|
| 149 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2)
|
| 150 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2)
|
| 151 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2)
|
| 152 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2)
|
| 153 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0)
|
| 154 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0)
|
| 155 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0)
|
| 156 |
+
OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0)
|
| 157 |
+
|
| 158 |
+
#undef OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS
|
| 159 |
+
|
| 160 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2)
|
| 161 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2)
|
| 162 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2)
|
| 163 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2)
|
| 164 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0)
|
| 165 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0)
|
| 166 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0)
|
| 167 |
+
OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0)
|
| 168 |
+
|
| 169 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS
|
| 170 |
+
|
| 171 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2)
|
| 172 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2)
|
| 173 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2)
|
| 174 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2)
|
| 175 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0)
|
| 176 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0)
|
| 177 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0)
|
| 178 |
+
OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0)
|
| 179 |
+
|
| 180 |
+
#undef OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS
|
| 181 |
+
|
| 182 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2)
|
| 183 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2)
|
| 184 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2)
|
| 185 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2)
|
| 186 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0)
|
| 187 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0)
|
| 188 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0)
|
| 189 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0)
|
| 190 |
+
|
| 191 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS
|
| 192 |
+
|
| 193 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2)
|
| 194 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2)
|
| 195 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2)
|
| 196 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2)
|
| 197 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0)
|
| 198 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0)
|
| 199 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0)
|
| 200 |
+
OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0)
|
| 201 |
+
|
| 202 |
+
#undef OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS
|
| 203 |
+
|
| 204 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2)
|
| 205 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2)
|
| 206 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2)
|
| 207 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2)
|
| 208 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0)
|
| 209 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0)
|
| 210 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0)
|
| 211 |
+
OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0)
|
| 212 |
+
|
| 213 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS
|
| 214 |
+
|
| 215 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2)
|
| 216 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2)
|
| 217 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2)
|
| 218 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2)
|
| 219 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0)
|
| 220 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0)
|
| 221 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0)
|
| 222 |
+
OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0)
|
| 223 |
+
|
| 224 |
+
#undef OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS
|
| 225 |
+
|
| 226 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2)
|
| 227 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2)
|
| 228 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2)
|
| 229 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2)
|
| 230 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0)
|
| 231 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0)
|
| 232 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0)
|
| 233 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0)
|
| 234 |
+
|
| 235 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2)
|
| 236 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2)
|
| 237 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2)
|
| 238 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2)
|
| 239 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0)
|
| 240 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0)
|
| 241 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0)
|
| 242 |
+
OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0)
|
| 243 |
+
|
| 244 |
+
#undef OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS
|
| 245 |
+
|
| 246 |
+
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2)
|
| 247 |
+
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2)
|
| 248 |
+
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2)
|
| 249 |
+
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2)
|
| 250 |
+
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0)
|
| 251 |
+
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0)
|
| 252 |
+
OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0)
|
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0)

    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2)
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2)
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2)
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2)
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0)
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0)
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0)
    OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0)

    #undef OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS

    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0)

    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0)
    OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0)

    #undef OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS

    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0)

    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0)
    OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)

    #undef OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS
}}} // namespace cv { namespace cuda { namespace cudev

//! @endcond

#endif // OPENCV_CUDA_COLOR_HPP
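The traits generated above only declare device-side functors; they are normally driven through the generic per-pixel transform() helper from opencv2/core/cuda/transform.hpp (also part of this import). A minimal sketch, not part of the diff, of how one of the Luv2RGB traits defined here could be instantiated from a .cu file; the wrapper name luvToBgr is hypothetical and the exact include set is an assumption:

#include "opencv2/core/cuda/color.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/utility.hpp"   // WithOutMask

// Hypothetical helper: convert an 8-bit 3-channel Luv image to BGR on the GPU.
void luvToBgr(cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStepSz<uchar3> dst, cudaStream_t stream)
{
    // luv_to_bgr_traits is generated by OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0) above.
    typedef cv::cuda::device::luv_to_bgr_traits<uchar> traits;
    cv::cuda::device::transform(src, dst, traits::create_functor(), cv::cuda::device::WithOutMask(), stream);
}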
3rdparty/opencv/include/opencv2/core/cuda/common.hpp
ADDED
|
@@ -0,0 +1,131 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_COMMON_HPP
#define OPENCV_CUDA_COMMON_HPP

#include <cuda_runtime.h>
#include "opencv2/core/cuda_types.hpp"
#include "opencv2/core/cvdef.h"
#include "opencv2/core/base.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

#ifndef CV_PI_F
    #ifndef CV_PI
        #define CV_PI_F 3.14159265f
    #else
        #define CV_PI_F ((float)CV_PI)
    #endif
#endif

namespace cv { namespace cuda {
    static inline void checkCudaError(cudaError_t err, const char* file, const int line, const char* func)
    {
        if (cudaSuccess != err) {
            cudaGetLastError(); // reset the last stored error to cudaSuccess
            cv::error(cv::Error::GpuApiCallError, cudaGetErrorString(err), func, file, line);
        }
    }
}}

#ifndef cudaSafeCall
    #define cudaSafeCall(expr)  cv::cuda::checkCudaError(expr, __FILE__, __LINE__, CV_Func)
#endif

namespace cv { namespace cuda
{
    template <typename T> static inline bool isAligned(const T* ptr, size_t size)
    {
        return reinterpret_cast<size_t>(ptr) % size == 0;
    }

    static inline bool isAligned(size_t step, size_t size)
    {
        return step % size == 0;
    }
}}

namespace cv { namespace cuda
{
    namespace device
    {
        __host__ __device__ __forceinline__ int divUp(int total, int grain)
        {
            return (total + grain - 1) / grain;
        }

#if (CUDART_VERSION >= 12000)
        template<class T> inline void createTextureObjectPitch2D(cudaTextureObject_t*, PtrStepSz<T>&, const cudaTextureDesc&) {
            CV_Error(cv::Error::GpuNotSupported, "Function removed in CUDA SDK 12"); }
#else
        //TODO: remove from OpenCV 5.x
        template<class T> inline void bindTexture(const textureReference* tex, const PtrStepSz<T>& img)
        {
            cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
            cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) );
        }

        template<class T> inline void createTextureObjectPitch2D(cudaTextureObject_t* tex, PtrStepSz<T>& img, const cudaTextureDesc& texDesc)
        {
            cudaResourceDesc resDesc;
            memset(&resDesc, 0, sizeof(resDesc));
            resDesc.resType = cudaResourceTypePitch2D;
            resDesc.res.pitch2D.devPtr = static_cast<void*>(img.ptr());
            resDesc.res.pitch2D.height = img.rows;
            resDesc.res.pitch2D.width = img.cols;
            resDesc.res.pitch2D.pitchInBytes = img.step;
            resDesc.res.pitch2D.desc = cudaCreateChannelDesc<T>();

            cudaSafeCall( cudaCreateTextureObject(tex, &resDesc, &texDesc, NULL) );
        }
#endif
    }
}}

//! @endcond

#endif // OPENCV_CUDA_COMMON_HPP
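common.hpp mostly packages two small conveniences that the other headers in this import rely on: cudaSafeCall(), which turns a failing CUDA runtime call into a cv::Exception (cv::Error::GpuApiCallError) carrying file, line and function context, and cv::cuda::device::divUp(), the rounded-up division used to size kernel grids. A minimal sketch, not part of the diff, of how they are typically combined in a .cu file; the kernel and wrapper names are hypothetical:

#include <cuda_runtime.h>
#include "opencv2/core/cuda/common.hpp"

// Hypothetical kernel: scale every pixel of a single-channel float image in place.
__global__ void scaleKernel(cv::cuda::PtrStepSz<float> img, float factor)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < img.cols && y < img.rows)
        img(y, x) *= factor;
}

void scaleInPlace(cv::cuda::PtrStepSz<float> img, float factor)
{
    const dim3 block(32, 8);
    // divUp() rounds the grid size up so the whole image is covered.
    const dim3 grid(cv::cuda::device::divUp(img.cols, (int)block.x),
                    cv::cuda::device::divUp(img.rows, (int)block.y));

    scaleKernel<<<grid, block>>>(img, factor);

    // cudaSafeCall() converts any launch or execution error into a cv::Exception.
    cudaSafeCall( cudaGetLastError() );
    cudaSafeCall( cudaDeviceSynchronize() );
}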
3rdparty/opencv/include/opencv2/core/cuda/datamov_utils.hpp
ADDED
|
@@ -0,0 +1,113 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_DATAMOV_UTILS_HPP
#define OPENCV_CUDA_DATAMOV_UTILS_HPP

#include "common.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200

        // for Fermi memory space is detected automatically
        template <typename T> struct ForceGlob
        {
            __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val)  { val = ptr[offset]; }
        };

    #else // __CUDA_ARCH__ >= 200

        #if defined(_WIN64) || defined(__LP64__)
            // 64-bit register modifier for inlined asm
            #define OPENCV_CUDA_ASM_PTR "l"
        #else
            // 32-bit register modifier for inlined asm
            #define OPENCV_CUDA_ASM_PTR "r"
        #endif

        template<class T> struct ForceGlob;

        #define OPENCV_CUDA_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
            template <> struct ForceGlob<base_type> \
            { \
                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
                { \
                    asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \
                } \
            };

        #define OPENCV_CUDA_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
            template <> struct ForceGlob<base_type> \
            { \
                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
                { \
                    asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \
                } \
            };

        OPENCV_CUDA_DEFINE_FORCE_GLOB_B(uchar,  u8)
        OPENCV_CUDA_DEFINE_FORCE_GLOB_B(schar,  s8)
        OPENCV_CUDA_DEFINE_FORCE_GLOB_B(char,   b8)
        OPENCV_CUDA_DEFINE_FORCE_GLOB  (ushort, u16, h)
        OPENCV_CUDA_DEFINE_FORCE_GLOB  (short,  s16, h)
        OPENCV_CUDA_DEFINE_FORCE_GLOB  (uint,   u32, r)
        OPENCV_CUDA_DEFINE_FORCE_GLOB  (int,    s32, r)
        OPENCV_CUDA_DEFINE_FORCE_GLOB  (float,  f32, f)
        OPENCV_CUDA_DEFINE_FORCE_GLOB  (double, f64, d)

        #undef OPENCV_CUDA_DEFINE_FORCE_GLOB
        #undef OPENCV_CUDA_DEFINE_FORCE_GLOB_B
        #undef OPENCV_CUDA_ASM_PTR

    #endif // __CUDA_ARCH__ >= 200
}}} // namespace cv { namespace cuda { namespace cudev

//! @endcond

#endif // OPENCV_CUDA_DATAMOV_UTILS_HPP
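ForceGlob<T>::Load is a small abstraction over "read this element from global memory": on sm_20 and newer it compiles to a plain array access, while the pre-Fermi fallback forces an ld.global PTX instruction so the access cannot be treated as generic address space. A minimal sketch, not part of the diff, of a device-side use; the kernel name is hypothetical:

#include "opencv2/core/cuda/datamov_utils.hpp"

// Hypothetical kernel: copy n floats, reading through ForceGlob.
__global__ void copyKernel(const float* src, float* dst, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        float v;
        // Equivalent to v = src[i] on sm_20+, explicit ld.global.f32 on older targets.
        cv::cuda::device::ForceGlob<float>::Load(src, i, v);
        dst[i] = v;
    }
}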
3rdparty/opencv/include/opencv2/core/cuda/detail/color_detail.hpp
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
3rdparty/opencv/include/opencv2/core/cuda/detail/reduce.hpp
ADDED
|
@@ -0,0 +1,394 @@
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_REDUCE_DETAIL_HPP
|
| 44 |
+
#define OPENCV_CUDA_REDUCE_DETAIL_HPP
|
| 45 |
+
|
| 46 |
+
#include <thrust/tuple.h>
|
| 47 |
+
#include "../warp.hpp"
|
| 48 |
+
#include "../warp_shuffle.hpp"
|
| 49 |
+
|
| 50 |
+
//! @cond IGNORED
|
| 51 |
+
|
| 52 |
+
namespace cv { namespace cuda { namespace device
|
| 53 |
+
{
|
| 54 |
+
namespace reduce_detail
|
| 55 |
+
{
|
| 56 |
+
template <typename T> struct GetType;
|
| 57 |
+
template <typename T> struct GetType<T*>
|
| 58 |
+
{
|
| 59 |
+
typedef T type;
|
| 60 |
+
};
|
| 61 |
+
template <typename T> struct GetType<volatile T*>
|
| 62 |
+
{
|
| 63 |
+
typedef T type;
|
| 64 |
+
};
|
| 65 |
+
template <typename T> struct GetType<T&>
|
| 66 |
+
{
|
| 67 |
+
typedef T type;
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
template <unsigned int I, unsigned int N>
|
| 71 |
+
struct For
|
| 72 |
+
{
|
| 73 |
+
template <class PointerTuple, class ValTuple>
|
| 74 |
+
static __device__ void loadToSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
|
| 75 |
+
{
|
| 76 |
+
thrust::get<I>(smem)[tid] = thrust::get<I>(val);
|
| 77 |
+
|
| 78 |
+
For<I + 1, N>::loadToSmem(smem, val, tid);
|
| 79 |
+
}
|
| 80 |
+
template <class PointerTuple, class ValTuple>
|
| 81 |
+
static __device__ void loadFromSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
|
| 82 |
+
{
|
| 83 |
+
thrust::get<I>(val) = thrust::get<I>(smem)[tid];
|
| 84 |
+
|
| 85 |
+
For<I + 1, N>::loadFromSmem(smem, val, tid);
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
template <class PointerTuple, class ValTuple, class OpTuple>
|
| 89 |
+
static __device__ void merge(const PointerTuple& smem, const ValTuple& val, unsigned int tid, unsigned int delta, const OpTuple& op)
|
| 90 |
+
{
|
| 91 |
+
typename GetType<typename thrust::tuple_element<I, PointerTuple>::type>::type reg = thrust::get<I>(smem)[tid + delta];
|
| 92 |
+
thrust::get<I>(smem)[tid] = thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);
|
| 93 |
+
|
| 94 |
+
For<I + 1, N>::merge(smem, val, tid, delta, op);
|
| 95 |
+
}
|
| 96 |
+
template <class ValTuple, class OpTuple>
|
| 97 |
+
static __device__ void mergeShfl(const ValTuple& val, unsigned int delta, unsigned int width, const OpTuple& op)
|
| 98 |
+
{
|
| 99 |
+
typename GetType<typename thrust::tuple_element<I, ValTuple>::type>::type reg = shfl_down(thrust::get<I>(val), delta, width);
|
| 100 |
+
thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);
|
| 101 |
+
|
| 102 |
+
For<I + 1, N>::mergeShfl(val, delta, width, op);
|
| 103 |
+
}
|
| 104 |
+
};
|
| 105 |
+
template <unsigned int N>
|
| 106 |
+
struct For<N, N>
|
| 107 |
+
{
|
| 108 |
+
template <class PointerTuple, class ValTuple>
|
| 109 |
+
static __device__ void loadToSmem(const PointerTuple&, const ValTuple&, unsigned int)
|
| 110 |
+
{
|
| 111 |
+
}
|
| 112 |
+
template <class PointerTuple, class ValTuple>
|
| 113 |
+
static __device__ void loadFromSmem(const PointerTuple&, const ValTuple&, unsigned int)
|
| 114 |
+
{
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
template <class PointerTuple, class ValTuple, class OpTuple>
|
| 118 |
+
static __device__ void merge(const PointerTuple&, const ValTuple&, unsigned int, unsigned int, const OpTuple&)
|
| 119 |
+
{
|
| 120 |
+
}
|
| 121 |
+
template <class ValTuple, class OpTuple>
|
| 122 |
+
static __device__ void mergeShfl(const ValTuple&, unsigned int, unsigned int, const OpTuple&)
|
| 123 |
+
{
|
| 124 |
+
}
|
| 125 |
+
};
|
| 126 |
+
|
| 127 |
+
template <typename T>
|
| 128 |
+
__device__ __forceinline__ void loadToSmem(volatile T* smem, T& val, unsigned int tid)
|
| 129 |
+
{
|
| 130 |
+
smem[tid] = val;
|
| 131 |
+
}
|
| 132 |
+
template <typename T>
|
| 133 |
+
__device__ __forceinline__ void loadFromSmem(volatile T* smem, T& val, unsigned int tid)
|
| 134 |
+
{
|
| 135 |
+
val = smem[tid];
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
template <typename T, class Op>
|
| 139 |
+
__device__ __forceinline__ void merge(volatile T* smem, T& val, unsigned int tid, unsigned int delta, const Op& op)
|
| 140 |
+
{
|
| 141 |
+
T reg = smem[tid + delta];
|
| 142 |
+
smem[tid] = val = op(val, reg);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
template <typename T, class Op>
|
| 146 |
+
__device__ __forceinline__ void mergeShfl(T& val, unsigned int delta, unsigned int width, const Op& op)
|
| 147 |
+
{
|
| 148 |
+
T reg = shfl_down(val, delta, width);
|
| 149 |
+
val = op(val, reg);
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
#if (CUDART_VERSION < 12040) // details: https://github.com/opencv/opencv_contrib/issues/3690
|
| 153 |
+
template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
|
| 154 |
+
typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
|
| 155 |
+
__device__ __forceinline__ void loadToSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
|
| 156 |
+
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
|
| 157 |
+
unsigned int tid)
|
| 158 |
+
{
|
| 159 |
+
For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadToSmem(smem, val, tid);
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
|
| 163 |
+
typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
|
| 164 |
+
__device__ __forceinline__ void loadFromSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
|
| 165 |
+
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
|
| 166 |
+
unsigned int tid)
|
| 167 |
+
{
|
| 168 |
+
For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadFromSmem(smem, val, tid);
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
|
| 172 |
+
typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
|
| 173 |
+
class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
|
| 174 |
+
__device__ __forceinline__ void merge(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
|
| 175 |
+
const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
|
| 176 |
+
unsigned int tid,
|
| 177 |
+
unsigned int delta,
|
| 178 |
+
const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
|
| 179 |
+
{
|
| 180 |
+
For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::merge(smem, val, tid, delta, op);
|
| 181 |
+
}
|
| 182 |
+
template <typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
|
| 183 |
+
class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
|
| 184 |
+
__device__ __forceinline__ void mergeShfl(const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
|
| 185 |
+
unsigned int delta,
|
| 186 |
+
unsigned int width,
|
| 187 |
+
const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
|
| 188 |
+
{
|
| 189 |
+
For<0, thrust::tuple_size<thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9> >::value>::mergeShfl(val, delta, width, op);
|
| 190 |
+
}
|
| 191 |
+
#else
|
| 192 |
+
template <typename... P, typename... R>
|
| 193 |
+
__device__ __forceinline__ void loadToSmem(const thrust::tuple<P...>& smem, const thrust::tuple<R...>& val, unsigned int tid)
|
| 194 |
+
{
|
| 195 |
+
For<0, thrust::tuple_size<thrust::tuple<P...> >::value>::loadToSmem(smem, val, tid);
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
template <typename... P, typename... R>
|
| 199 |
+
__device__ __forceinline__ void loadFromSmem(const thrust::tuple<P...>& smem, const thrust::tuple<R...>& val, unsigned int tid)
|
| 200 |
+
{
|
| 201 |
+
For<0, thrust::tuple_size<thrust::tuple<P...> >::value>::loadFromSmem(smem, val, tid);
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
template <typename... P, typename... R, class... Op>
|
| 205 |
+
__device__ __forceinline__ void merge(const thrust::tuple<P...>& smem, const thrust::tuple<R...>& val, unsigned int tid, unsigned int delta, const thrust::tuple<Op...>& op)
|
| 206 |
+
{
|
| 207 |
+
For<0, thrust::tuple_size<thrust::tuple<P...> >::value>::merge(smem, val, tid, delta, op);
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
template <typename... R, class... Op>
|
| 211 |
+
__device__ __forceinline__ void mergeShfl(const thrust::tuple<R...>& val, unsigned int delta, unsigned int width, const thrust::tuple<Op...>& op)
|
| 212 |
+
{
|
| 213 |
+
For<0, thrust::tuple_size<thrust::tuple<R...> >::value>::mergeShfl(val, delta, width, op);
|
| 214 |
+
}
|
| 215 |
+
#endif
|
| 216 |
+
template <unsigned int N> struct Generic
|
| 217 |
+
{
|
| 218 |
+
template <typename Pointer, typename Reference, class Op>
|
| 219 |
+
static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
|
| 220 |
+
{
|
| 221 |
+
loadToSmem(smem, val, tid);
|
| 222 |
+
if (N >= 32)
|
| 223 |
+
__syncthreads();
|
| 224 |
+
|
| 225 |
+
if (N >= 2048)
|
| 226 |
+
{
|
| 227 |
+
if (tid < 1024)
|
| 228 |
+
merge(smem, val, tid, 1024, op);
|
| 229 |
+
|
| 230 |
+
__syncthreads();
|
| 231 |
+
}
|
| 232 |
+
if (N >= 1024)
|
| 233 |
+
{
|
| 234 |
+
if (tid < 512)
|
| 235 |
+
merge(smem, val, tid, 512, op);
|
| 236 |
+
|
| 237 |
+
__syncthreads();
|
| 238 |
+
}
|
| 239 |
+
if (N >= 512)
|
| 240 |
+
{
|
| 241 |
+
if (tid < 256)
|
| 242 |
+
merge(smem, val, tid, 256, op);
|
| 243 |
+
|
| 244 |
+
__syncthreads();
|
| 245 |
+
}
|
| 246 |
+
if (N >= 256)
|
| 247 |
+
{
|
| 248 |
+
if (tid < 128)
|
| 249 |
+
merge(smem, val, tid, 128, op);
|
| 250 |
+
|
| 251 |
+
__syncthreads();
|
| 252 |
+
}
|
| 253 |
+
if (N >= 128)
|
| 254 |
+
{
|
| 255 |
+
if (tid < 64)
|
| 256 |
+
merge(smem, val, tid, 64, op);
|
| 257 |
+
|
| 258 |
+
__syncthreads();
|
| 259 |
+
}
|
| 260 |
+
if (N >= 64)
|
| 261 |
+
{
|
| 262 |
+
if (tid < 32)
|
| 263 |
+
merge(smem, val, tid, 32, op);
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
if (tid < 16)
|
| 267 |
+
{
|
| 268 |
+
merge(smem, val, tid, 16, op);
|
| 269 |
+
merge(smem, val, tid, 8, op);
|
| 270 |
+
merge(smem, val, tid, 4, op);
|
| 271 |
+
merge(smem, val, tid, 2, op);
|
| 272 |
+
merge(smem, val, tid, 1, op);
|
| 273 |
+
}
|
| 274 |
+
}
|
| 275 |
+
};
|
| 276 |
+
|
| 277 |
+
template <unsigned int I, typename Pointer, typename Reference, class Op>
|
| 278 |
+
struct Unroll
|
| 279 |
+
{
|
| 280 |
+
static __device__ void loopShfl(Reference val, Op op, unsigned int N)
|
| 281 |
+
{
|
| 282 |
+
mergeShfl(val, I, N, op);
|
| 283 |
+
Unroll<I / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
|
| 284 |
+
}
|
| 285 |
+
static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op)
|
| 286 |
+
{
|
| 287 |
+
merge(smem, val, tid, I, op);
|
| 288 |
+
Unroll<I / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
|
| 289 |
+
}
|
| 290 |
+
};
|
| 291 |
+
template <typename Pointer, typename Reference, class Op>
|
| 292 |
+
struct Unroll<0, Pointer, Reference, Op>
|
| 293 |
+
{
|
| 294 |
+
static __device__ void loopShfl(Reference, Op, unsigned int)
|
| 295 |
+
{
|
| 296 |
+
}
|
| 297 |
+
static __device__ void loop(Pointer, Reference, unsigned int, Op)
|
| 298 |
+
{
|
| 299 |
+
}
|
| 300 |
+
};
|
| 301 |
+
|
| 302 |
+
template <unsigned int N> struct WarpOptimized
|
| 303 |
+
{
|
| 304 |
+
template <typename Pointer, typename Reference, class Op>
|
| 305 |
+
static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
|
| 306 |
+
{
|
| 307 |
+
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
|
| 308 |
+
CV_UNUSED(smem);
|
| 309 |
+
CV_UNUSED(tid);
|
| 310 |
+
|
| 311 |
+
Unroll<N / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
|
| 312 |
+
#else
|
| 313 |
+
loadToSmem(smem, val, tid);
|
| 314 |
+
|
| 315 |
+
if (tid < N / 2)
|
| 316 |
+
Unroll<N / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
|
| 317 |
+
#endif
|
| 318 |
+
}
|
| 319 |
+
};
|
| 320 |
+
|
| 321 |
+
template <unsigned int N> struct GenericOptimized32
|
| 322 |
+
{
|
| 323 |
+
enum { M = N / 32 };
|
| 324 |
+
|
| 325 |
+
template <typename Pointer, typename Reference, class Op>
|
| 326 |
+
static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
|
| 327 |
+
{
|
| 328 |
+
const unsigned int laneId = Warp::laneId();
|
| 329 |
+
|
| 330 |
+
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
|
| 331 |
+
Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize);
|
| 332 |
+
|
| 333 |
+
if (laneId == 0)
|
| 334 |
+
loadToSmem(smem, val, tid / 32);
|
| 335 |
+
#else
|
| 336 |
+
loadToSmem(smem, val, tid);
|
| 337 |
+
|
| 338 |
+
if (laneId < 16)
|
| 339 |
+
Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op);
|
| 340 |
+
|
| 341 |
+
__syncthreads();
|
| 342 |
+
|
| 343 |
+
if (laneId == 0)
|
| 344 |
+
loadToSmem(smem, val, tid / 32);
|
| 345 |
+
#endif
|
| 346 |
+
|
| 347 |
+
__syncthreads();
|
| 348 |
+
|
| 349 |
+
loadFromSmem(smem, val, tid);
|
| 350 |
+
|
| 351 |
+
if (tid < 32)
|
| 352 |
+
{
|
| 353 |
+
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
|
| 354 |
+
Unroll<M / 2, Pointer, Reference, Op>::loopShfl(val, op, M);
|
| 355 |
+
#else
|
| 356 |
+
Unroll<M / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
|
| 357 |
+
#endif
|
| 358 |
+
}
|
| 359 |
+
}
|
| 360 |
+
};
|
| 361 |
+
|
| 362 |
+
template <bool val, class T1, class T2> struct StaticIf;
|
| 363 |
+
template <class T1, class T2> struct StaticIf<true, T1, T2>
|
| 364 |
+
{
|
| 365 |
+
typedef T1 type;
|
| 366 |
+
};
|
| 367 |
+
template <class T1, class T2> struct StaticIf<false, T1, T2>
|
| 368 |
+
{
|
| 369 |
+
typedef T2 type;
|
| 370 |
+
};
|
| 371 |
+
|
| 372 |
+
template <unsigned int N> struct IsPowerOf2
|
| 373 |
+
{
|
| 374 |
+
enum { value = ((N != 0) && !(N & (N - 1))) };
|
| 375 |
+
};
|
| 376 |
+
|
| 377 |
+
template <unsigned int N> struct Dispatcher
|
| 378 |
+
{
|
| 379 |
+
typedef typename StaticIf<
|
| 380 |
+
(N <= 32) && IsPowerOf2<N>::value,
|
| 381 |
+
WarpOptimized<N>,
|
| 382 |
+
typename StaticIf<
|
| 383 |
+
(N <= 1024) && IsPowerOf2<N>::value,
|
| 384 |
+
GenericOptimized32<N>,
|
| 385 |
+
Generic<N>
|
| 386 |
+
>::type
|
| 387 |
+
>::type reductor;
|
| 388 |
+
};
|
| 389 |
+
}
|
| 390 |
+
}}}
|
| 391 |
+
|
| 392 |
+
//! @endcond
|
| 393 |
+
|
| 394 |
+
#endif // OPENCV_CUDA_REDUCE_DETAIL_HPP
|
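The reduce_detail machinery above is not meant to be called directly; the public wrapper cv::cuda::device::reduce<N>() declared in opencv2/core/cuda/reduce.hpp (also part of this import) selects Dispatcher<N>::reductor and performs a block-wide reduction, using warp shuffles on sm_30+ for power-of-two sizes and shared memory otherwise. A minimal sketch, not part of the diff, of a block-wise sum built on it; the kernel name is hypothetical:

#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"   // cv::cuda::device::plus

// Hypothetical kernel: each block sums BLOCK_SIZE elements; thread 0 writes the partial sum.
template <int BLOCK_SIZE>
__global__ void blockSumKernel(const float* src, float* dst, int n)
{
    __shared__ float smem[BLOCK_SIZE];

    const int tid = threadIdx.x;
    const int i   = blockIdx.x * BLOCK_SIZE + tid;

    // Out-of-range threads contribute the identity element of the sum.
    float val = (i < n) ? src[i] : 0.0f;

    cv::cuda::device::reduce<BLOCK_SIZE>(smem, val, tid, cv::cuda::device::plus<float>());

    if (tid == 0)
        dst[blockIdx.x] = val;
}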
3rdparty/opencv/include/opencv2/core/cuda/detail/reduce_key_val.hpp
ADDED
|
@@ -0,0 +1,567 @@
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP
|
| 44 |
+
#define OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP
|
| 45 |
+
|
| 46 |
+
#include <thrust/tuple.h>
|
| 47 |
+
#include "../warp.hpp"
|
| 48 |
+
#include "../warp_shuffle.hpp"
|
| 49 |
+
|
| 50 |
+
//! @cond IGNORED
|
| 51 |
+
|
| 52 |
+
namespace cv { namespace cuda { namespace device
|
| 53 |
+
{
|
| 54 |
+
namespace reduce_key_val_detail
|
| 55 |
+
{
|
| 56 |
+
template <typename T> struct GetType;
|
| 57 |
+
template <typename T> struct GetType<T*>
|
| 58 |
+
{
|
| 59 |
+
typedef T type;
|
| 60 |
+
};
|
| 61 |
+
template <typename T> struct GetType<volatile T*>
|
| 62 |
+
{
|
| 63 |
+
typedef T type;
|
| 64 |
+
};
|
| 65 |
+
template <typename T> struct GetType<T&>
|
| 66 |
+
{
|
| 67 |
+
typedef T type;
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
template <unsigned int I, unsigned int N>
|
| 71 |
+
struct For
|
| 72 |
+
{
|
| 73 |
+
template <class PointerTuple, class ReferenceTuple>
|
| 74 |
+
static __device__ void loadToSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)
|
| 75 |
+
{
|
| 76 |
+
thrust::get<I>(smem)[tid] = thrust::get<I>(data);
|
| 77 |
+
|
| 78 |
+
For<I + 1, N>::loadToSmem(smem, data, tid);
|
| 79 |
+
}
|
| 80 |
+
template <class PointerTuple, class ReferenceTuple>
|
| 81 |
+
static __device__ void loadFromSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)
|
| 82 |
+
{
|
| 83 |
+
thrust::get<I>(data) = thrust::get<I>(smem)[tid];
|
| 84 |
+
|
| 85 |
+
For<I + 1, N>::loadFromSmem(smem, data, tid);
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
template <class ReferenceTuple>
|
| 89 |
+
static __device__ void copyShfl(const ReferenceTuple& val, unsigned int delta, int width)
|
| 90 |
+
{
|
| 91 |
+
thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);
|
| 92 |
+
|
| 93 |
+
For<I + 1, N>::copyShfl(val, delta, width);
|
| 94 |
+
}
|
| 95 |
+
template <class PointerTuple, class ReferenceTuple>
|
| 96 |
+
static __device__ void copy(const PointerTuple& svals, const ReferenceTuple& val, unsigned int tid, unsigned int delta)
|
| 97 |
+
{
|
| 98 |
+
thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];
|
| 99 |
+
|
| 100 |
+
For<I + 1, N>::copy(svals, val, tid, delta);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>
|
| 104 |
+
static __device__ void mergeShfl(const KeyReferenceTuple& key, const ValReferenceTuple& val, const CmpTuple& cmp, unsigned int delta, int width)
|
| 105 |
+
{
|
| 106 |
+
typename GetType<typename thrust::tuple_element<I, KeyReferenceTuple>::type>::type reg = shfl_down(thrust::get<I>(key), delta, width);
|
| 107 |
+
|
| 108 |
+
if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))
|
| 109 |
+
{
|
| 110 |
+
thrust::get<I>(key) = reg;
|
| 111 |
+
thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
For<I + 1, N>::mergeShfl(key, val, cmp, delta, width);
|
| 115 |
+
}
|
| 116 |
+
template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>
|
| 117 |
+
static __device__ void merge(const KeyPointerTuple& skeys, const KeyReferenceTuple& key,
|
| 118 |
+
const ValPointerTuple& svals, const ValReferenceTuple& val,
|
| 119 |
+
const CmpTuple& cmp,
|
| 120 |
+
unsigned int tid, unsigned int delta)
|
| 121 |
+
{
|
| 122 |
+
typename GetType<typename thrust::tuple_element<I, KeyPointerTuple>::type>::type reg = thrust::get<I>(skeys)[tid + delta];
|
| 123 |
+
|
| 124 |
+
if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))
|
| 125 |
+
{
|
| 126 |
+
thrust::get<I>(skeys)[tid] = thrust::get<I>(key) = reg;
|
| 127 |
+
thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
For<I + 1, N>::merge(skeys, key, svals, val, cmp, tid, delta);
|
| 131 |
+
}
|
| 132 |
+
};
|
| 133 |
+
template <unsigned int N>
|
| 134 |
+
struct For<N, N>
|
| 135 |
+
{
|
| 136 |
+
template <class PointerTuple, class ReferenceTuple>
|
| 137 |
+
static __device__ void loadToSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)
|
| 138 |
+
{
|
| 139 |
+
}
|
| 140 |
+
template <class PointerTuple, class ReferenceTuple>
|
| 141 |
+
static __device__ void loadFromSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)
|
| 142 |
+
{
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
template <class ReferenceTuple>
|
| 146 |
+
static __device__ void copyShfl(const ReferenceTuple&, unsigned int, int)
|
| 147 |
+
{
|
| 148 |
+
}
|
| 149 |
+
template <class PointerTuple, class ReferenceTuple>
|
| 150 |
+
static __device__ void copy(const PointerTuple&, const ReferenceTuple&, unsigned int, unsigned int)
|
| 151 |
+
{
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>
|
| 155 |
+
static __device__ void mergeShfl(const KeyReferenceTuple&, const ValReferenceTuple&, const CmpTuple&, unsigned int, int)
|
| 156 |
+
{
|
| 157 |
+
}
|
| 158 |
+
template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>
|
| 159 |
+
static __device__ void merge(const KeyPointerTuple&, const KeyReferenceTuple&,
|
| 160 |
+
const ValPointerTuple&, const ValReferenceTuple&,
|
| 161 |
+
const CmpTuple&,
|
| 162 |
+
unsigned int, unsigned int)
|
| 163 |
+
{
|
| 164 |
+
}
|
| 165 |
+
};
|
| 166 |
+
|
| 167 |
+
//////////////////////////////////////////////////////
|
| 168 |
+
// loadToSmem
|
| 169 |
+
|
| 170 |
+
template <typename T>
|
| 171 |
+
__device__ __forceinline__ void loadToSmem(volatile T* smem, T& data, unsigned int tid)
|
| 172 |
+
{
|
| 173 |
+
smem[tid] = data;
|
| 174 |
+
}
|
| 175 |
+
template <typename T>
|
| 176 |
+
__device__ __forceinline__ void loadFromSmem(volatile T* smem, T& data, unsigned int tid)
|
| 177 |
+
{
|
| 178 |
+
data = smem[tid];
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
#if (CUDART_VERSION < 12040)
|
| 182 |
+
template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
|
| 183 |
+
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
|
| 184 |
+
__device__ __forceinline__ void loadToSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,
|
| 185 |
+
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,
|
| 186 |
+
unsigned int tid)
|
| 187 |
+
{
|
| 188 |
+
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadToSmem(smem, data, tid);
|
| 189 |
+
}
|
| 190 |
+
template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
|
| 191 |
+
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
|
| 192 |
+
__device__ __forceinline__ void loadFromSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,
|
| 193 |
+
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,
|
| 194 |
+
unsigned int tid)
|
| 195 |
+
{
|
| 196 |
+
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadFromSmem(smem, data, tid);
|
| 197 |
+
}
|
| 198 |
+
#else
|
| 199 |
+
template <typename... VP, typename... VR>
|
| 200 |
+
__device__ __forceinline__ void loadToSmem(const thrust::tuple<VP...>& smem, const thrust::tuple<VR...>& data, unsigned int tid)
|
| 201 |
+
{
|
| 202 |
+
For<0, thrust::tuple_size<thrust::tuple<VP...> >::value>::loadToSmem(smem, data, tid);
|
| 203 |
+
}
|
| 204 |
+
template <typename... VP, typename... VR>
|
| 205 |
+
__device__ __forceinline__ void loadFromSmem(const thrust::tuple<VP...>& smem, const thrust::tuple<VR...>& data, unsigned int tid)
|
| 206 |
+
{
|
| 207 |
+
For<0, thrust::tuple_size<thrust::tuple<VP...> >::value>::loadFromSmem(smem, data, tid);
|
| 208 |
+
}
|
| 209 |
+
#endif
|
| 210 |
+
|
| 211 |
+
template <typename V>
|
| 212 |
+
__device__ __forceinline__ void copyValsShfl(V& val, unsigned int delta, int width)
|
| 213 |
+
{
|
| 214 |
+
val = shfl_down(val, delta, width);
|
| 215 |
+
}
|
| 216 |
+
template <typename V>
|
| 217 |
+
__device__ __forceinline__ void copyVals(volatile V* svals, V& val, unsigned int tid, unsigned int delta)
|
| 218 |
+
{
|
| 219 |
+
svals[tid] = val = svals[tid + delta];
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
template <typename K, typename V, class Cmp>
|
| 223 |
+
__device__ __forceinline__ void mergeShfl(K& key, V& val, const Cmp& cmp, unsigned int delta, int width)
|
| 224 |
+
{
|
| 225 |
+
K reg = shfl_down(key, delta, width);
|
| 226 |
+
|
| 227 |
+
if (cmp(reg, key))
|
| 228 |
+
{
|
| 229 |
+
key = reg;
|
| 230 |
+
copyValsShfl(val, delta, width);
|
| 231 |
+
}
|
| 232 |
+
}
|
| 233 |
+
template <typename K, typename V, class Cmp>
|
| 234 |
+
__device__ __forceinline__ void merge(volatile K* skeys, K& key, volatile V* svals, V& val, const Cmp& cmp, unsigned int tid, unsigned int delta)
|
| 235 |
+
{
|
| 236 |
+
K reg = skeys[tid + delta];
|
| 237 |
+
|
| 238 |
+
if (cmp(reg, key))
|
| 239 |
+
{
|
| 240 |
+
skeys[tid] = key = reg;
|
| 241 |
+
copyVals(svals, val, tid, delta);
|
| 242 |
+
}
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
#if (CUDART_VERSION < 12040) // details: https://github.com/opencv/opencv_contrib/issues/3690
|
| 246 |
+
template <typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
|
| 247 |
+
__device__ __forceinline__ void copyValsShfl(const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
|
| 248 |
+
unsigned int delta,
|
| 249 |
+
int width)
|
| 250 |
+
{
|
| 251 |
+
For<0, thrust::tuple_size<thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9> >::value>::copyShfl(val, delta, width);
|
| 252 |
+
}
|
| 253 |
+
template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
|
| 254 |
+
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
|
| 255 |
+
__device__ __forceinline__ void copyVals(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
|
| 256 |
+
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
|
| 257 |
+
unsigned int tid, unsigned int delta)
|
| 258 |
+
{
|
| 259 |
+
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::copy(svals, val, tid, delta);
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
template <typename K,
|
| 263 |
+
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
|
| 264 |
+
class Cmp>
|
| 265 |
+
__device__ __forceinline__ void mergeShfl(K& key,
|
| 266 |
+
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
|
| 267 |
+
const Cmp& cmp,
|
| 268 |
+
unsigned int delta, int width)
|
| 269 |
+
{
|
| 270 |
+
K reg = shfl_down(key, delta, width);
|
| 271 |
+
|
| 272 |
+
if (cmp(reg, key))
|
| 273 |
+
{
|
| 274 |
+
key = reg;
|
| 275 |
+
copyValsShfl(val, delta, width);
|
| 276 |
+
}
|
| 277 |
+
}
|
| 278 |
+
template <typename K,
|
| 279 |
+
typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
|
| 280 |
+
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
|
| 281 |
+
class Cmp>
|
| 282 |
+
__device__ __forceinline__ void merge(volatile K* skeys, K& key,
|
| 283 |
+
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
|
| 284 |
+
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
|
| 285 |
+
const Cmp& cmp, unsigned int tid, unsigned int delta)
|
| 286 |
+
{
|
| 287 |
+
K reg = skeys[tid + delta];
|
| 288 |
+
|
| 289 |
+
if (cmp(reg, key))
|
| 290 |
+
{
|
| 291 |
+
skeys[tid] = key = reg;
|
| 292 |
+
copyVals(svals, val, tid, delta);
|
| 293 |
+
}
|
| 294 |
+
}
|
| 295 |
+
template <typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
|
| 296 |
+
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
|
| 297 |
+
class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
|
| 298 |
+
__device__ __forceinline__ void mergeShfl(const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
|
| 299 |
+
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
|
| 300 |
+
const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,
|
| 301 |
+
unsigned int delta, int width)
|
| 302 |
+
{
|
| 303 |
+
For<0, thrust::tuple_size<thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9> >::value>::mergeShfl(key, val, cmp, delta, width);
|
| 304 |
+
}
|
| 305 |
+
template <typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,
|
| 306 |
+
typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
|
| 307 |
+
typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
|
| 308 |
+
typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
|
| 309 |
+
class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
|
| 310 |
+
__device__ __forceinline__ void merge(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,
|
| 311 |
+
const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
|
| 312 |
+
const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
|
| 313 |
+
const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
|
| 314 |
+
const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,
|
| 315 |
+
unsigned int tid, unsigned int delta)
|
| 316 |
+
{
|
| 317 |
+
For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::merge(skeys, key, svals, val, cmp, tid, delta);
|
| 318 |
+
}
|
| 319 |
+
#else
|
| 320 |
+
template <typename... VR>
|
| 321 |
+
__device__ __forceinline__ void copyValsShfl(const thrust::tuple<VR...>& val, unsigned int delta, int width)
|
| 322 |
+
{
|
| 323 |
+
For<0, thrust::tuple_size<thrust::tuple<VR...> >::value>::copyShfl(val, delta, width);
|
| 324 |
+
}
|
| 325 |
+
template <typename... VP, typename... VR>
|
| 326 |
+
__device__ __forceinline__ void copyVals(const thrust::tuple<VP...>& svals, const thrust::tuple<VR...>& val, unsigned int tid, unsigned int delta)
|
| 327 |
+
{
|
| 328 |
+
For<0, thrust::tuple_size<thrust::tuple<VP...> >::value>::copy(svals, val, tid, delta);
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
template <typename K, typename... VR, class Cmp>
|
| 332 |
+
__device__ __forceinline__ void mergeShfl(K& key, const thrust::tuple<VR...>& val, const Cmp& cmp, unsigned int delta, int width)
|
| 333 |
+
{
|
| 334 |
+
K reg = shfl_down(key, delta, width);
|
| 335 |
+
|
| 336 |
+
if (cmp(reg, key))
|
| 337 |
+
{
|
| 338 |
+
key = reg;
|
| 339 |
+
copyValsShfl(val, delta, width);
|
| 340 |
+
}
|
| 341 |
+
}
|
| 342 |
+
template <typename K, typename... VP, typename... VR, class Cmp>
|
| 343 |
+
__device__ __forceinline__ void merge(volatile K* skeys, K& key, const thrust::tuple<VP...>& svals,
|
| 344 |
+
const thrust::tuple<VR...>& val, const Cmp& cmp, unsigned int tid, unsigned int delta)
|
| 345 |
+
{
|
| 346 |
+
K reg = skeys[tid + delta];
|
| 347 |
+
|
| 348 |
+
if (cmp(reg, key))
|
| 349 |
+
{
|
| 350 |
+
skeys[tid] = key = reg;
|
| 351 |
+
copyVals(svals, val, tid, delta);
|
| 352 |
+
}
|
| 353 |
+
}
|
| 354 |
+
template <typename... KR, typename... VR, class... Cmp>
|
| 355 |
+
__device__ __forceinline__ void mergeShfl(const thrust::tuple<KR...>& key,
|
| 356 |
+
const thrust::tuple<VR...>& val,
|
| 357 |
+
const thrust::tuple<Cmp...>& cmp,
|
| 358 |
+
unsigned int delta, int width)
|
| 359 |
+
{
|
| 360 |
+
For<0, thrust::tuple_size<thrust::tuple<KR...> >::value>::mergeShfl(key, val, cmp, delta, width);
|
| 361 |
+
}
|
| 362 |
+
template <typename... KP, typename... KR, typename... VP, typename... VR, class... Cmp>
|
| 363 |
+
__device__ __forceinline__ void merge(const thrust::tuple<KP...>& skeys,
|
| 364 |
+
const thrust::tuple<KR...>& key,
|
| 365 |
+
const thrust::tuple<VP...>& svals,
|
| 366 |
+
const thrust::tuple<VR...>& val,
|
| 367 |
+
const thrust::tuple<Cmp...>& cmp,
|
| 368 |
+
unsigned int tid, unsigned int delta)
|
| 369 |
+
{
|
| 370 |
+
For<0, thrust::tuple_size<thrust::tuple<VP...> >::value>::merge(skeys, key, svals, val, cmp, tid, delta);
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
#endif
|
| 374 |
+
//////////////////////////////////////////////////////
|
| 375 |
+
// Generic
|
| 376 |
+
|
| 377 |
+
template <unsigned int N> struct Generic
|
| 378 |
+
{
|
| 379 |
+
template <class KP, class KR, class VP, class VR, class Cmp>
|
| 380 |
+
static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
|
| 381 |
+
{
|
| 382 |
+
loadToSmem(skeys, key, tid);
|
| 383 |
+
loadValsToSmem(svals, val, tid);
|
| 384 |
+
if (N >= 32)
|
| 385 |
+
__syncthreads();
|
| 386 |
+
|
| 387 |
+
if (N >= 2048)
|
| 388 |
+
{
|
| 389 |
+
if (tid < 1024)
|
| 390 |
+
merge(skeys, key, svals, val, cmp, tid, 1024);
|
| 391 |
+
|
| 392 |
+
__syncthreads();
|
| 393 |
+
}
|
| 394 |
+
if (N >= 1024)
|
| 395 |
+
{
|
| 396 |
+
if (tid < 512)
|
| 397 |
+
merge(skeys, key, svals, val, cmp, tid, 512);
|
| 398 |
+
|
| 399 |
+
__syncthreads();
|
| 400 |
+
}
|
| 401 |
+
if (N >= 512)
|
| 402 |
+
{
|
| 403 |
+
if (tid < 256)
|
| 404 |
+
merge(skeys, key, svals, val, cmp, tid, 256);
|
| 405 |
+
|
| 406 |
+
__syncthreads();
|
| 407 |
+
}
|
| 408 |
+
if (N >= 256)
|
| 409 |
+
{
|
| 410 |
+
if (tid < 128)
|
| 411 |
+
merge(skeys, key, svals, val, cmp, tid, 128);
|
| 412 |
+
|
| 413 |
+
__syncthreads();
|
| 414 |
+
}
|
| 415 |
+
if (N >= 128)
|
| 416 |
+
{
|
| 417 |
+
if (tid < 64)
|
| 418 |
+
merge(skeys, key, svals, val, cmp, tid, 64);
|
| 419 |
+
|
| 420 |
+
__syncthreads();
|
| 421 |
+
}
|
| 422 |
+
if (N >= 64)
|
| 423 |
+
{
|
| 424 |
+
if (tid < 32)
|
| 425 |
+
merge(skeys, key, svals, val, cmp, tid, 32);
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
if (tid < 16)
|
| 429 |
+
{
|
| 430 |
+
merge(skeys, key, svals, val, cmp, tid, 16);
|
| 431 |
+
merge(skeys, key, svals, val, cmp, tid, 8);
|
| 432 |
+
merge(skeys, key, svals, val, cmp, tid, 4);
|
| 433 |
+
merge(skeys, key, svals, val, cmp, tid, 2);
|
| 434 |
+
merge(skeys, key, svals, val, cmp, tid, 1);
|
| 435 |
+
}
|
| 436 |
+
}
|
| 437 |
+
};
|
| 438 |
+
|
| 439 |
+
template <unsigned int I, class KP, class KR, class VP, class VR, class Cmp>
|
| 440 |
+
struct Unroll
|
| 441 |
+
{
|
| 442 |
+
static __device__ void loopShfl(KR key, VR val, Cmp cmp, unsigned int N)
|
| 443 |
+
{
|
| 444 |
+
mergeShfl(key, val, cmp, I, N);
|
| 445 |
+
Unroll<I / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
|
| 446 |
+
}
|
| 447 |
+
static __device__ void loop(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
|
| 448 |
+
{
|
| 449 |
+
merge(skeys, key, svals, val, cmp, tid, I);
|
| 450 |
+
Unroll<I / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
|
| 451 |
+
}
|
| 452 |
+
};
|
| 453 |
+
template <class KP, class KR, class VP, class VR, class Cmp>
|
| 454 |
+
struct Unroll<0, KP, KR, VP, VR, Cmp>
|
| 455 |
+
{
|
| 456 |
+
static __device__ void loopShfl(KR, VR, Cmp, unsigned int)
|
| 457 |
+
{
|
| 458 |
+
}
|
| 459 |
+
static __device__ void loop(KP, KR, VP, VR, unsigned int, Cmp)
|
| 460 |
+
{
|
| 461 |
+
}
|
| 462 |
+
};
|
| 463 |
+
|
| 464 |
+
template <unsigned int N> struct WarpOptimized
|
| 465 |
+
{
|
| 466 |
+
template <class KP, class KR, class VP, class VR, class Cmp>
|
| 467 |
+
static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
|
| 468 |
+
{
|
| 469 |
+
#if 0 // __CUDA_ARCH__ >= 300
|
| 470 |
+
CV_UNUSED(skeys);
|
| 471 |
+
CV_UNUSED(svals);
|
| 472 |
+
CV_UNUSED(tid);
|
| 473 |
+
|
| 474 |
+
Unroll<N / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
|
| 475 |
+
#else
|
| 476 |
+
loadToSmem(skeys, key, tid);
|
| 477 |
+
loadToSmem(svals, val, tid);
|
| 478 |
+
|
| 479 |
+
if (tid < N / 2)
|
| 480 |
+
Unroll<N / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
|
| 481 |
+
#endif
|
| 482 |
+
}
|
| 483 |
+
};
|
| 484 |
+
|
| 485 |
+
template <unsigned int N> struct GenericOptimized32
|
| 486 |
+
{
|
| 487 |
+
enum { M = N / 32 };
|
| 488 |
+
|
| 489 |
+
template <class KP, class KR, class VP, class VR, class Cmp>
|
| 490 |
+
static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
|
| 491 |
+
{
|
| 492 |
+
const unsigned int laneId = Warp::laneId();
|
| 493 |
+
|
| 494 |
+
#if 0 // __CUDA_ARCH__ >= 300
|
| 495 |
+
Unroll<16, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, warpSize);
|
| 496 |
+
|
| 497 |
+
if (laneId == 0)
|
| 498 |
+
{
|
| 499 |
+
loadToSmem(skeys, key, tid / 32);
|
| 500 |
+
loadToSmem(svals, val, tid / 32);
|
| 501 |
+
}
|
| 502 |
+
#else
|
| 503 |
+
loadToSmem(skeys, key, tid);
|
| 504 |
+
loadToSmem(svals, val, tid);
|
| 505 |
+
|
| 506 |
+
if (laneId < 16)
|
| 507 |
+
Unroll<16, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
|
| 508 |
+
|
| 509 |
+
__syncthreads();
|
| 510 |
+
|
| 511 |
+
if (laneId == 0)
|
| 512 |
+
{
|
| 513 |
+
loadToSmem(skeys, key, tid / 32);
|
| 514 |
+
loadToSmem(svals, val, tid / 32);
|
| 515 |
+
}
|
| 516 |
+
#endif
|
| 517 |
+
|
| 518 |
+
__syncthreads();
|
| 519 |
+
|
| 520 |
+
loadFromSmem(skeys, key, tid);
|
| 521 |
+
|
| 522 |
+
if (tid < 32)
|
| 523 |
+
{
|
| 524 |
+
#if 0 // __CUDA_ARCH__ >= 300
|
| 525 |
+
loadFromSmem(svals, val, tid);
|
| 526 |
+
|
| 527 |
+
Unroll<M / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, M);
|
| 528 |
+
#else
|
| 529 |
+
Unroll<M / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
|
| 530 |
+
#endif
|
| 531 |
+
}
|
| 532 |
+
}
|
| 533 |
+
};
|
| 534 |
+
|
| 535 |
+
template <bool val, class T1, class T2> struct StaticIf;
|
| 536 |
+
template <class T1, class T2> struct StaticIf<true, T1, T2>
|
| 537 |
+
{
|
| 538 |
+
typedef T1 type;
|
| 539 |
+
};
|
| 540 |
+
template <class T1, class T2> struct StaticIf<false, T1, T2>
|
| 541 |
+
{
|
| 542 |
+
typedef T2 type;
|
| 543 |
+
};
|
| 544 |
+
|
| 545 |
+
template <unsigned int N> struct IsPowerOf2
|
| 546 |
+
{
|
| 547 |
+
enum { value = ((N != 0) && !(N & (N - 1))) };
|
| 548 |
+
};
|
| 549 |
+
|
| 550 |
+
template <unsigned int N> struct Dispatcher
|
| 551 |
+
{
|
| 552 |
+
typedef typename StaticIf<
|
| 553 |
+
(N <= 32) && IsPowerOf2<N>::value,
|
| 554 |
+
WarpOptimized<N>,
|
| 555 |
+
typename StaticIf<
|
| 556 |
+
(N <= 1024) && IsPowerOf2<N>::value,
|
| 557 |
+
GenericOptimized32<N>,
|
| 558 |
+
Generic<N>
|
| 559 |
+
>::type
|
| 560 |
+
>::type reductor;
|
| 561 |
+
};
|
| 562 |
+
}
|
| 563 |
+
}}}
|
| 564 |
+
|
| 565 |
+
//! @endcond
|
| 566 |
+
|
| 567 |
+
#endif // OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/detail/transform_detail.hpp
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_TRANSFORM_DETAIL_HPP
|
| 44 |
+
#define OPENCV_CUDA_TRANSFORM_DETAIL_HPP
|
| 45 |
+
|
| 46 |
+
#include "../common.hpp"
|
| 47 |
+
#include "../vec_traits.hpp"
|
| 48 |
+
#include "../functional.hpp"
|
| 49 |
+
|
| 50 |
+
//! @cond IGNORED
|
| 51 |
+
|
| 52 |
+
namespace cv { namespace cuda { namespace device
|
| 53 |
+
{
|
| 54 |
+
namespace transform_detail
|
| 55 |
+
{
|
| 56 |
+
//! Read Write Traits
|
| 57 |
+
|
| 58 |
+
template <typename T, typename D, int shift> struct UnaryReadWriteTraits
|
| 59 |
+
{
|
| 60 |
+
typedef typename TypeVec<T, shift>::vec_type read_type;
|
| 61 |
+
typedef typename TypeVec<D, shift>::vec_type write_type;
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
template <typename T1, typename T2, typename D, int shift> struct BinaryReadWriteTraits
|
| 65 |
+
{
|
| 66 |
+
typedef typename TypeVec<T1, shift>::vec_type read_type1;
|
| 67 |
+
typedef typename TypeVec<T2, shift>::vec_type read_type2;
|
| 68 |
+
typedef typename TypeVec<D, shift>::vec_type write_type;
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
//! Transform kernels
|
| 72 |
+
|
| 73 |
+
template <int shift> struct OpUnroller;
|
| 74 |
+
template <> struct OpUnroller<1>
|
| 75 |
+
{
|
| 76 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 77 |
+
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
|
| 78 |
+
{
|
| 79 |
+
if (mask(y, x_shifted))
|
| 80 |
+
dst.x = op(src.x);
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 84 |
+
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
|
| 85 |
+
{
|
| 86 |
+
if (mask(y, x_shifted))
|
| 87 |
+
dst.x = op(src1.x, src2.x);
|
| 88 |
+
}
|
| 89 |
+
};
|
| 90 |
+
template <> struct OpUnroller<2>
|
| 91 |
+
{
|
| 92 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 93 |
+
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
|
| 94 |
+
{
|
| 95 |
+
if (mask(y, x_shifted))
|
| 96 |
+
dst.x = op(src.x);
|
| 97 |
+
if (mask(y, x_shifted + 1))
|
| 98 |
+
dst.y = op(src.y);
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 102 |
+
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
|
| 103 |
+
{
|
| 104 |
+
if (mask(y, x_shifted))
|
| 105 |
+
dst.x = op(src1.x, src2.x);
|
| 106 |
+
if (mask(y, x_shifted + 1))
|
| 107 |
+
dst.y = op(src1.y, src2.y);
|
| 108 |
+
}
|
| 109 |
+
};
|
| 110 |
+
template <> struct OpUnroller<3>
|
| 111 |
+
{
|
| 112 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 113 |
+
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
|
| 114 |
+
{
|
| 115 |
+
if (mask(y, x_shifted))
|
| 116 |
+
dst.x = op(src.x);
|
| 117 |
+
if (mask(y, x_shifted + 1))
|
| 118 |
+
dst.y = op(src.y);
|
| 119 |
+
if (mask(y, x_shifted + 2))
|
| 120 |
+
dst.z = op(src.z);
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 124 |
+
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
|
| 125 |
+
{
|
| 126 |
+
if (mask(y, x_shifted))
|
| 127 |
+
dst.x = op(src1.x, src2.x);
|
| 128 |
+
if (mask(y, x_shifted + 1))
|
| 129 |
+
dst.y = op(src1.y, src2.y);
|
| 130 |
+
if (mask(y, x_shifted + 2))
|
| 131 |
+
dst.z = op(src1.z, src2.z);
|
| 132 |
+
}
|
| 133 |
+
};
|
| 134 |
+
template <> struct OpUnroller<4>
|
| 135 |
+
{
|
| 136 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 137 |
+
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
|
| 138 |
+
{
|
| 139 |
+
if (mask(y, x_shifted))
|
| 140 |
+
dst.x = op(src.x);
|
| 141 |
+
if (mask(y, x_shifted + 1))
|
| 142 |
+
dst.y = op(src.y);
|
| 143 |
+
if (mask(y, x_shifted + 2))
|
| 144 |
+
dst.z = op(src.z);
|
| 145 |
+
if (mask(y, x_shifted + 3))
|
| 146 |
+
dst.w = op(src.w);
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 150 |
+
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
|
| 151 |
+
{
|
| 152 |
+
if (mask(y, x_shifted))
|
| 153 |
+
dst.x = op(src1.x, src2.x);
|
| 154 |
+
if (mask(y, x_shifted + 1))
|
| 155 |
+
dst.y = op(src1.y, src2.y);
|
| 156 |
+
if (mask(y, x_shifted + 2))
|
| 157 |
+
dst.z = op(src1.z, src2.z);
|
| 158 |
+
if (mask(y, x_shifted + 3))
|
| 159 |
+
dst.w = op(src1.w, src2.w);
|
| 160 |
+
}
|
| 161 |
+
};
|
| 162 |
+
template <> struct OpUnroller<8>
|
| 163 |
+
{
|
| 164 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 165 |
+
static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
|
| 166 |
+
{
|
| 167 |
+
if (mask(y, x_shifted))
|
| 168 |
+
dst.a0 = op(src.a0);
|
| 169 |
+
if (mask(y, x_shifted + 1))
|
| 170 |
+
dst.a1 = op(src.a1);
|
| 171 |
+
if (mask(y, x_shifted + 2))
|
| 172 |
+
dst.a2 = op(src.a2);
|
| 173 |
+
if (mask(y, x_shifted + 3))
|
| 174 |
+
dst.a3 = op(src.a3);
|
| 175 |
+
if (mask(y, x_shifted + 4))
|
| 176 |
+
dst.a4 = op(src.a4);
|
| 177 |
+
if (mask(y, x_shifted + 5))
|
| 178 |
+
dst.a5 = op(src.a5);
|
| 179 |
+
if (mask(y, x_shifted + 6))
|
| 180 |
+
dst.a6 = op(src.a6);
|
| 181 |
+
if (mask(y, x_shifted + 7))
|
| 182 |
+
dst.a7 = op(src.a7);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 186 |
+
static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
|
| 187 |
+
{
|
| 188 |
+
if (mask(y, x_shifted))
|
| 189 |
+
dst.a0 = op(src1.a0, src2.a0);
|
| 190 |
+
if (mask(y, x_shifted + 1))
|
| 191 |
+
dst.a1 = op(src1.a1, src2.a1);
|
| 192 |
+
if (mask(y, x_shifted + 2))
|
| 193 |
+
dst.a2 = op(src1.a2, src2.a2);
|
| 194 |
+
if (mask(y, x_shifted + 3))
|
| 195 |
+
dst.a3 = op(src1.a3, src2.a3);
|
| 196 |
+
if (mask(y, x_shifted + 4))
|
| 197 |
+
dst.a4 = op(src1.a4, src2.a4);
|
| 198 |
+
if (mask(y, x_shifted + 5))
|
| 199 |
+
dst.a5 = op(src1.a5, src2.a5);
|
| 200 |
+
if (mask(y, x_shifted + 6))
|
| 201 |
+
dst.a6 = op(src1.a6, src2.a6);
|
| 202 |
+
if (mask(y, x_shifted + 7))
|
| 203 |
+
dst.a7 = op(src1.a7, src2.a7);
|
| 204 |
+
}
|
| 205 |
+
};
|
| 206 |
+
|
| 207 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 208 |
+
static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
|
| 209 |
+
{
|
| 210 |
+
typedef TransformFunctorTraits<UnOp> ft;
|
| 211 |
+
typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;
|
| 212 |
+
typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::write_type write_type;
|
| 213 |
+
|
| 214 |
+
const int x = threadIdx.x + blockIdx.x * blockDim.x;
|
| 215 |
+
const int y = threadIdx.y + blockIdx.y * blockDim.y;
|
| 216 |
+
const int x_shifted = x * ft::smart_shift;
|
| 217 |
+
|
| 218 |
+
if (y < src_.rows)
|
| 219 |
+
{
|
| 220 |
+
const T* src = src_.ptr(y);
|
| 221 |
+
D* dst = dst_.ptr(y);
|
| 222 |
+
|
| 223 |
+
if (x_shifted + ft::smart_shift - 1 < src_.cols)
|
| 224 |
+
{
|
| 225 |
+
const read_type src_n_el = ((const read_type*)src)[x];
|
| 226 |
+
OpUnroller<ft::smart_shift>::unroll(src_n_el, ((write_type*)dst)[x], mask, op, x_shifted, y);
|
| 227 |
+
}
|
| 228 |
+
else
|
| 229 |
+
{
|
| 230 |
+
for (int real_x = x_shifted; real_x < src_.cols; ++real_x)
|
| 231 |
+
{
|
| 232 |
+
if (mask(y, real_x))
|
| 233 |
+
dst[real_x] = op(src[real_x]);
|
| 234 |
+
}
|
| 235 |
+
}
|
| 236 |
+
}
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 240 |
+
__global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
|
| 241 |
+
{
|
| 242 |
+
const int x = blockDim.x * blockIdx.x + threadIdx.x;
|
| 243 |
+
const int y = blockDim.y * blockIdx.y + threadIdx.y;
|
| 244 |
+
|
| 245 |
+
if (x < src.cols && y < src.rows && mask(y, x))
|
| 246 |
+
{
|
| 247 |
+
dst.ptr(y)[x] = op(src.ptr(y)[x]);
|
| 248 |
+
}
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 252 |
+
static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
|
| 253 |
+
const Mask mask, const BinOp op)
|
| 254 |
+
{
|
| 255 |
+
typedef TransformFunctorTraits<BinOp> ft;
|
| 256 |
+
typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type1 read_type1;
|
| 257 |
+
typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type2 read_type2;
|
| 258 |
+
typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::write_type write_type;
|
| 259 |
+
|
| 260 |
+
const int x = threadIdx.x + blockIdx.x * blockDim.x;
|
| 261 |
+
const int y = threadIdx.y + blockIdx.y * blockDim.y;
|
| 262 |
+
const int x_shifted = x * ft::smart_shift;
|
| 263 |
+
|
| 264 |
+
if (y < src1_.rows)
|
| 265 |
+
{
|
| 266 |
+
const T1* src1 = src1_.ptr(y);
|
| 267 |
+
const T2* src2 = src2_.ptr(y);
|
| 268 |
+
D* dst = dst_.ptr(y);
|
| 269 |
+
|
| 270 |
+
if (x_shifted + ft::smart_shift - 1 < src1_.cols)
|
| 271 |
+
{
|
| 272 |
+
const read_type1 src1_n_el = ((const read_type1*)src1)[x];
|
| 273 |
+
const read_type2 src2_n_el = ((const read_type2*)src2)[x];
|
| 274 |
+
|
| 275 |
+
OpUnroller<ft::smart_shift>::unroll(src1_n_el, src2_n_el, ((write_type*)dst)[x], mask, op, x_shifted, y);
|
| 276 |
+
}
|
| 277 |
+
else
|
| 278 |
+
{
|
| 279 |
+
for (int real_x = x_shifted; real_x < src1_.cols; ++real_x)
|
| 280 |
+
{
|
| 281 |
+
if (mask(y, real_x))
|
| 282 |
+
dst[real_x] = op(src1[real_x], src2[real_x]);
|
| 283 |
+
}
|
| 284 |
+
}
|
| 285 |
+
}
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 289 |
+
static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,
|
| 290 |
+
const Mask mask, const BinOp op)
|
| 291 |
+
{
|
| 292 |
+
const int x = blockDim.x * blockIdx.x + threadIdx.x;
|
| 293 |
+
const int y = blockDim.y * blockIdx.y + threadIdx.y;
|
| 294 |
+
|
| 295 |
+
if (x < src1.cols && y < src1.rows && mask(y, x))
|
| 296 |
+
{
|
| 297 |
+
const T1 src1_data = src1.ptr(y)[x];
|
| 298 |
+
const T2 src2_data = src2.ptr(y)[x];
|
| 299 |
+
dst.ptr(y)[x] = op(src1_data, src2_data);
|
| 300 |
+
}
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
template <bool UseSmart> struct TransformDispatcher;
|
| 304 |
+
template<> struct TransformDispatcher<false>
|
| 305 |
+
{
|
| 306 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 307 |
+
static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)
|
| 308 |
+
{
|
| 309 |
+
typedef TransformFunctorTraits<UnOp> ft;
|
| 310 |
+
|
| 311 |
+
const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
|
| 312 |
+
const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1);
|
| 313 |
+
|
| 314 |
+
transformSimple<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
|
| 315 |
+
cudaSafeCall( cudaGetLastError() );
|
| 316 |
+
|
| 317 |
+
if (stream == 0)
|
| 318 |
+
cudaSafeCall( cudaDeviceSynchronize() );
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 322 |
+
static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)
|
| 323 |
+
{
|
| 324 |
+
typedef TransformFunctorTraits<BinOp> ft;
|
| 325 |
+
|
| 326 |
+
const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
|
| 327 |
+
const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1);
|
| 328 |
+
|
| 329 |
+
transformSimple<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
|
| 330 |
+
cudaSafeCall( cudaGetLastError() );
|
| 331 |
+
|
| 332 |
+
if (stream == 0)
|
| 333 |
+
cudaSafeCall( cudaDeviceSynchronize() );
|
| 334 |
+
}
|
| 335 |
+
};
|
| 336 |
+
template<> struct TransformDispatcher<true>
|
| 337 |
+
{
|
| 338 |
+
template <typename T, typename D, typename UnOp, typename Mask>
|
| 339 |
+
static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)
|
| 340 |
+
{
|
| 341 |
+
typedef TransformFunctorTraits<UnOp> ft;
|
| 342 |
+
|
| 343 |
+
CV_StaticAssert(ft::smart_shift != 1, "");
|
| 344 |
+
|
| 345 |
+
if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) ||
|
| 346 |
+
!isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
|
| 347 |
+
{
|
| 348 |
+
TransformDispatcher<false>::call(src, dst, op, mask, stream);
|
| 349 |
+
return;
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
|
| 353 |
+
const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1);
|
| 354 |
+
|
| 355 |
+
transformSmart<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
|
| 356 |
+
cudaSafeCall( cudaGetLastError() );
|
| 357 |
+
|
| 358 |
+
if (stream == 0)
|
| 359 |
+
cudaSafeCall( cudaDeviceSynchronize() );
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
|
| 363 |
+
static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)
|
| 364 |
+
{
|
| 365 |
+
typedef TransformFunctorTraits<BinOp> ft;
|
| 366 |
+
|
| 367 |
+
CV_StaticAssert(ft::smart_shift != 1, "");
|
| 368 |
+
|
| 369 |
+
if (!isAligned(src1.data, ft::smart_shift * sizeof(T1)) || !isAligned(src1.step, ft::smart_shift * sizeof(T1)) ||
|
| 370 |
+
!isAligned(src2.data, ft::smart_shift * sizeof(T2)) || !isAligned(src2.step, ft::smart_shift * sizeof(T2)) ||
|
| 371 |
+
!isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
|
| 372 |
+
{
|
| 373 |
+
TransformDispatcher<false>::call(src1, src2, dst, op, mask, stream);
|
| 374 |
+
return;
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
|
| 378 |
+
const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1);
|
| 379 |
+
|
| 380 |
+
transformSmart<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
|
| 381 |
+
cudaSafeCall( cudaGetLastError() );
|
| 382 |
+
|
| 383 |
+
if (stream == 0)
|
| 384 |
+
cudaSafeCall( cudaDeviceSynchronize() );
|
| 385 |
+
}
|
| 386 |
+
};
|
| 387 |
+
} // namespace transform_detail
|
| 388 |
+
}}} // namespace cv { namespace cuda { namespace cudev
|
| 389 |
+
|
| 390 |
+
//! @endcond
|
| 391 |
+
|
| 392 |
+
#endif // OPENCV_CUDA_TRANSFORM_DETAIL_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/detail/type_traits_detail.hpp
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP
|
| 44 |
+
#define OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP
|
| 45 |
+
|
| 46 |
+
#include "../common.hpp"
|
| 47 |
+
#include "../vec_traits.hpp"
|
| 48 |
+
|
| 49 |
+
//! @cond IGNORED
|
| 50 |
+
|
| 51 |
+
namespace cv { namespace cuda { namespace device
|
| 52 |
+
{
|
| 53 |
+
namespace type_traits_detail
|
| 54 |
+
{
|
| 55 |
+
template <bool, typename T1, typename T2> struct Select { typedef T1 type; };
|
| 56 |
+
template <typename T1, typename T2> struct Select<false, T1, T2> { typedef T2 type; };
|
| 57 |
+
|
| 58 |
+
template <typename T> struct IsSignedIntergral { enum {value = 0}; };
|
| 59 |
+
template <> struct IsSignedIntergral<schar> { enum {value = 1}; };
|
| 60 |
+
template <> struct IsSignedIntergral<char1> { enum {value = 1}; };
|
| 61 |
+
template <> struct IsSignedIntergral<short> { enum {value = 1}; };
|
| 62 |
+
template <> struct IsSignedIntergral<short1> { enum {value = 1}; };
|
| 63 |
+
template <> struct IsSignedIntergral<int> { enum {value = 1}; };
|
| 64 |
+
template <> struct IsSignedIntergral<int1> { enum {value = 1}; };
|
| 65 |
+
|
| 66 |
+
template <typename T> struct IsUnsignedIntegral { enum {value = 0}; };
|
| 67 |
+
template <> struct IsUnsignedIntegral<uchar> { enum {value = 1}; };
|
| 68 |
+
template <> struct IsUnsignedIntegral<uchar1> { enum {value = 1}; };
|
| 69 |
+
template <> struct IsUnsignedIntegral<ushort> { enum {value = 1}; };
|
| 70 |
+
template <> struct IsUnsignedIntegral<ushort1> { enum {value = 1}; };
|
| 71 |
+
template <> struct IsUnsignedIntegral<uint> { enum {value = 1}; };
|
| 72 |
+
template <> struct IsUnsignedIntegral<uint1> { enum {value = 1}; };
|
| 73 |
+
|
| 74 |
+
template <typename T> struct IsIntegral { enum {value = IsSignedIntergral<T>::value || IsUnsignedIntegral<T>::value}; };
|
| 75 |
+
template <> struct IsIntegral<char> { enum {value = 1}; };
|
| 76 |
+
template <> struct IsIntegral<bool> { enum {value = 1}; };
|
| 77 |
+
|
| 78 |
+
template <typename T> struct IsFloat { enum {value = 0}; };
|
| 79 |
+
template <> struct IsFloat<float> { enum {value = 1}; };
|
| 80 |
+
template <> struct IsFloat<double> { enum {value = 1}; };
|
| 81 |
+
|
| 82 |
+
template <typename T> struct IsVec { enum {value = 0}; };
|
| 83 |
+
template <> struct IsVec<uchar1> { enum {value = 1}; };
|
| 84 |
+
template <> struct IsVec<uchar2> { enum {value = 1}; };
|
| 85 |
+
template <> struct IsVec<uchar3> { enum {value = 1}; };
|
| 86 |
+
template <> struct IsVec<uchar4> { enum {value = 1}; };
|
| 87 |
+
template <> struct IsVec<uchar8> { enum {value = 1}; };
|
| 88 |
+
template <> struct IsVec<char1> { enum {value = 1}; };
|
| 89 |
+
template <> struct IsVec<char2> { enum {value = 1}; };
|
| 90 |
+
template <> struct IsVec<char3> { enum {value = 1}; };
|
| 91 |
+
template <> struct IsVec<char4> { enum {value = 1}; };
|
| 92 |
+
template <> struct IsVec<char8> { enum {value = 1}; };
|
| 93 |
+
template <> struct IsVec<ushort1> { enum {value = 1}; };
|
| 94 |
+
template <> struct IsVec<ushort2> { enum {value = 1}; };
|
| 95 |
+
template <> struct IsVec<ushort3> { enum {value = 1}; };
|
| 96 |
+
template <> struct IsVec<ushort4> { enum {value = 1}; };
|
| 97 |
+
template <> struct IsVec<ushort8> { enum {value = 1}; };
|
| 98 |
+
template <> struct IsVec<short1> { enum {value = 1}; };
|
| 99 |
+
template <> struct IsVec<short2> { enum {value = 1}; };
|
| 100 |
+
template <> struct IsVec<short3> { enum {value = 1}; };
|
| 101 |
+
template <> struct IsVec<short4> { enum {value = 1}; };
|
| 102 |
+
template <> struct IsVec<short8> { enum {value = 1}; };
|
| 103 |
+
template <> struct IsVec<uint1> { enum {value = 1}; };
|
| 104 |
+
template <> struct IsVec<uint2> { enum {value = 1}; };
|
| 105 |
+
template <> struct IsVec<uint3> { enum {value = 1}; };
|
| 106 |
+
template <> struct IsVec<uint4> { enum {value = 1}; };
|
| 107 |
+
template <> struct IsVec<uint8> { enum {value = 1}; };
|
| 108 |
+
template <> struct IsVec<int1> { enum {value = 1}; };
|
| 109 |
+
template <> struct IsVec<int2> { enum {value = 1}; };
|
| 110 |
+
template <> struct IsVec<int3> { enum {value = 1}; };
|
| 111 |
+
template <> struct IsVec<int4> { enum {value = 1}; };
|
| 112 |
+
template <> struct IsVec<int8> { enum {value = 1}; };
|
| 113 |
+
template <> struct IsVec<float1> { enum {value = 1}; };
|
| 114 |
+
template <> struct IsVec<float2> { enum {value = 1}; };
|
| 115 |
+
template <> struct IsVec<float3> { enum {value = 1}; };
|
| 116 |
+
template <> struct IsVec<float4> { enum {value = 1}; };
|
| 117 |
+
template <> struct IsVec<float8> { enum {value = 1}; };
|
| 118 |
+
template <> struct IsVec<double1> { enum {value = 1}; };
|
| 119 |
+
template <> struct IsVec<double2> { enum {value = 1}; };
|
| 120 |
+
template <> struct IsVec<double3> { enum {value = 1}; };
|
| 121 |
+
template <> struct IsVec<double4> { enum {value = 1}; };
|
| 122 |
+
template <> struct IsVec<double8> { enum {value = 1}; };
|
| 123 |
+
|
| 124 |
+
template <class U> struct AddParameterType { typedef const U& type; };
|
| 125 |
+
template <class U> struct AddParameterType<U&> { typedef U& type; };
|
| 126 |
+
template <> struct AddParameterType<void> { typedef void type; };
|
| 127 |
+
|
| 128 |
+
template <class U> struct ReferenceTraits
|
| 129 |
+
{
|
| 130 |
+
enum { value = false };
|
| 131 |
+
typedef U type;
|
| 132 |
+
};
|
| 133 |
+
template <class U> struct ReferenceTraits<U&>
|
| 134 |
+
{
|
| 135 |
+
enum { value = true };
|
| 136 |
+
typedef U type;
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
template <class U> struct PointerTraits
|
| 140 |
+
{
|
| 141 |
+
enum { value = false };
|
| 142 |
+
typedef void type;
|
| 143 |
+
};
|
| 144 |
+
template <class U> struct PointerTraits<U*>
|
| 145 |
+
{
|
| 146 |
+
enum { value = true };
|
| 147 |
+
typedef U type;
|
| 148 |
+
};
|
| 149 |
+
template <class U> struct PointerTraits<U*&>
|
| 150 |
+
{
|
| 151 |
+
enum { value = true };
|
| 152 |
+
typedef U type;
|
| 153 |
+
};
|
| 154 |
+
|
| 155 |
+
template <class U> struct UnConst
|
| 156 |
+
{
|
| 157 |
+
typedef U type;
|
| 158 |
+
enum { value = 0 };
|
| 159 |
+
};
|
| 160 |
+
template <class U> struct UnConst<const U>
|
| 161 |
+
{
|
| 162 |
+
typedef U type;
|
| 163 |
+
enum { value = 1 };
|
| 164 |
+
};
|
| 165 |
+
template <class U> struct UnConst<const U&>
|
| 166 |
+
{
|
| 167 |
+
typedef U& type;
|
| 168 |
+
enum { value = 1 };
|
| 169 |
+
};
|
| 170 |
+
|
| 171 |
+
template <class U> struct UnVolatile
|
| 172 |
+
{
|
| 173 |
+
typedef U type;
|
| 174 |
+
enum { value = 0 };
|
| 175 |
+
};
|
| 176 |
+
template <class U> struct UnVolatile<volatile U>
|
| 177 |
+
{
|
| 178 |
+
typedef U type;
|
| 179 |
+
enum { value = 1 };
|
| 180 |
+
};
|
| 181 |
+
template <class U> struct UnVolatile<volatile U&>
|
| 182 |
+
{
|
| 183 |
+
typedef U& type;
|
| 184 |
+
enum { value = 1 };
|
| 185 |
+
};
|
| 186 |
+
} // namespace type_traits_detail
|
| 187 |
+
}}} // namespace cv { namespace cuda { namespace cudev
|
| 188 |
+
|
| 189 |
+
//! @endcond
|
| 190 |
+
|
| 191 |
+
#endif // OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/detail/vec_distance_detail.hpp
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP
|
| 44 |
+
#define OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP
|
| 45 |
+
|
| 46 |
+
#include "../datamov_utils.hpp"
|
| 47 |
+
|
| 48 |
+
//! @cond IGNORED
|
| 49 |
+
|
| 50 |
+
namespace cv { namespace cuda { namespace device
|
| 51 |
+
{
|
| 52 |
+
namespace vec_distance_detail
|
| 53 |
+
{
|
| 54 |
+
template <int THREAD_DIM, int N> struct UnrollVecDiffCached
|
| 55 |
+
{
|
| 56 |
+
template <typename Dist, typename T1, typename T2>
|
| 57 |
+
static __device__ void calcCheck(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int ind)
|
| 58 |
+
{
|
| 59 |
+
if (ind < len)
|
| 60 |
+
{
|
| 61 |
+
T1 val1 = *vecCached++;
|
| 62 |
+
|
| 63 |
+
T2 val2;
|
| 64 |
+
ForceGlob<T2>::Load(vecGlob, ind, val2);
|
| 65 |
+
|
| 66 |
+
dist.reduceIter(val1, val2);
|
| 67 |
+
|
| 68 |
+
UnrollVecDiffCached<THREAD_DIM, N - 1>::calcCheck(vecCached, vecGlob, len, dist, ind + THREAD_DIM);
|
| 69 |
+
}
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
template <typename Dist, typename T1, typename T2>
|
| 73 |
+
static __device__ void calcWithoutCheck(const T1* vecCached, const T2* vecGlob, Dist& dist)
|
| 74 |
+
{
|
| 75 |
+
T1 val1 = *vecCached++;
|
| 76 |
+
|
| 77 |
+
T2 val2;
|
| 78 |
+
ForceGlob<T2>::Load(vecGlob, 0, val2);
|
| 79 |
+
vecGlob += THREAD_DIM;
|
| 80 |
+
|
| 81 |
+
dist.reduceIter(val1, val2);
|
| 82 |
+
|
| 83 |
+
UnrollVecDiffCached<THREAD_DIM, N - 1>::calcWithoutCheck(vecCached, vecGlob, dist);
|
| 84 |
+
}
|
| 85 |
+
};
|
| 86 |
+
template <int THREAD_DIM> struct UnrollVecDiffCached<THREAD_DIM, 0>
|
| 87 |
+
{
|
| 88 |
+
template <typename Dist, typename T1, typename T2>
|
| 89 |
+
static __device__ __forceinline__ void calcCheck(const T1*, const T2*, int, Dist&, int)
|
| 90 |
+
{
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
template <typename Dist, typename T1, typename T2>
|
| 94 |
+
static __device__ __forceinline__ void calcWithoutCheck(const T1*, const T2*, Dist&)
|
| 95 |
+
{
|
| 96 |
+
}
|
| 97 |
+
};
|
| 98 |
+
|
| 99 |
+
template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN> struct VecDiffCachedCalculator;
|
| 100 |
+
template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, false>
|
| 101 |
+
{
|
| 102 |
+
template <typename Dist, typename T1, typename T2>
|
| 103 |
+
static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)
|
| 104 |
+
{
|
| 105 |
+
UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcCheck(vecCached, vecGlob, len, dist, tid);
|
| 106 |
+
}
|
| 107 |
+
};
|
| 108 |
+
template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, true>
|
| 109 |
+
{
|
| 110 |
+
template <typename Dist, typename T1, typename T2>
|
| 111 |
+
static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)
|
| 112 |
+
{
|
| 113 |
+
UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcWithoutCheck(vecCached, vecGlob + tid, dist);
|
| 114 |
+
}
|
| 115 |
+
};
|
| 116 |
+
} // namespace vec_distance_detail
|
| 117 |
+
}}} // namespace cv { namespace cuda { namespace cudev
|
| 118 |
+
|
| 119 |
+
//! @endcond
|
| 120 |
+
|
| 121 |
+
#endif // OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/dynamic_smem.hpp
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_DYNAMIC_SMEM_HPP
|
| 44 |
+
#define OPENCV_CUDA_DYNAMIC_SMEM_HPP
|
| 45 |
+
|
| 46 |
+
/** @file
|
| 47 |
+
* @deprecated Use @ref cudev instead.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
//! @cond IGNORED
|
| 51 |
+
|
| 52 |
+
namespace cv { namespace cuda { namespace device
|
| 53 |
+
{
|
| 54 |
+
template<class T> struct DynamicSharedMem
|
| 55 |
+
{
|
| 56 |
+
__device__ __forceinline__ operator T*()
|
| 57 |
+
{
|
| 58 |
+
extern __shared__ int __smem[];
|
| 59 |
+
return (T*)__smem;
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
__device__ __forceinline__ operator const T*() const
|
| 63 |
+
{
|
| 64 |
+
extern __shared__ int __smem[];
|
| 65 |
+
return (T*)__smem;
|
| 66 |
+
}
|
| 67 |
+
};
|
| 68 |
+
|
| 69 |
+
// specialize for double to avoid unaligned memory access compile errors
|
| 70 |
+
template<> struct DynamicSharedMem<double>
|
| 71 |
+
{
|
| 72 |
+
__device__ __forceinline__ operator double*()
|
| 73 |
+
{
|
| 74 |
+
extern __shared__ double __smem_d[];
|
| 75 |
+
return (double*)__smem_d;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
__device__ __forceinline__ operator const double*() const
|
| 79 |
+
{
|
| 80 |
+
extern __shared__ double __smem_d[];
|
| 81 |
+
return (double*)__smem_d;
|
| 82 |
+
}
|
| 83 |
+
};
|
| 84 |
+
}}}
|
| 85 |
+
|
| 86 |
+
//! @endcond
|
| 87 |
+
|
| 88 |
+
#endif // OPENCV_CUDA_DYNAMIC_SMEM_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/emulation.hpp
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_EMULATION_HPP_
|
| 44 |
+
#define OPENCV_CUDA_EMULATION_HPP_
|
| 45 |
+
|
| 46 |
+
#include "common.hpp"
|
| 47 |
+
#include "warp_reduce.hpp"
|
| 48 |
+
|
| 49 |
+
/** @file
|
| 50 |
+
* @deprecated Use @ref cudev instead.
|
| 51 |
+
*/
|
| 52 |
+
|
| 53 |
+
//! @cond IGNORED
|
| 54 |
+
|
| 55 |
+
namespace cv { namespace cuda { namespace device
|
| 56 |
+
{
|
| 57 |
+
struct Emulation
|
| 58 |
+
{
|
| 59 |
+
|
| 60 |
+
static __device__ __forceinline__ int syncthreadsOr(int pred)
|
| 61 |
+
{
|
| 62 |
+
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
|
| 63 |
+
// just campilation stab
|
| 64 |
+
return 0;
|
| 65 |
+
#else
|
| 66 |
+
return __syncthreads_or(pred);
|
| 67 |
+
#endif
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
template<int CTA_SIZE>
|
| 71 |
+
static __forceinline__ __device__ int Ballot(int predicate)
|
| 72 |
+
{
|
| 73 |
+
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
|
| 74 |
+
return __ballot(predicate);
|
| 75 |
+
#else
|
| 76 |
+
__shared__ volatile int cta_buffer[CTA_SIZE];
|
| 77 |
+
|
| 78 |
+
int tid = threadIdx.x;
|
| 79 |
+
cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
|
| 80 |
+
return warp_reduce(cta_buffer);
|
| 81 |
+
#endif
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
struct smem
|
| 85 |
+
{
|
| 86 |
+
enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U };
|
| 87 |
+
|
| 88 |
+
template<typename T>
|
| 89 |
+
static __device__ __forceinline__ T atomicInc(T* address, T val)
|
| 90 |
+
{
|
| 91 |
+
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
|
| 92 |
+
T count;
|
| 93 |
+
unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
|
| 94 |
+
do
|
| 95 |
+
{
|
| 96 |
+
count = *address & TAG_MASK;
|
| 97 |
+
count = tag | (count + 1);
|
| 98 |
+
*address = count;
|
| 99 |
+
} while (*address != count);
|
| 100 |
+
|
| 101 |
+
return (count & TAG_MASK) - 1;
|
| 102 |
+
#else
|
| 103 |
+
return ::atomicInc(address, val);
|
| 104 |
+
#endif
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
template<typename T>
|
| 108 |
+
static __device__ __forceinline__ T atomicAdd(T* address, T val)
|
| 109 |
+
{
|
| 110 |
+
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
|
| 111 |
+
T count;
|
| 112 |
+
unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
|
| 113 |
+
do
|
| 114 |
+
{
|
| 115 |
+
count = *address & TAG_MASK;
|
| 116 |
+
count = tag | (count + val);
|
| 117 |
+
*address = count;
|
| 118 |
+
} while (*address != count);
|
| 119 |
+
|
| 120 |
+
return (count & TAG_MASK) - val;
|
| 121 |
+
#else
|
| 122 |
+
return ::atomicAdd(address, val);
|
| 123 |
+
#endif
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
template<typename T>
|
| 127 |
+
static __device__ __forceinline__ T atomicMin(T* address, T val)
|
| 128 |
+
{
|
| 129 |
+
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
|
| 130 |
+
T count = ::min(*address, val);
|
| 131 |
+
do
|
| 132 |
+
{
|
| 133 |
+
*address = count;
|
| 134 |
+
} while (*address > count);
|
| 135 |
+
|
| 136 |
+
return count;
|
| 137 |
+
#else
|
| 138 |
+
return ::atomicMin(address, val);
|
| 139 |
+
#endif
|
| 140 |
+
}
|
| 141 |
+
}; // struct cmem
|
| 142 |
+
|
| 143 |
+
struct glob
|
| 144 |
+
{
|
| 145 |
+
static __device__ __forceinline__ int atomicAdd(int* address, int val)
|
| 146 |
+
{
|
| 147 |
+
return ::atomicAdd(address, val);
|
| 148 |
+
}
|
| 149 |
+
static __device__ __forceinline__ unsigned int atomicAdd(unsigned int* address, unsigned int val)
|
| 150 |
+
{
|
| 151 |
+
return ::atomicAdd(address, val);
|
| 152 |
+
}
|
| 153 |
+
static __device__ __forceinline__ float atomicAdd(float* address, float val)
|
| 154 |
+
{
|
| 155 |
+
#if __CUDA_ARCH__ >= 200
|
| 156 |
+
return ::atomicAdd(address, val);
|
| 157 |
+
#else
|
| 158 |
+
int* address_as_i = (int*) address;
|
| 159 |
+
int old = *address_as_i, assumed;
|
| 160 |
+
do {
|
| 161 |
+
assumed = old;
|
| 162 |
+
old = ::atomicCAS(address_as_i, assumed,
|
| 163 |
+
__float_as_int(val + __int_as_float(assumed)));
|
| 164 |
+
} while (assumed != old);
|
| 165 |
+
return __int_as_float(old);
|
| 166 |
+
#endif
|
| 167 |
+
}
|
| 168 |
+
static __device__ __forceinline__ double atomicAdd(double* address, double val)
|
| 169 |
+
{
|
| 170 |
+
#if __CUDA_ARCH__ >= 130
|
| 171 |
+
unsigned long long int* address_as_ull = (unsigned long long int*) address;
|
| 172 |
+
unsigned long long int old = *address_as_ull, assumed;
|
| 173 |
+
do {
|
| 174 |
+
assumed = old;
|
| 175 |
+
old = ::atomicCAS(address_as_ull, assumed,
|
| 176 |
+
__double_as_longlong(val + __longlong_as_double(assumed)));
|
| 177 |
+
} while (assumed != old);
|
| 178 |
+
return __longlong_as_double(old);
|
| 179 |
+
#else
|
| 180 |
+
CV_UNUSED(address);
|
| 181 |
+
CV_UNUSED(val);
|
| 182 |
+
return 0.0;
|
| 183 |
+
#endif
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
static __device__ __forceinline__ int atomicMin(int* address, int val)
|
| 187 |
+
{
|
| 188 |
+
return ::atomicMin(address, val);
|
| 189 |
+
}
|
| 190 |
+
static __device__ __forceinline__ float atomicMin(float* address, float val)
|
| 191 |
+
{
|
| 192 |
+
#if __CUDA_ARCH__ >= 120
|
| 193 |
+
int* address_as_i = (int*) address;
|
| 194 |
+
int old = *address_as_i, assumed;
|
| 195 |
+
do {
|
| 196 |
+
assumed = old;
|
| 197 |
+
old = ::atomicCAS(address_as_i, assumed,
|
| 198 |
+
__float_as_int(::fminf(val, __int_as_float(assumed))));
|
| 199 |
+
} while (assumed != old);
|
| 200 |
+
return __int_as_float(old);
|
| 201 |
+
#else
|
| 202 |
+
CV_UNUSED(address);
|
| 203 |
+
CV_UNUSED(val);
|
| 204 |
+
return 0.0f;
|
| 205 |
+
#endif
|
| 206 |
+
}
|
| 207 |
+
static __device__ __forceinline__ double atomicMin(double* address, double val)
|
| 208 |
+
{
|
| 209 |
+
#if __CUDA_ARCH__ >= 130
|
| 210 |
+
unsigned long long int* address_as_ull = (unsigned long long int*) address;
|
| 211 |
+
unsigned long long int old = *address_as_ull, assumed;
|
| 212 |
+
do {
|
| 213 |
+
assumed = old;
|
| 214 |
+
old = ::atomicCAS(address_as_ull, assumed,
|
| 215 |
+
__double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
|
| 216 |
+
} while (assumed != old);
|
| 217 |
+
return __longlong_as_double(old);
|
| 218 |
+
#else
|
| 219 |
+
CV_UNUSED(address);
|
| 220 |
+
CV_UNUSED(val);
|
| 221 |
+
return 0.0;
|
| 222 |
+
#endif
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
static __device__ __forceinline__ int atomicMax(int* address, int val)
|
| 226 |
+
{
|
| 227 |
+
return ::atomicMax(address, val);
|
| 228 |
+
}
|
| 229 |
+
static __device__ __forceinline__ float atomicMax(float* address, float val)
|
| 230 |
+
{
|
| 231 |
+
#if __CUDA_ARCH__ >= 120
|
| 232 |
+
int* address_as_i = (int*) address;
|
| 233 |
+
int old = *address_as_i, assumed;
|
| 234 |
+
do {
|
| 235 |
+
assumed = old;
|
| 236 |
+
old = ::atomicCAS(address_as_i, assumed,
|
| 237 |
+
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
|
| 238 |
+
} while (assumed != old);
|
| 239 |
+
return __int_as_float(old);
|
| 240 |
+
#else
|
| 241 |
+
CV_UNUSED(address);
|
| 242 |
+
CV_UNUSED(val);
|
| 243 |
+
return 0.0f;
|
| 244 |
+
#endif
|
| 245 |
+
}
|
| 246 |
+
static __device__ __forceinline__ double atomicMax(double* address, double val)
|
| 247 |
+
{
|
| 248 |
+
#if __CUDA_ARCH__ >= 130
|
| 249 |
+
unsigned long long int* address_as_ull = (unsigned long long int*) address;
|
| 250 |
+
unsigned long long int old = *address_as_ull, assumed;
|
| 251 |
+
do {
|
| 252 |
+
assumed = old;
|
| 253 |
+
old = ::atomicCAS(address_as_ull, assumed,
|
| 254 |
+
__double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
|
| 255 |
+
} while (assumed != old);
|
| 256 |
+
return __longlong_as_double(old);
|
| 257 |
+
#else
|
| 258 |
+
CV_UNUSED(address);
|
| 259 |
+
CV_UNUSED(val);
|
| 260 |
+
return 0.0;
|
| 261 |
+
#endif
|
| 262 |
+
}
|
| 263 |
+
};
|
| 264 |
+
}; //struct Emulation
|
| 265 |
+
}}} // namespace cv { namespace cuda { namespace device
|
| 266 |
+
|
| 267 |
+
//! @endcond
|
| 268 |
+
|
| 269 |
+
#endif /* OPENCV_CUDA_EMULATION_HPP_ */
|
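The glob helpers above fall back to an atomicCAS loop whenever a native atomic is unavailable for the element type. The following standalone sketch (not part of the header; kernel, buffer, and function names are illustrative) shows the same compare-and-swap pattern used by the double overload of atomicAdd, and how such an emulated atomic might be driven from a simple reduction kernel.

// Standalone illustration of the atomicCAS-based emulation pattern used above.
// Assumes a device with 64-bit atomicCAS support (compute capability >= 1.2).
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__device__ double emulatedAtomicAddDouble(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do
    {
        assumed = old;
        // Reinterpret the current bits as a double, add, and try to publish the
        // result; if another thread updated the word first, retry with its value.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old); // value stored before our update, as in the header
}

__global__ void sumKernel(const double* in, int n, double* out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        emulatedAtomicAddDouble(out, in[i]);
}

int main()
{
    const int n = 1 << 10;
    std::vector<double> h_in(n, 1.0);          // 1024 ones, expected sum 1024
    double *d_in = 0, *d_out = 0, h_out = 0.0;
    cudaMalloc(&d_in, n * sizeof(double));
    cudaMalloc(&d_out, sizeof(double));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(double));      // all-zero bits == 0.0
    sumKernel<<<(n + 255) / 256, 256>>>(d_in, n, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %.1f\n", h_out);             // prints 1024.0
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}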
3rdparty/opencv/include/opencv2/core/cuda/filters.hpp
ADDED
|
@@ -0,0 +1,293 @@
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_FILTERS_HPP
|
| 44 |
+
#define OPENCV_CUDA_FILTERS_HPP
|
| 45 |
+
|
| 46 |
+
#include "saturate_cast.hpp"
|
| 47 |
+
#include "vec_traits.hpp"
|
| 48 |
+
#include "vec_math.hpp"
|
| 49 |
+
#include "type_traits.hpp"
|
| 50 |
+
#include "nppdefs.h"
|
| 51 |
+
|
| 52 |
+
/** @file
|
| 53 |
+
* @deprecated Use @ref cudev instead.
|
| 54 |
+
*/
|
| 55 |
+
|
| 56 |
+
//! @cond IGNORED
|
| 57 |
+
|
| 58 |
+
namespace cv { namespace cuda { namespace device
|
| 59 |
+
{
|
| 60 |
+
template <typename Ptr2D> struct PointFilter
|
| 61 |
+
{
|
| 62 |
+
typedef typename Ptr2D::elem_type elem_type;
|
| 63 |
+
typedef float index_type;
|
| 64 |
+
|
| 65 |
+
explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
|
| 66 |
+
: src(src_)
|
| 67 |
+
{
|
| 68 |
+
CV_UNUSED(fx);
|
| 69 |
+
CV_UNUSED(fy);
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
__device__ __forceinline__ elem_type operator ()(float y, float x) const
|
| 73 |
+
{
|
| 74 |
+
return src(__float2int_rz(y), __float2int_rz(x));
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
Ptr2D src;
|
| 78 |
+
};
|
| 79 |
+
|
| 80 |
+
template <typename Ptr2D> struct LinearFilter
|
| 81 |
+
{
|
| 82 |
+
typedef typename Ptr2D::elem_type elem_type;
|
| 83 |
+
typedef float index_type;
|
| 84 |
+
|
| 85 |
+
explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
|
| 86 |
+
: src(src_)
|
| 87 |
+
{
|
| 88 |
+
CV_UNUSED(fx);
|
| 89 |
+
CV_UNUSED(fy);
|
| 90 |
+
}
|
| 91 |
+
__device__ __forceinline__ elem_type operator ()(float y, float x) const
|
| 92 |
+
{
|
| 93 |
+
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
|
| 94 |
+
|
| 95 |
+
work_type out = VecTraits<work_type>::all(0);
|
| 96 |
+
|
| 97 |
+
const int x1 = __float2int_rd(x);
|
| 98 |
+
const int y1 = __float2int_rd(y);
|
| 99 |
+
if (x1 <= NPP_MIN_32S || x1 >= NPP_MAX_32S || y1 <= NPP_MIN_32S || y1 >= NPP_MAX_32S)
|
| 100 |
+
{
|
| 101 |
+
elem_type src_reg = src(y1, x1);
|
| 102 |
+
out = out + src_reg * 1.0f;
|
| 103 |
+
return saturate_cast<elem_type>(out);
|
| 104 |
+
}
|
| 105 |
+
const int x2 = x1 + 1;
|
| 106 |
+
const int y2 = y1 + 1;
|
| 107 |
+
|
| 108 |
+
elem_type src_reg = src(y1, x1);
|
| 109 |
+
out = out + src_reg * ((x2 - x) * (y2 - y));
|
| 110 |
+
|
| 111 |
+
src_reg = src(y1, x2);
|
| 112 |
+
out = out + src_reg * ((x - x1) * (y2 - y));
|
| 113 |
+
|
| 114 |
+
src_reg = src(y2, x1);
|
| 115 |
+
out = out + src_reg * ((x2 - x) * (y - y1));
|
| 116 |
+
|
| 117 |
+
src_reg = src(y2, x2);
|
| 118 |
+
out = out + src_reg * ((x - x1) * (y - y1));
|
| 119 |
+
|
| 120 |
+
return saturate_cast<elem_type>(out);
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
Ptr2D src;
|
| 124 |
+
};
|
| 125 |
+
|
| 126 |
+
template <typename Ptr2D> struct CubicFilter
|
| 127 |
+
{
|
| 128 |
+
typedef typename Ptr2D::elem_type elem_type;
|
| 129 |
+
typedef float index_type;
|
| 130 |
+
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
|
| 131 |
+
|
| 132 |
+
explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
|
| 133 |
+
: src(src_)
|
| 134 |
+
{
|
| 135 |
+
CV_UNUSED(fx);
|
| 136 |
+
CV_UNUSED(fy);
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
static __device__ __forceinline__ float bicubicCoeff(float x_)
|
| 140 |
+
{
|
| 141 |
+
float x = fabsf(x_);
|
| 142 |
+
if (x <= 1.0f)
|
| 143 |
+
{
|
| 144 |
+
return x * x * (1.5f * x - 2.5f) + 1.0f;
|
| 145 |
+
}
|
| 146 |
+
else if (x < 2.0f)
|
| 147 |
+
{
|
| 148 |
+
return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
|
| 149 |
+
}
|
| 150 |
+
else
|
| 151 |
+
{
|
| 152 |
+
return 0.0f;
|
| 153 |
+
}
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
__device__ elem_type operator ()(float y, float x) const
|
| 157 |
+
{
|
| 158 |
+
const float xmin = ::ceilf(x - 2.0f);
|
| 159 |
+
const float xmax = ::floorf(x + 2.0f);
|
| 160 |
+
|
| 161 |
+
const float ymin = ::ceilf(y - 2.0f);
|
| 162 |
+
const float ymax = ::floorf(y + 2.0f);
|
| 163 |
+
|
| 164 |
+
work_type sum = VecTraits<work_type>::all(0);
|
| 165 |
+
float wsum = 0.0f;
|
| 166 |
+
|
| 167 |
+
for (float cy = ymin; cy <= ymax; cy += 1.0f)
|
| 168 |
+
{
|
| 169 |
+
for (float cx = xmin; cx <= xmax; cx += 1.0f)
|
| 170 |
+
{
|
| 171 |
+
const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy);
|
| 172 |
+
sum = sum + w * src(__float2int_rd(cy), __float2int_rd(cx));
|
| 173 |
+
wsum += w;
|
| 174 |
+
}
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
work_type res = (!wsum)? VecTraits<work_type>::all(0) : sum / wsum;
|
| 178 |
+
|
| 179 |
+
return saturate_cast<elem_type>(res);
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
Ptr2D src;
|
| 183 |
+
};
|
| 184 |
+
// for integer scaling
|
| 185 |
+
template <typename Ptr2D> struct IntegerAreaFilter
|
| 186 |
+
{
|
| 187 |
+
typedef typename Ptr2D::elem_type elem_type;
|
| 188 |
+
typedef float index_type;
|
| 189 |
+
|
| 190 |
+
explicit __host__ __device__ __forceinline__ IntegerAreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)
|
| 191 |
+
: src(src_), scale_x(scale_x_), scale_y(scale_y_), scale(1.f / (scale_x * scale_y)) {}
|
| 192 |
+
|
| 193 |
+
__device__ __forceinline__ elem_type operator ()(float y, float x) const
|
| 194 |
+
{
|
| 195 |
+
float fsx1 = x * scale_x;
|
| 196 |
+
float fsx2 = fsx1 + scale_x;
|
| 197 |
+
|
| 198 |
+
int sx1 = __float2int_ru(fsx1);
|
| 199 |
+
int sx2 = __float2int_rd(fsx2);
|
| 200 |
+
|
| 201 |
+
float fsy1 = y * scale_y;
|
| 202 |
+
float fsy2 = fsy1 + scale_y;
|
| 203 |
+
|
| 204 |
+
int sy1 = __float2int_ru(fsy1);
|
| 205 |
+
int sy2 = __float2int_rd(fsy2);
|
| 206 |
+
|
| 207 |
+
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
|
| 208 |
+
work_type out = VecTraits<work_type>::all(0.f);
|
| 209 |
+
|
| 210 |
+
for(int dy = sy1; dy < sy2; ++dy)
|
| 211 |
+
for(int dx = sx1; dx < sx2; ++dx)
|
| 212 |
+
{
|
| 213 |
+
out = out + src(dy, dx) * scale;
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
return saturate_cast<elem_type>(out);
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
Ptr2D src;
|
| 220 |
+
float scale_x, scale_y, scale;
|
| 221 |
+
};
|
| 222 |
+
|
| 223 |
+
template <typename Ptr2D> struct AreaFilter
|
| 224 |
+
{
|
| 225 |
+
typedef typename Ptr2D::elem_type elem_type;
|
| 226 |
+
typedef float index_type;
|
| 227 |
+
|
| 228 |
+
explicit __host__ __device__ __forceinline__ AreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)
|
| 229 |
+
: src(src_), scale_x(scale_x_), scale_y(scale_y_){}
|
| 230 |
+
|
| 231 |
+
__device__ __forceinline__ elem_type operator ()(float y, float x) const
|
| 232 |
+
{
|
| 233 |
+
float fsx1 = x * scale_x;
|
| 234 |
+
float fsx2 = fsx1 + scale_x;
|
| 235 |
+
|
| 236 |
+
int sx1 = __float2int_ru(fsx1);
|
| 237 |
+
int sx2 = __float2int_rd(fsx2);
|
| 238 |
+
|
| 239 |
+
float fsy1 = y * scale_y;
|
| 240 |
+
float fsy2 = fsy1 + scale_y;
|
| 241 |
+
|
| 242 |
+
int sy1 = __float2int_ru(fsy1);
|
| 243 |
+
int sy2 = __float2int_rd(fsy2);
|
| 244 |
+
|
| 245 |
+
float scale = 1.f / (fminf(scale_x, src.width - fsx1) * fminf(scale_y, src.height - fsy1));
|
| 246 |
+
|
| 247 |
+
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
|
| 248 |
+
work_type out = VecTraits<work_type>::all(0.f);
|
| 249 |
+
|
| 250 |
+
for (int dy = sy1; dy < sy2; ++dy)
|
| 251 |
+
{
|
| 252 |
+
for (int dx = sx1; dx < sx2; ++dx)
|
| 253 |
+
out = out + src(dy, dx) * scale;
|
| 254 |
+
|
| 255 |
+
if (sx1 > fsx1)
|
| 256 |
+
out = out + src(dy, (sx1 -1) ) * ((sx1 - fsx1) * scale);
|
| 257 |
+
|
| 258 |
+
if (sx2 < fsx2)
|
| 259 |
+
out = out + src(dy, sx2) * ((fsx2 -sx2) * scale);
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
if (sy1 > fsy1)
|
| 263 |
+
for (int dx = sx1; dx < sx2; ++dx)
|
| 264 |
+
out = out + src( (sy1 - 1) , dx) * ((sy1 -fsy1) * scale);
|
| 265 |
+
|
| 266 |
+
if (sy2 < fsy2)
|
| 267 |
+
for (int dx = sx1; dx < sx2; ++dx)
|
| 268 |
+
out = out + src(sy2, dx) * ((fsy2 -sy2) * scale);
|
| 269 |
+
|
| 270 |
+
if ((sy1 > fsy1) && (sx1 > fsx1))
|
| 271 |
+
out = out + src( (sy1 - 1) , (sx1 - 1)) * ((sy1 -fsy1) * (sx1 -fsx1) * scale);
|
| 272 |
+
|
| 273 |
+
if ((sy1 > fsy1) && (sx2 < fsx2))
|
| 274 |
+
out = out + src( (sy1 - 1) , sx2) * ((sy1 -fsy1) * (fsx2 -sx2) * scale);
|
| 275 |
+
|
| 276 |
+
if ((sy2 < fsy2) && (sx2 < fsx2))
|
| 277 |
+
out = out + src(sy2, sx2) * ((fsy2 -sy2) * (fsx2 -sx2) * scale);
|
| 278 |
+
|
| 279 |
+
if ((sy2 < fsy2) && (sx1 > fsx1))
|
| 280 |
+
out = out + src(sy2, (sx1 - 1)) * ((fsy2 -sy2) * (sx1 -fsx1) * scale);
|
| 281 |
+
|
| 282 |
+
return saturate_cast<elem_type>(out);
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
Ptr2D src;
|
| 286 |
+
float scale_x, scale_y;
|
| 287 |
+
int width, height;
|
| 288 |
+
};
|
| 289 |
+
}}} // namespace cv { namespace cuda { namespace device
|
| 290 |
+
|
| 291 |
+
//! @endcond
|
| 292 |
+
|
| 293 |
+
#endif // OPENCV_CUDA_FILTERS_HPP
|
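As a point of reference for the LinearFilter functor above, here is a host-side sketch (illustrative only, not part of the header) of the same bilinear weighting: each of the four surrounding texels is blended with a weight equal to the area of the sub-rectangle opposite it. In the header, border handling comes from the Ptr2D source object; this sketch simply clamps coordinates to the image.

// Host-side bilinear sampling over a single-channel float image stored row-major.
// Function and variable names are illustrative.
#include <algorithm>
#include <cmath>

float bilinearSample(const float* img, int width, int height, float y, float x)
{
    const int x1 = (int)std::floor(x);
    const int y1 = (int)std::floor(y);
    const int x2 = x1 + 1;
    const int y2 = y1 + 1;

    auto at = [&](int r, int c)
    {
        r = std::min(std::max(r, 0), height - 1);   // clamp to the image border
        c = std::min(std::max(c, 0), width  - 1);
        return img[r * width + c];
    };

    // Weights are the areas of the sub-rectangles opposite each neighbour,
    // matching the four terms accumulated in LinearFilter::operator().
    float out = 0.f;
    out += at(y1, x1) * ((x2 - x) * (y2 - y));
    out += at(y1, x2) * ((x - x1) * (y2 - y));
    out += at(y2, x1) * ((x2 - x) * (y - y1));
    out += at(y2, x2) * ((x - x1) * (y - y1));
    return out;
}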
3rdparty/opencv/include/opencv2/core/cuda/funcattrib.hpp
ADDED
|
@@ -0,0 +1,79 @@
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP
|
| 44 |
+
#define OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP
|
| 45 |
+
|
| 46 |
+
#include <cstdio>
|
| 47 |
+
|
| 48 |
+
/** @file
|
| 49 |
+
* @deprecated Use @ref cudev instead.
|
| 50 |
+
*/
|
| 51 |
+
|
| 52 |
+
//! @cond IGNORED
|
| 53 |
+
|
| 54 |
+
namespace cv { namespace cuda { namespace device
|
| 55 |
+
{
|
| 56 |
+
template<class Func>
|
| 57 |
+
void printFuncAttrib(Func& func)
|
| 58 |
+
{
|
| 59 |
+
|
| 60 |
+
cudaFuncAttributes attrs;
|
| 61 |
+
cudaFuncGetAttributes(&attrs, func);
|
| 62 |
+
|
| 63 |
+
printf("=== Function stats ===\n");
|
| 64 |
+
printf("Name: \n");
|
| 65 |
+
printf("sharedSizeBytes = %d\n", attrs.sharedSizeBytes);
|
| 66 |
+
printf("constSizeBytes = %d\n", attrs.constSizeBytes);
|
| 67 |
+
printf("localSizeBytes = %d\n", attrs.localSizeBytes);
|
| 68 |
+
printf("maxThreadsPerBlock = %d\n", attrs.maxThreadsPerBlock);
|
| 69 |
+
printf("numRegs = %d\n", attrs.numRegs);
|
| 70 |
+
printf("ptxVersion = %d\n", attrs.ptxVersion);
|
| 71 |
+
printf("binaryVersion = %d\n", attrs.binaryVersion);
|
| 72 |
+
printf("\n");
|
| 73 |
+
fflush(stdout);
|
| 74 |
+
}
|
| 75 |
+
}}} // namespace cv { namespace cuda { namespace device
|
| 76 |
+
|
| 77 |
+
//! @endcond
|
| 78 |
+
|
| 79 |
+
#endif /* OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP */
|
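A minimal usage sketch for the helper above (kernel name is illustrative, include path assumed from this repository layout): printFuncAttrib forwards to cudaFuncGetAttributes and prints the result, so it can be called from host code with any __global__ function.

// Illustrative only: dump the launch attributes of a trivial kernel to stdout.
#include <opencv2/core/cuda/funcattrib.hpp>

__global__ void scaleKernel(float* data, int n, float s)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= s;
}

void reportKernelStats()
{
    // Prints shared/constant/local memory usage, register count, PTX and binary
    // versions for scaleKernel, as implemented in printFuncAttrib above.
    cv::cuda::device::printFuncAttrib(scaleKernel);
}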
3rdparty/opencv/include/opencv2/core/cuda/functional.hpp
ADDED
|
@@ -0,0 +1,805 @@
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_FUNCTIONAL_HPP
|
| 44 |
+
#define OPENCV_CUDA_FUNCTIONAL_HPP
|
| 45 |
+
|
| 46 |
+
#include <functional>
|
| 47 |
+
#include "saturate_cast.hpp"
|
| 48 |
+
#include "vec_traits.hpp"
|
| 49 |
+
#include "type_traits.hpp"
|
| 50 |
+
|
| 51 |
+
/** @file
|
| 52 |
+
* @deprecated Use @ref cudev instead.
|
| 53 |
+
*/
|
| 54 |
+
|
| 55 |
+
//! @cond IGNORED
|
| 56 |
+
|
| 57 |
+
namespace cv { namespace cuda { namespace device
|
| 58 |
+
{
|
| 59 |
+
// Function Objects
|
| 60 |
+
template<typename Argument, typename Result> struct unary_function
|
| 61 |
+
{
|
| 62 |
+
typedef Argument argument_type;
|
| 63 |
+
typedef Result result_type;
|
| 64 |
+
};
|
| 65 |
+
template<typename Argument1, typename Argument2, typename Result> struct binary_function
|
| 66 |
+
{
|
| 67 |
+
typedef Argument1 first_argument_type;
|
| 68 |
+
typedef Argument2 second_argument_type;
|
| 69 |
+
typedef Result result_type;
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
// Arithmetic Operations
|
| 73 |
+
template <typename T> struct plus : binary_function<T, T, T>
|
| 74 |
+
{
|
| 75 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 76 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 77 |
+
{
|
| 78 |
+
return a + b;
|
| 79 |
+
}
|
| 80 |
+
__host__ __device__ __forceinline__ plus() {}
|
| 81 |
+
__host__ __device__ __forceinline__ plus(const plus&) {}
|
| 82 |
+
};
|
| 83 |
+
|
| 84 |
+
template <typename T> struct minus : binary_function<T, T, T>
|
| 85 |
+
{
|
| 86 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 87 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 88 |
+
{
|
| 89 |
+
return a - b;
|
| 90 |
+
}
|
| 91 |
+
__host__ __device__ __forceinline__ minus() {}
|
| 92 |
+
__host__ __device__ __forceinline__ minus(const minus&) {}
|
| 93 |
+
};
|
| 94 |
+
|
| 95 |
+
template <typename T> struct multiplies : binary_function<T, T, T>
|
| 96 |
+
{
|
| 97 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 98 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 99 |
+
{
|
| 100 |
+
return a * b;
|
| 101 |
+
}
|
| 102 |
+
__host__ __device__ __forceinline__ multiplies() {}
|
| 103 |
+
__host__ __device__ __forceinline__ multiplies(const multiplies&) {}
|
| 104 |
+
};
|
| 105 |
+
|
| 106 |
+
template <typename T> struct divides : binary_function<T, T, T>
|
| 107 |
+
{
|
| 108 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 109 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 110 |
+
{
|
| 111 |
+
return a / b;
|
| 112 |
+
}
|
| 113 |
+
__host__ __device__ __forceinline__ divides() {}
|
| 114 |
+
__host__ __device__ __forceinline__ divides(const divides&) {}
|
| 115 |
+
};
|
| 116 |
+
|
| 117 |
+
template <typename T> struct modulus : binary_function<T, T, T>
|
| 118 |
+
{
|
| 119 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 120 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 121 |
+
{
|
| 122 |
+
return a % b;
|
| 123 |
+
}
|
| 124 |
+
__host__ __device__ __forceinline__ modulus() {}
|
| 125 |
+
__host__ __device__ __forceinline__ modulus(const modulus&) {}
|
| 126 |
+
};
|
| 127 |
+
|
| 128 |
+
template <typename T> struct negate : unary_function<T, T>
|
| 129 |
+
{
|
| 130 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a) const
|
| 131 |
+
{
|
| 132 |
+
return -a;
|
| 133 |
+
}
|
| 134 |
+
__host__ __device__ __forceinline__ negate() {}
|
| 135 |
+
__host__ __device__ __forceinline__ negate(const negate&) {}
|
| 136 |
+
};
|
| 137 |
+
|
| 138 |
+
// Comparison Operations
|
| 139 |
+
template <typename T> struct equal_to : binary_function<T, T, bool>
|
| 140 |
+
{
|
| 141 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 142 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 143 |
+
{
|
| 144 |
+
return a == b;
|
| 145 |
+
}
|
| 146 |
+
__host__ __device__ __forceinline__ equal_to() {}
|
| 147 |
+
__host__ __device__ __forceinline__ equal_to(const equal_to&) {}
|
| 148 |
+
};
|
| 149 |
+
|
| 150 |
+
template <typename T> struct not_equal_to : binary_function<T, T, bool>
|
| 151 |
+
{
|
| 152 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 153 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 154 |
+
{
|
| 155 |
+
return a != b;
|
| 156 |
+
}
|
| 157 |
+
__host__ __device__ __forceinline__ not_equal_to() {}
|
| 158 |
+
__host__ __device__ __forceinline__ not_equal_to(const not_equal_to&) {}
|
| 159 |
+
};
|
| 160 |
+
|
| 161 |
+
template <typename T> struct greater : binary_function<T, T, bool>
|
| 162 |
+
{
|
| 163 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 164 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 165 |
+
{
|
| 166 |
+
return a > b;
|
| 167 |
+
}
|
| 168 |
+
__host__ __device__ __forceinline__ greater() {}
|
| 169 |
+
__host__ __device__ __forceinline__ greater(const greater&) {}
|
| 170 |
+
};
|
| 171 |
+
|
| 172 |
+
template <typename T> struct less : binary_function<T, T, bool>
|
| 173 |
+
{
|
| 174 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 175 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 176 |
+
{
|
| 177 |
+
return a < b;
|
| 178 |
+
}
|
| 179 |
+
__host__ __device__ __forceinline__ less() {}
|
| 180 |
+
__host__ __device__ __forceinline__ less(const less&) {}
|
| 181 |
+
};
|
| 182 |
+
|
| 183 |
+
template <typename T> struct greater_equal : binary_function<T, T, bool>
|
| 184 |
+
{
|
| 185 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 186 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 187 |
+
{
|
| 188 |
+
return a >= b;
|
| 189 |
+
}
|
| 190 |
+
__host__ __device__ __forceinline__ greater_equal() {}
|
| 191 |
+
__host__ __device__ __forceinline__ greater_equal(const greater_equal&) {}
|
| 192 |
+
};
|
| 193 |
+
|
| 194 |
+
template <typename T> struct less_equal : binary_function<T, T, bool>
|
| 195 |
+
{
|
| 196 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 197 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 198 |
+
{
|
| 199 |
+
return a <= b;
|
| 200 |
+
}
|
| 201 |
+
__host__ __device__ __forceinline__ less_equal() {}
|
| 202 |
+
__host__ __device__ __forceinline__ less_equal(const less_equal&) {}
|
| 203 |
+
};
|
| 204 |
+
|
| 205 |
+
// Logical Operations
|
| 206 |
+
template <typename T> struct logical_and : binary_function<T, T, bool>
|
| 207 |
+
{
|
| 208 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 209 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 210 |
+
{
|
| 211 |
+
return a && b;
|
| 212 |
+
}
|
| 213 |
+
__host__ __device__ __forceinline__ logical_and() {}
|
| 214 |
+
__host__ __device__ __forceinline__ logical_and(const logical_and&) {}
|
| 215 |
+
};
|
| 216 |
+
|
| 217 |
+
template <typename T> struct logical_or : binary_function<T, T, bool>
|
| 218 |
+
{
|
| 219 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
|
| 220 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 221 |
+
{
|
| 222 |
+
return a || b;
|
| 223 |
+
}
|
| 224 |
+
__host__ __device__ __forceinline__ logical_or() {}
|
| 225 |
+
__host__ __device__ __forceinline__ logical_or(const logical_or&) {}
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
template <typename T> struct logical_not : unary_function<T, bool>
|
| 229 |
+
{
|
| 230 |
+
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a) const
|
| 231 |
+
{
|
| 232 |
+
return !a;
|
| 233 |
+
}
|
| 234 |
+
__host__ __device__ __forceinline__ logical_not() {}
|
| 235 |
+
__host__ __device__ __forceinline__ logical_not(const logical_not&) {}
|
| 236 |
+
};
|
| 237 |
+
|
| 238 |
+
// Bitwise Operations
|
| 239 |
+
template <typename T> struct bit_and : binary_function<T, T, T>
|
| 240 |
+
{
|
| 241 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 242 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 243 |
+
{
|
| 244 |
+
return a & b;
|
| 245 |
+
}
|
| 246 |
+
__host__ __device__ __forceinline__ bit_and() {}
|
| 247 |
+
__host__ __device__ __forceinline__ bit_and(const bit_and&) {}
|
| 248 |
+
};
|
| 249 |
+
|
| 250 |
+
template <typename T> struct bit_or : binary_function<T, T, T>
|
| 251 |
+
{
|
| 252 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 253 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 254 |
+
{
|
| 255 |
+
return a | b;
|
| 256 |
+
}
|
| 257 |
+
__host__ __device__ __forceinline__ bit_or() {}
|
| 258 |
+
__host__ __device__ __forceinline__ bit_or(const bit_or&) {}
|
| 259 |
+
};
|
| 260 |
+
|
| 261 |
+
template <typename T> struct bit_xor : binary_function<T, T, T>
|
| 262 |
+
{
|
| 263 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
|
| 264 |
+
typename TypeTraits<T>::ParameterType b) const
|
| 265 |
+
{
|
| 266 |
+
return a ^ b;
|
| 267 |
+
}
|
| 268 |
+
__host__ __device__ __forceinline__ bit_xor() {}
|
| 269 |
+
__host__ __device__ __forceinline__ bit_xor(const bit_xor&) {}
|
| 270 |
+
};
|
| 271 |
+
|
| 272 |
+
template <typename T> struct bit_not : unary_function<T, T>
|
| 273 |
+
{
|
| 274 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType v) const
|
| 275 |
+
{
|
| 276 |
+
return ~v;
|
| 277 |
+
}
|
| 278 |
+
__host__ __device__ __forceinline__ bit_not() {}
|
| 279 |
+
__host__ __device__ __forceinline__ bit_not(const bit_not&) {}
|
| 280 |
+
};
|
| 281 |
+
|
| 282 |
+
// Generalized Identity Operations
|
| 283 |
+
template <typename T> struct identity : unary_function<T, T>
|
| 284 |
+
{
|
| 285 |
+
__device__ __forceinline__ typename TypeTraits<T>::ParameterType operator()(typename TypeTraits<T>::ParameterType x) const
|
| 286 |
+
{
|
| 287 |
+
return x;
|
| 288 |
+
}
|
| 289 |
+
__host__ __device__ __forceinline__ identity() {}
|
| 290 |
+
__host__ __device__ __forceinline__ identity(const identity&) {}
|
| 291 |
+
};
|
| 292 |
+
|
| 293 |
+
template <typename T1, typename T2> struct project1st : binary_function<T1, T2, T1>
|
| 294 |
+
{
|
| 295 |
+
__device__ __forceinline__ typename TypeTraits<T1>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const
|
| 296 |
+
{
|
| 297 |
+
return lhs;
|
| 298 |
+
}
|
| 299 |
+
__host__ __device__ __forceinline__ project1st() {}
|
| 300 |
+
__host__ __device__ __forceinline__ project1st(const project1st&) {}
|
| 301 |
+
};
|
| 302 |
+
|
| 303 |
+
template <typename T1, typename T2> struct project2nd : binary_function<T1, T2, T2>
|
| 304 |
+
{
|
| 305 |
+
__device__ __forceinline__ typename TypeTraits<T2>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const
|
| 306 |
+
{
|
| 307 |
+
return rhs;
|
| 308 |
+
}
|
| 309 |
+
__host__ __device__ __forceinline__ project2nd() {}
|
| 310 |
+
__host__ __device__ __forceinline__ project2nd(const project2nd&) {}
|
| 311 |
+
};
|
| 312 |
+
|
| 313 |
+
// Min/Max Operations
|
| 314 |
+
|
| 315 |
+
#define OPENCV_CUDA_IMPLEMENT_MINMAX(name, type, op) \
|
| 316 |
+
template <> struct name<type> : binary_function<type, type, type> \
|
| 317 |
+
{ \
|
| 318 |
+
__device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \
|
| 319 |
+
__host__ __device__ __forceinline__ name() {}\
|
| 320 |
+
__host__ __device__ __forceinline__ name(const name&) {}\
|
| 321 |
+
};
|
| 322 |
+
|
| 323 |
+
template <typename T> struct maximum : binary_function<T, T, T>
|
| 324 |
+
{
|
| 325 |
+
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
|
| 326 |
+
{
|
| 327 |
+
return max(lhs, rhs);
|
| 328 |
+
}
|
| 329 |
+
__host__ __device__ __forceinline__ maximum() {}
|
| 330 |
+
__host__ __device__ __forceinline__ maximum(const maximum&) {}
|
| 331 |
+
};
|
| 332 |
+
|
| 333 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uchar, ::max)
|
| 334 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, schar, ::max)
|
| 335 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, char, ::max)
|
| 336 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, ushort, ::max)
|
| 337 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, short, ::max)
|
| 338 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, int, ::max)
|
| 339 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uint, ::max)
|
| 340 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, float, ::fmax)
|
| 341 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, double, ::fmax)
|
| 342 |
+
|
| 343 |
+
template <typename T> struct minimum : binary_function<T, T, T>
|
| 344 |
+
{
|
| 345 |
+
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
|
| 346 |
+
{
|
| 347 |
+
return min(lhs, rhs);
|
| 348 |
+
}
|
| 349 |
+
__host__ __device__ __forceinline__ minimum() {}
|
| 350 |
+
__host__ __device__ __forceinline__ minimum(const minimum&) {}
|
| 351 |
+
};
|
| 352 |
+
|
| 353 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uchar, ::min)
|
| 354 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, schar, ::min)
|
| 355 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, char, ::min)
|
| 356 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, ushort, ::min)
|
| 357 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, short, ::min)
|
| 358 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, int, ::min)
|
| 359 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uint, ::min)
|
| 360 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, float, ::fmin)
|
| 361 |
+
OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, double, ::fmin)
|
| 362 |
+
|
| 363 |
+
#undef OPENCV_CUDA_IMPLEMENT_MINMAX
|
| 364 |
+
|
| 365 |
+
// Math functions
|
| 366 |
+
|
| 367 |
+
template <typename T> struct abs_func : unary_function<T, T>
|
| 368 |
+
{
|
| 369 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType x) const
|
| 370 |
+
{
|
| 371 |
+
return abs(x);
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 375 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 376 |
+
};
|
| 377 |
+
template <> struct abs_func<unsigned char> : unary_function<unsigned char, unsigned char>
|
| 378 |
+
{
|
| 379 |
+
__device__ __forceinline__ unsigned char operator ()(unsigned char x) const
|
| 380 |
+
{
|
| 381 |
+
return x;
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 385 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 386 |
+
};
|
| 387 |
+
template <> struct abs_func<signed char> : unary_function<signed char, signed char>
|
| 388 |
+
{
|
| 389 |
+
__device__ __forceinline__ signed char operator ()(signed char x) const
|
| 390 |
+
{
|
| 391 |
+
return ::abs((int)x);
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 395 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 396 |
+
};
|
| 397 |
+
template <> struct abs_func<char> : unary_function<char, char>
|
| 398 |
+
{
|
| 399 |
+
__device__ __forceinline__ char operator ()(char x) const
|
| 400 |
+
{
|
| 401 |
+
return ::abs((int)x);
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 405 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 406 |
+
};
|
| 407 |
+
template <> struct abs_func<unsigned short> : unary_function<unsigned short, unsigned short>
|
| 408 |
+
{
|
| 409 |
+
__device__ __forceinline__ unsigned short operator ()(unsigned short x) const
|
| 410 |
+
{
|
| 411 |
+
return x;
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 415 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 416 |
+
};
|
| 417 |
+
template <> struct abs_func<short> : unary_function<short, short>
|
| 418 |
+
{
|
| 419 |
+
__device__ __forceinline__ short operator ()(short x) const
|
| 420 |
+
{
|
| 421 |
+
return ::abs((int)x);
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 425 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 426 |
+
};
|
| 427 |
+
template <> struct abs_func<unsigned int> : unary_function<unsigned int, unsigned int>
|
| 428 |
+
{
|
| 429 |
+
__device__ __forceinline__ unsigned int operator ()(unsigned int x) const
|
| 430 |
+
{
|
| 431 |
+
return x;
|
| 432 |
+
}
|
| 433 |
+
|
| 434 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 435 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 436 |
+
};
|
| 437 |
+
template <> struct abs_func<int> : unary_function<int, int>
|
| 438 |
+
{
|
| 439 |
+
__device__ __forceinline__ int operator ()(int x) const
|
| 440 |
+
{
|
| 441 |
+
return ::abs(x);
|
| 442 |
+
}
|
| 443 |
+
|
| 444 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 445 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 446 |
+
};
|
| 447 |
+
template <> struct abs_func<float> : unary_function<float, float>
|
| 448 |
+
{
|
| 449 |
+
__device__ __forceinline__ float operator ()(float x) const
|
| 450 |
+
{
|
| 451 |
+
return ::fabsf(x);
|
| 452 |
+
}
|
| 453 |
+
|
| 454 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 455 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 456 |
+
};
|
| 457 |
+
template <> struct abs_func<double> : unary_function<double, double>
|
| 458 |
+
{
|
| 459 |
+
__device__ __forceinline__ double operator ()(double x) const
|
| 460 |
+
{
|
| 461 |
+
return ::fabs(x);
|
| 462 |
+
}
|
| 463 |
+
|
| 464 |
+
__host__ __device__ __forceinline__ abs_func() {}
|
| 465 |
+
__host__ __device__ __forceinline__ abs_func(const abs_func&) {}
|
| 466 |
+
};
|
| 467 |
+
|
| 468 |
+
#define OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(name, func) \
|
| 469 |
+
template <typename T> struct name ## _func : unary_function<T, float> \
|
| 470 |
+
{ \
|
| 471 |
+
__device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v) const \
|
| 472 |
+
{ \
|
| 473 |
+
return func ## f(v); \
|
| 474 |
+
} \
|
| 475 |
+
__host__ __device__ __forceinline__ name ## _func() {} \
|
| 476 |
+
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
|
| 477 |
+
}; \
|
| 478 |
+
template <> struct name ## _func<double> : unary_function<double, double> \
|
| 479 |
+
{ \
|
| 480 |
+
__device__ __forceinline__ double operator ()(double v) const \
|
| 481 |
+
{ \
|
| 482 |
+
return func(v); \
|
| 483 |
+
} \
|
| 484 |
+
__host__ __device__ __forceinline__ name ## _func() {} \
|
| 485 |
+
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
|
| 486 |
+
};
|
| 487 |
+
|
| 488 |
+
#define OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(name, func) \
|
| 489 |
+
template <typename T> struct name ## _func : binary_function<T, T, float> \
|
| 490 |
+
{ \
|
| 491 |
+
__device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v1, typename TypeTraits<T>::ParameterType v2) const \
|
| 492 |
+
{ \
|
| 493 |
+
return func ## f(v1, v2); \
|
| 494 |
+
} \
|
| 495 |
+
__host__ __device__ __forceinline__ name ## _func() {} \
|
| 496 |
+
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
|
| 497 |
+
}; \
|
| 498 |
+
template <> struct name ## _func<double> : binary_function<double, double, double> \
|
| 499 |
+
{ \
|
| 500 |
+
__device__ __forceinline__ double operator ()(double v1, double v2) const \
|
| 501 |
+
{ \
|
| 502 |
+
return func(v1, v2); \
|
| 503 |
+
} \
|
| 504 |
+
__host__ __device__ __forceinline__ name ## _func() {} \
|
| 505 |
+
__host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
|
| 506 |
+
};
|
| 507 |
+
|
| 508 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)
|
| 509 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp, ::exp)
|
| 510 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2)
|
| 511 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10)
|
| 512 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log, ::log)
|
| 513 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log2, ::log2)
|
| 514 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log10, ::log10)
|
| 515 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sin, ::sin)
|
| 516 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cos, ::cos)
|
| 517 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tan, ::tan)
|
| 518 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asin, ::asin)
|
| 519 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acos, ::acos)
|
| 520 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atan, ::atan)
|
| 521 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh)
|
| 522 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh)
|
| 523 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh)
|
| 524 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh)
|
| 525 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh)
|
| 526 |
+
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh)
|
| 527 |
+
|
| 528 |
+
OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot)
|
| 529 |
+
OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2)
|
| 530 |
+
OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(pow, ::pow)
|
| 531 |
+
|
| 532 |
+
#undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR
|
| 533 |
+
#undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE
|
| 534 |
+
#undef OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR
|
| 535 |
+
|
| 536 |
+
template<typename T> struct hypot_sqr_func : binary_function<T, T, float>
|
| 537 |
+
{
|
| 538 |
+
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType src1, typename TypeTraits<T>::ParameterType src2) const
|
| 539 |
+
{
|
| 540 |
+
return src1 * src1 + src2 * src2;
|
| 541 |
+
}
|
| 542 |
+
__host__ __device__ __forceinline__ hypot_sqr_func() {}
|
| 543 |
+
__host__ __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func&) {}
|
| 544 |
+
};
|
| 545 |
+
|
| 546 |
+
// Saturate Cast Functor
|
| 547 |
+
template <typename T, typename D> struct saturate_cast_func : unary_function<T, D>
|
| 548 |
+
{
|
| 549 |
+
__device__ __forceinline__ D operator ()(typename TypeTraits<T>::ParameterType v) const
|
| 550 |
+
{
|
| 551 |
+
return saturate_cast<D>(v);
|
| 552 |
+
}
|
| 553 |
+
__host__ __device__ __forceinline__ saturate_cast_func() {}
|
| 554 |
+
__host__ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func&) {}
|
| 555 |
+
};
|
| 556 |
+
|
| 557 |
+
// Threshold Functors
|
| 558 |
+
template <typename T> struct thresh_binary_func : unary_function<T, T>
|
| 559 |
+
{
|
| 560 |
+
__host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
|
| 561 |
+
|
| 562 |
+
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
|
| 563 |
+
{
|
| 564 |
+
return (src > thresh) * maxVal;
|
| 565 |
+
}
|
| 566 |
+
|
| 567 |
+
__host__ __device__ __forceinline__ thresh_binary_func() {}
|
| 568 |
+
__host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)
|
| 569 |
+
: thresh(other.thresh), maxVal(other.maxVal) {}
|
| 570 |
+
|
| 571 |
+
T thresh;
|
| 572 |
+
T maxVal;
|
| 573 |
+
};
|
| 574 |
+
|
| 575 |
+
template <typename T> struct thresh_binary_inv_func : unary_function<T, T>
|
| 576 |
+
{
|
| 577 |
+
__host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
|
| 578 |
+
|
| 579 |
+
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
|
| 580 |
+
{
|
| 581 |
+
return (src <= thresh) * maxVal;
|
| 582 |
+
}
|
| 583 |
+
|
| 584 |
+
__host__ __device__ __forceinline__ thresh_binary_inv_func() {}
|
| 585 |
+
__host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)
|
| 586 |
+
: thresh(other.thresh), maxVal(other.maxVal) {}
|
| 587 |
+
|
| 588 |
+
T thresh;
|
| 589 |
+
T maxVal;
|
| 590 |
+
};
|
| 591 |
+
|
| 592 |
+
template <typename T> struct thresh_trunc_func : unary_function<T, T>
|
| 593 |
+
{
|
| 594 |
+
explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);}
|
| 595 |
+
|
| 596 |
+
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
|
| 597 |
+
{
|
| 598 |
+
return minimum<T>()(src, thresh);
|
| 599 |
+
}
|
| 600 |
+
|
| 601 |
+
__host__ __device__ __forceinline__ thresh_trunc_func() {}
|
| 602 |
+
__host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)
|
| 603 |
+
: thresh(other.thresh) {}
|
        T thresh;
    };

    template <typename T> struct thresh_to_zero_func : unary_function<T, T>
    {
        explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);}

        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
        {
            return (src > thresh) * src;
        }

        __host__ __device__ __forceinline__ thresh_to_zero_func() {}
        __host__ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)
            : thresh(other.thresh) {}

        T thresh;
    };

    template <typename T> struct thresh_to_zero_inv_func : unary_function<T, T>
    {
        explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);}

        __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
        {
            return (src <= thresh) * src;
        }

        __host__ __device__ __forceinline__ thresh_to_zero_inv_func() {}
        __host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)
            : thresh(other.thresh) {}

        T thresh;
    };

    // Function Object Adaptors
    template <typename Predicate> struct unary_negate : unary_function<typename Predicate::argument_type, bool>
    {
        explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {}

        __device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::argument_type>::ParameterType x) const
        {
            return !pred(x);
        }

        __host__ __device__ __forceinline__ unary_negate() {}
        __host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {}

        Predicate pred;
    };

    template <typename Predicate> __host__ __device__ __forceinline__ unary_negate<Predicate> not1(const Predicate& pred)
    {
        return unary_negate<Predicate>(pred);
    }

    template <typename Predicate> struct binary_negate : binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>
    {
        explicit __host__ __device__ __forceinline__ binary_negate(const Predicate& p) : pred(p) {}

        __device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::first_argument_type>::ParameterType x,
                                                   typename TypeTraits<typename Predicate::second_argument_type>::ParameterType y) const
        {
            return !pred(x,y);
        }

        __host__ __device__ __forceinline__ binary_negate() {}
        __host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {}

        Predicate pred;
    };

    template <typename BinaryPredicate> __host__ __device__ __forceinline__ binary_negate<BinaryPredicate> not2(const BinaryPredicate& pred)
    {
        return binary_negate<BinaryPredicate>(pred);
    }

    template <typename Op> struct binder1st : unary_function<typename Op::second_argument_type, typename Op::result_type>
    {
        __host__ __device__ __forceinline__ binder1st(const Op& op_, const typename Op::first_argument_type& arg1_) : op(op_), arg1(arg1_) {}

        __device__ __forceinline__ typename Op::result_type operator ()(typename TypeTraits<typename Op::second_argument_type>::ParameterType a) const
        {
            return op(arg1, a);
        }

        __host__ __device__ __forceinline__ binder1st() {}
        __host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {}

        Op op;
        typename Op::first_argument_type arg1;
    };

    template <typename Op, typename T> __host__ __device__ __forceinline__ binder1st<Op> bind1st(const Op& op, const T& x)
    {
        return binder1st<Op>(op, typename Op::first_argument_type(x));
    }

    template <typename Op> struct binder2nd : unary_function<typename Op::first_argument_type, typename Op::result_type>
    {
        __host__ __device__ __forceinline__ binder2nd(const Op& op_, const typename Op::second_argument_type& arg2_) : op(op_), arg2(arg2_) {}

        __forceinline__ __device__ typename Op::result_type operator ()(typename TypeTraits<typename Op::first_argument_type>::ParameterType a) const
        {
            return op(a, arg2);
        }

        __host__ __device__ __forceinline__ binder2nd() {}
        __host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {}

        Op op;
        typename Op::second_argument_type arg2;
    };

    template <typename Op, typename T> __host__ __device__ __forceinline__ binder2nd<Op> bind2nd(const Op& op, const T& x)
    {
        return binder2nd<Op>(op, typename Op::second_argument_type(x));
    }

    // Functor Traits
    template <typename F> struct IsUnaryFunction
    {
        typedef char Yes;
        struct No {Yes a[2];};

        template <typename T, typename D> static Yes check(unary_function<T, D>);
        static No check(...);

        static F makeF();

        enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };
    };

    template <typename F> struct IsBinaryFunction
    {
        typedef char Yes;
        struct No {Yes a[2];};

        template <typename T1, typename T2, typename D> static Yes check(binary_function<T1, T2, D>);
        static No check(...);

        static F makeF();

        enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };
    };

    namespace functional_detail
    {
        template <size_t src_elem_size, size_t dst_elem_size> struct UnOpShift { enum { shift = 1 }; };
        template <size_t src_elem_size> struct UnOpShift<src_elem_size, 1> { enum { shift = 4 }; };
        template <size_t src_elem_size> struct UnOpShift<src_elem_size, 2> { enum { shift = 2 }; };

        template <typename T, typename D> struct DefaultUnaryShift
        {
            enum { shift = UnOpShift<sizeof(T), sizeof(D)>::shift };
        };

        template <size_t src_elem_size1, size_t src_elem_size2, size_t dst_elem_size> struct BinOpShift { enum { shift = 1 }; };
        template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 1> { enum { shift = 4 }; };
        template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 2> { enum { shift = 2 }; };

        template <typename T1, typename T2, typename D> struct DefaultBinaryShift
        {
            enum { shift = BinOpShift<sizeof(T1), sizeof(T2), sizeof(D)>::shift };
        };

        template <typename Func, bool unary = IsUnaryFunction<Func>::value> struct ShiftDispatcher;
        template <typename Func> struct ShiftDispatcher<Func, true>
        {
            enum { shift = DefaultUnaryShift<typename Func::argument_type, typename Func::result_type>::shift };
        };
        template <typename Func> struct ShiftDispatcher<Func, false>
        {
            enum { shift = DefaultBinaryShift<typename Func::first_argument_type, typename Func::second_argument_type, typename Func::result_type>::shift };
        };
    }

    template <typename Func> struct DefaultTransformShift
    {
        enum { shift = functional_detail::ShiftDispatcher<Func>::shift };
    };

    template <typename Func> struct DefaultTransformFunctorTraits
    {
        enum { simple_block_dim_x = 16 };
        enum { simple_block_dim_y = 16 };

        enum { smart_block_dim_x = 16 };
        enum { smart_block_dim_y = 16 };
        enum { smart_shift = DefaultTransformShift<Func>::shift };
    };

    template <typename Func> struct TransformFunctorTraits : DefaultTransformFunctorTraits<Func> {};

#define OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(type) \
    template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type >
}}} // namespace cv { namespace cuda { namespace cudev

//! @endcond

#endif // OPENCV_CUDA_FUNCTIONAL_HPP
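For context, a minimal and hypothetical usage sketch (not part of the diff): the binder adaptors above turn a binary device functor into a unary one, which is the usual way to feed a per-call constant into a transform-style kernel. The kernel name and the use of the plus<int> functor defined earlier in functional.hpp are assumptions made only for illustration.

// Hypothetical sketch: add a constant to every element with bind2nd + plus<int>.
#include "opencv2/core/cuda/functional.hpp"

using namespace cv::cuda::device;

__global__ void addConstantKernel(const int* src, int* dst, int n, int value)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        // bind2nd fixes the second argument of the binary functor, yielding a unary op.
        binder2nd< plus<int> > op = bind2nd(plus<int>(), value);
        dst[i] = op(src[i]);
    }
}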
3rdparty/opencv/include/opencv2/core/cuda/limits.hpp
ADDED
@@ -0,0 +1,128 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_LIMITS_HPP
#define OPENCV_CUDA_LIMITS_HPP

#include <limits.h>
#include <float.h>
#include "common.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    template <class T> struct numeric_limits;

    template <> struct numeric_limits<bool>
    {
        __device__ __forceinline__ static bool min() { return false; }
        __device__ __forceinline__ static bool max() { return true; }
        static const bool is_signed = false;
    };

    template <> struct numeric_limits<signed char>
    {
        __device__ __forceinline__ static signed char min() { return SCHAR_MIN; }
        __device__ __forceinline__ static signed char max() { return SCHAR_MAX; }
        static const bool is_signed = true;
    };

    template <> struct numeric_limits<unsigned char>
    {
        __device__ __forceinline__ static unsigned char min() { return 0; }
        __device__ __forceinline__ static unsigned char max() { return UCHAR_MAX; }
        static const bool is_signed = false;
    };

    template <> struct numeric_limits<short>
    {
        __device__ __forceinline__ static short min() { return SHRT_MIN; }
        __device__ __forceinline__ static short max() { return SHRT_MAX; }
        static const bool is_signed = true;
    };

    template <> struct numeric_limits<unsigned short>
    {
        __device__ __forceinline__ static unsigned short min() { return 0; }
        __device__ __forceinline__ static unsigned short max() { return USHRT_MAX; }
        static const bool is_signed = false;
    };

    template <> struct numeric_limits<int>
    {
        __device__ __forceinline__ static int min() { return INT_MIN; }
        __device__ __forceinline__ static int max() { return INT_MAX; }
        static const bool is_signed = true;
    };

    template <> struct numeric_limits<unsigned int>
    {
        __device__ __forceinline__ static unsigned int min() { return 0; }
        __device__ __forceinline__ static unsigned int max() { return UINT_MAX; }
        static const bool is_signed = false;
    };

    template <> struct numeric_limits<float>
    {
        __device__ __forceinline__ static float min() { return FLT_MIN; }
        __device__ __forceinline__ static float max() { return FLT_MAX; }
        __device__ __forceinline__ static float epsilon() { return FLT_EPSILON; }
        static const bool is_signed = true;
    };

    template <> struct numeric_limits<double>
    {
        __device__ __forceinline__ static double min() { return DBL_MIN; }
        __device__ __forceinline__ static double max() { return DBL_MAX; }
        __device__ __forceinline__ static double epsilon() { return DBL_EPSILON; }
        static const bool is_signed = true;
    };
}}} // namespace cv { namespace cuda { namespace cudev {

//! @endcond

#endif // OPENCV_CUDA_LIMITS_HPP
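As a quick, hypothetical illustration (not part of the diff) of how these device-side specializations get used, a kernel can seed a running minimum with numeric_limits<float>::max() so that any real sample replaces it. The kernel name and launch layout below are assumed for the example only.

// Hypothetical sketch: seed a per-thread minimum with the largest representable float.
#include "opencv2/core/cuda/limits.hpp"

using namespace cv::cuda::device;

__global__ void perThreadMinKernel(const float* src, float* dst, int n)
{
    const int tid    = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;

    float best = numeric_limits<float>::max();   // any real sample will replace this seed

    for (int i = tid; i < n; i += stride)
        best = fminf(best, src[i]);

    if (tid < n)
        dst[tid] = best;
}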
3rdparty/opencv/include/opencv2/core/cuda/reduce.hpp
ADDED
@@ -0,0 +1,230 @@
/* (standard OpenCV license header, identical to the one at the top of limits.hpp above) */
#ifndef OPENCV_CUDA_REDUCE_HPP
#define OPENCV_CUDA_REDUCE_HPP

#ifndef THRUST_DEBUG // eliminate -Wundef warning
#define THRUST_DEBUG 0
#endif

#include <thrust/tuple.h>
#include "detail/reduce.hpp"
#include "detail/reduce_key_val.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    template <int N, typename T, class Op>
    __device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op)
    {
        reduce_detail::Dispatcher<N>::reductor::template reduce<volatile T*, T&, const Op&>(smem, val, tid, op);
    }
    template <unsigned int N, typename K, typename V, class Cmp>
    __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, volatile V* svals, V& val, unsigned int tid, const Cmp& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&, volatile V*, V&, const Cmp&>(skeys, key, svals, val, tid, cmp);
    }
#if (CUDART_VERSION < 12040) // details: https://github.com/opencv/opencv_contrib/issues/3690
    template <int N,
              typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
              typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
              class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
    __device__ __forceinline__ void reduce(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
                                           const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
                                           unsigned int tid,
                                           const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
    {
        reduce_detail::Dispatcher<N>::reductor::template reduce<
                const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>&,
                const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>&,
                const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>&>(smem, val, tid, op);
    }

    template <unsigned int N,
              typename K,
              typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
              typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
              class Cmp>
    __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key,
                                                 const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
                                                 const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
                                                 unsigned int tid, const Cmp& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&,
                const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
                const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
                const Cmp&>(skeys, key, svals, val, tid, cmp);
    }

    template <unsigned int N,
              typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,
              typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
              typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
              typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
              class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
    __device__ __forceinline__ void reduceKeyVal(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,
                                                 const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
                                                 const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
                                                 const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
                                                 unsigned int tid,
                                                 const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<
                const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>&,
                const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>&,
                const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
                const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
                const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>&
                >(skeys, key, svals, val, tid, cmp);
    }
#else
    template <int N, typename... P, typename... R, class... Op>
    __device__ __forceinline__ void reduce(const thrust::tuple<P...>& smem, const thrust::tuple<R...>& val, unsigned int tid, const thrust::tuple<Op...>& op)
    {
        reduce_detail::Dispatcher<N>::reductor::template reduce<const thrust::tuple<P...>&, const thrust::tuple<R...>&, const thrust::tuple<Op...>&>(smem, val, tid, op);
    }

    template <unsigned int N, typename K, typename... VP, typename... VR, class Cmp>
    __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, const thrust::tuple<VP...>& svals, const thrust::tuple<VR...>& val, unsigned int tid, const Cmp& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&, const thrust::tuple<VP...>&, const thrust::tuple<VR...>&, const Cmp&>(skeys, key, svals, val, tid, cmp);
    }

    template <unsigned int N, typename... KP, typename... KR, typename... VP, typename... VR, class... Cmp>
    __device__ __forceinline__ void reduceKeyVal(const thrust::tuple<KP...>& skeys, const thrust::tuple<KR...>& key, const thrust::tuple<VP...>& svals, const thrust::tuple<VR...>& val, unsigned int tid, const thrust::tuple<Cmp...>& cmp)
    {
        reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<const thrust::tuple<KP...>&, const thrust::tuple<KR...>&, const thrust::tuple<VP...>&, const thrust::tuple<VR...>&, const thrust::tuple<Cmp...>&>(skeys, key, svals, val, tid, cmp);
    }
#endif

    // smem_tuple

    template <typename T0>
    __device__ __forceinline__
    thrust::tuple<volatile T0*>
    smem_tuple(T0* t0)
    {
        return thrust::make_tuple((volatile T0*) t0);
    }

    template <typename T0, typename T1>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*>
    smem_tuple(T0* t0, T1* t1)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1);
    }

    template <typename T0, typename T1, typename T2>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*>
    smem_tuple(T0* t0, T1* t1, T2* t2)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2);
    }

    template <typename T0, typename T1, typename T2, typename T3>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8);
    }

    template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
    __device__ __forceinline__
    thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*, volatile T9*>
    smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8, T9* t9)
    {
        return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8, (volatile T9*) t9);
    }
}}}

//! @endcond

#endif // OPENCV_CUDA_REDUCE_HPP
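A minimal, hypothetical sketch (not part of the diff) of the intended call pattern: each thread contributes one value, reduce<N>() folds them through shared memory, and thread 0 writes the block result. The 256-thread block size, the kernel name, and the use of the plus<float> functor from functional.hpp are assumptions made for illustration.

// Hypothetical sketch: block-wide sum with reduce<>() over a 256-thread block.
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"

using namespace cv::cuda::device;

__global__ void blockSumKernel(const float* src, float* partialSums, int n)
{
    __shared__ float smem[256];

    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    float val = (i < n) ? src[i] : 0.0f;

    // Folds the 256 per-thread values; after the call thread 0 holds the block total.
    reduce<256>(smem, val, threadIdx.x, plus<float>());

    if (threadIdx.x == 0)
        partialSums[blockIdx.x] = val;
}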
3rdparty/opencv/include/opencv2/core/cuda/saturate_cast.hpp
ADDED
@@ -0,0 +1,292 @@
/* (standard OpenCV license header, identical to the one at the top of limits.hpp above) */
#ifndef OPENCV_CUDA_SATURATE_CAST_HPP
#define OPENCV_CUDA_SATURATE_CAST_HPP

#include "common.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); }
    template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); }

    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(schar v)
    {
        uint res = 0;
        int vi = v;
        asm("cvt.sat.u8.s8 %0, %1;" : "=r"(res) : "r"(vi));
        return res;
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(short v)
    {
        uint res = 0;
        asm("cvt.sat.u8.s16 %0, %1;" : "=r"(res) : "h"(v));
        return res;
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(ushort v)
    {
        uint res = 0;
        asm("cvt.sat.u8.u16 %0, %1;" : "=r"(res) : "h"(v));
        return res;
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(int v)
    {
        uint res = 0;
        asm("cvt.sat.u8.s32 %0, %1;" : "=r"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(uint v)
    {
        uint res = 0;
        asm("cvt.sat.u8.u32 %0, %1;" : "=r"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(float v)
    {
        uint res = 0;
        asm("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(res) : "f"(v));
        return res;
    }
    template<> __device__ __forceinline__ uchar saturate_cast<uchar>(double v)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
        uint res = 0;
        asm("cvt.rni.sat.u8.f64 %0, %1;" : "=r"(res) : "d"(v));
        return res;
    #else
        return saturate_cast<uchar>((float)v);
    #endif
    }

    template<> __device__ __forceinline__ schar saturate_cast<schar>(uchar v)
    {
        uint res = 0;
        uint vi = v;
        asm("cvt.sat.s8.u8 %0, %1;" : "=r"(res) : "r"(vi));
        return res;
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(short v)
    {
        uint res = 0;
        asm("cvt.sat.s8.s16 %0, %1;" : "=r"(res) : "h"(v));
        return res;
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(ushort v)
    {
        uint res = 0;
        asm("cvt.sat.s8.u16 %0, %1;" : "=r"(res) : "h"(v));
        return res;
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(int v)
    {
        uint res = 0;
        asm("cvt.sat.s8.s32 %0, %1;" : "=r"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(uint v)
    {
        uint res = 0;
        asm("cvt.sat.s8.u32 %0, %1;" : "=r"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(float v)
    {
        uint res = 0;
        asm("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(res) : "f"(v));
        return res;
    }
    template<> __device__ __forceinline__ schar saturate_cast<schar>(double v)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
        uint res = 0;
        asm("cvt.rni.sat.s8.f64 %0, %1;" : "=r"(res) : "d"(v));
        return res;
    #else
        return saturate_cast<schar>((float)v);
    #endif
    }

    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(schar v)
    {
        ushort res = 0;
        int vi = v;
        asm("cvt.sat.u16.s8 %0, %1;" : "=h"(res) : "r"(vi));
        return res;
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(short v)
    {
        ushort res = 0;
        asm("cvt.sat.u16.s16 %0, %1;" : "=h"(res) : "h"(v));
        return res;
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(int v)
    {
        ushort res = 0;
        asm("cvt.sat.u16.s32 %0, %1;" : "=h"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(uint v)
    {
        ushort res = 0;
        asm("cvt.sat.u16.u32 %0, %1;" : "=h"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(float v)
    {
        ushort res = 0;
        asm("cvt.rni.sat.u16.f32 %0, %1;" : "=h"(res) : "f"(v));
        return res;
    }
    template<> __device__ __forceinline__ ushort saturate_cast<ushort>(double v)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
        ushort res = 0;
        asm("cvt.rni.sat.u16.f64 %0, %1;" : "=h"(res) : "d"(v));
        return res;
    #else
        return saturate_cast<ushort>((float)v);
    #endif
    }

    template<> __device__ __forceinline__ short saturate_cast<short>(ushort v)
    {
        short res = 0;
        asm("cvt.sat.s16.u16 %0, %1;" : "=h"(res) : "h"(v));
        return res;
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(int v)
    {
        short res = 0;
        asm("cvt.sat.s16.s32 %0, %1;" : "=h"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(uint v)
    {
        short res = 0;
        asm("cvt.sat.s16.u32 %0, %1;" : "=h"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(float v)
    {
        short res = 0;
        asm("cvt.rni.sat.s16.f32 %0, %1;" : "=h"(res) : "f"(v));
        return res;
    }
    template<> __device__ __forceinline__ short saturate_cast<short>(double v)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
        short res = 0;
        asm("cvt.rni.sat.s16.f64 %0, %1;" : "=h"(res) : "d"(v));
        return res;
    #else
        return saturate_cast<short>((float)v);
    #endif
    }

    template<> __device__ __forceinline__ int saturate_cast<int>(uint v)
    {
        int res = 0;
        asm("cvt.sat.s32.u32 %0, %1;" : "=r"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ int saturate_cast<int>(float v)
    {
        return __float2int_rn(v);
    }
    template<> __device__ __forceinline__ int saturate_cast<int>(double v)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
        return __double2int_rn(v);
    #else
        return saturate_cast<int>((float)v);
    #endif
    }

    template<> __device__ __forceinline__ uint saturate_cast<uint>(schar v)
    {
        uint res = 0;
        int vi = v;
        asm("cvt.sat.u32.s8 %0, %1;" : "=r"(res) : "r"(vi));
        return res;
    }
    template<> __device__ __forceinline__ uint saturate_cast<uint>(short v)
    {
        uint res = 0;
        asm("cvt.sat.u32.s16 %0, %1;" : "=r"(res) : "h"(v));
        return res;
    }
    template<> __device__ __forceinline__ uint saturate_cast<uint>(int v)
    {
        uint res = 0;
        asm("cvt.sat.u32.s32 %0, %1;" : "=r"(res) : "r"(v));
        return res;
    }
    template<> __device__ __forceinline__ uint saturate_cast<uint>(float v)
    {
        return __float2uint_rn(v);
    }
    template<> __device__ __forceinline__ uint saturate_cast<uint>(double v)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
        return __double2uint_rn(v);
    #else
        return saturate_cast<uint>((float)v);
    #endif
    }
}}}

//! @endcond

#endif /* OPENCV_CUDA_SATURATE_CAST_HPP */
|
3rdparty/opencv/include/opencv2/core/cuda/scan.hpp
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* (standard OpenCV license header, identical to the one at the top of limits.hpp above) */
#ifndef OPENCV_CUDA_SCAN_HPP
#define OPENCV_CUDA_SCAN_HPP

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/warp.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };

    template <ScanKind Kind, typename T, typename F> struct WarpScan
    {
        __device__ __forceinline__ WarpScan() {}
        __device__ __forceinline__ WarpScan(const WarpScan& other) { CV_UNUSED(other); }

        __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
        {
            const unsigned int lane = idx & 31;
            F op;

            if ( lane >= 1)  ptr [idx ] = op(ptr [idx - 1],  ptr [idx]);
            if ( lane >= 2)  ptr [idx ] = op(ptr [idx - 2],  ptr [idx]);
            if ( lane >= 4)  ptr [idx ] = op(ptr [idx - 4],  ptr [idx]);
            if ( lane >= 8)  ptr [idx ] = op(ptr [idx - 8],  ptr [idx]);
            if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);

            if( Kind == INCLUSIVE )
                return ptr [idx];
            else
                return (lane > 0) ? ptr [idx - 1] : 0;
        }

        __device__ __forceinline__ unsigned int index(const unsigned int tid)
        {
            return tid;
        }

        __device__ __forceinline__ void init(volatile T *ptr){}

        static const int warp_offset = 0;

        typedef WarpScan<INCLUSIVE, T, F> merge;
    };

    template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
    {
        __device__ __forceinline__ WarpScanNoComp() {}
        __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { CV_UNUSED(other); }

        __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
        {
            const unsigned int lane = threadIdx.x & 31;
            F op;

            ptr [idx ] = op(ptr [idx - 1],  ptr [idx]);
            ptr [idx ] = op(ptr [idx - 2],  ptr [idx]);
            ptr [idx ] = op(ptr [idx - 4],  ptr [idx]);
            ptr [idx ] = op(ptr [idx - 8],  ptr [idx]);
            ptr [idx ] = op(ptr [idx - 16], ptr [idx]);

            if( Kind == INCLUSIVE )
                return ptr [idx];
            else
                return (lane > 0) ? ptr [idx - 1] : 0;
        }

        __device__ __forceinline__ unsigned int index(const unsigned int tid)
        {
            return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
        }

        __device__ __forceinline__ void init(volatile T *ptr)
        {
            ptr[threadIdx.x] = 0;
        }

        static const int warp_smem_stride = 32 + 16 + 1;
        static const int warp_offset = 16;
        static const int warp_log = 5;
        static const int warp_mask = 31;

        typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
    };

    template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
    {
        __device__ __forceinline__ BlockScan() {}
        __device__ __forceinline__ BlockScan(const BlockScan& other) { CV_UNUSED(other); }

        __device__ __forceinline__ T operator()(volatile T *ptr)
        {
            const unsigned int tid  = threadIdx.x;
            const unsigned int lane = tid & warp_mask;
            const unsigned int warp = tid >> warp_log;

            Sc scan;
            typename Sc::merge merge_scan;
            const unsigned int idx = scan.index(tid);

            T val = scan(ptr, idx);
            __syncthreads ();

            if( warp == 0)
                scan.init(ptr);
            __syncthreads ();

            if( lane == 31 )
                ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
            __syncthreads ();

            if( warp == 0 )
                merge_scan(ptr, idx);
            __syncthreads();

            if ( warp > 0)
                val = ptr [scan.warp_offset + warp - 1] + val;
            __syncthreads ();

            ptr[idx] = val;
            __syncthreads ();

            return val ;
        }

        static const int warp_log = 5;
        static const int warp_mask = 31;
    };

    template <typename T>
    __device__ T warpScanInclusive(T idata, volatile T* s_Data, unsigned int tid)
    {
    #if __CUDA_ARCH__ >= 300
        const unsigned int laneId = cv::cuda::device::Warp::laneId();

        // scan using the warp shuffle functions
        #pragma unroll
        for (int i = 1; i <= (OPENCV_CUDA_WARP_SIZE / 2); i *= 2)
        {
            const T n = cv::cuda::device::shfl_up(idata, i);
            if (laneId >= i)
                idata += n;
        }

        return idata;
    #else
        unsigned int pos = 2 * tid - (tid & (OPENCV_CUDA_WARP_SIZE - 1));
        s_Data[pos] = 0;
        pos += OPENCV_CUDA_WARP_SIZE;
        s_Data[pos] = idata;

        s_Data[pos] += s_Data[pos - 1];
        s_Data[pos] += s_Data[pos - 2];
        s_Data[pos] += s_Data[pos - 4];
        s_Data[pos] += s_Data[pos - 8];
        s_Data[pos] += s_Data[pos - 16];

        return s_Data[pos];
    #endif
    }

    template <typename T>
    __device__ __forceinline__ T warpScanExclusive(T idata, volatile T* s_Data, unsigned int tid)
    {
        return warpScanInclusive(idata, s_Data, tid) - idata;
    }

    template <int tiNumScanThreads, typename T>
    __device__ T blockScanInclusive(T idata, volatile T* s_Data, unsigned int tid)
    {
        if (tiNumScanThreads > OPENCV_CUDA_WARP_SIZE)
        {
            //Bottom-level inclusive warp scan
            T warpResult = warpScanInclusive(idata, s_Data, tid);

            //Save top elements of each warp for exclusive warp scan
            //sync to wait for warp scans to complete (because s_Data is being overwritten)
            __syncthreads();
            if ((tid & (OPENCV_CUDA_WARP_SIZE - 1)) == (OPENCV_CUDA_WARP_SIZE - 1))
            {
                s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE] = warpResult;
            }

            //wait for warp scans to complete
            __syncthreads();

            if (tid < (tiNumScanThreads / OPENCV_CUDA_WARP_SIZE) )
            {
                //grab top warp elements
                T val = s_Data[tid];
                //calculate exclusive scan and write back to shared memory
                s_Data[tid] = warpScanExclusive(val, s_Data, tid);
            }

            //return updated warp scans with exclusive scan results
            __syncthreads();

            return warpResult + s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE];
        }
        else
        {
            return warpScanInclusive(idata, s_Data, tid);
        }
    }
}}}

//! @endcond

#endif // OPENCV_CUDA_SCAN_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/simd_functions.hpp
ADDED
|
@@ -0,0 +1,869 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/*
 * Copyright (c) 2013 NVIDIA Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 *   Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 *   Neither the name of NVIDIA Corporation nor the names of its contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef OPENCV_CUDA_SIMD_FUNCTIONS_HPP
#define OPENCV_CUDA_SIMD_FUNCTIONS_HPP

#include "common.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    // 2

    static __device__ __forceinline__ unsigned int vadd2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vadd2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vadd.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vadd.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s;
        s = a ^ b;          // sum bits
        r = a + b;          // actual sum
        s = s ^ r;          // determine carry-ins for each bit position
        s = s & 0x00010000; // carry-in to high word (= carry-out from low word)
        r = r - s;          // subtract out carry-out from low word
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsub2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vsub2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vsub.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vsub.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s;
        s = a ^ b;          // sum bits
        r = a - b;          // actual sum
        s = s ^ r;          // determine carry-ins for each bit position
        s = s & 0x00010000; // borrow to high word
        r = r + s;          // compensate for borrow from low word
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vabsdiff2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vabsdiff2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vabsdiff.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vabsdiff.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s, t, u, v;
        s = a & 0x0000ffff; // extract low halfword
        r = b & 0x0000ffff; // extract low halfword
        u = ::max(r, s);    // maximum of low halfwords
        v = ::min(r, s);    // minimum of low halfwords
        s = a & 0xffff0000; // extract high halfword
        r = b & 0xffff0000; // extract high halfword
        t = ::max(r, s);    // maximum of high halfwords
        s = ::min(r, s);    // minimum of high halfwords
        r = u | t;          // maximum of both halfwords
        s = v | s;          // minimum of both halfwords
        r = r - s;          // |a - b| = max(a,b) - min(a,b);
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vavg2(unsigned int a, unsigned int b)
    {
        unsigned int r, s;

        // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==>
        // (a + b) / 2 = (a & b) + ((a ^ b) >> 1)
        s = a ^ b;
        r = a & b;
        s = s & 0xfffefffe; // ensure shift doesn't cross halfword boundaries
        s = s >> 1;
        s = r + s;

        return s;
    }

    static __device__ __forceinline__ unsigned int vavrg2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vavrg2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==>
        // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1)
        unsigned int s;
        s = a ^ b;
        r = a | b;
        s = s & 0xfffefffe; // ensure shift doesn't cross half-word boundaries
        s = s >> 1;
        r = r - s;
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vseteq2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset2.u32.u32.eq %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        unsigned int c;
        r = a ^ b;          // 0x0000 if a == b
        c = r | 0x80008000; // set msbs, to catch carry out
        r = r ^ c;          // extract msbs, msb = 1 if r < 0x8000
        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
        c = r & ~c;         // msb = 1, if r was 0x0000
        r = c >> 15;        // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpeq2(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vseteq2(a, b);
        c = r << 16;        // convert bool
        r = c - r;          // into mask
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        r = a ^ b;          // 0x0000 if a == b
        c = r | 0x80008000; // set msbs, to catch carry out
        r = r ^ c;          // extract msbs, msb = 1 if r < 0x8000
        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
        c = r & ~c;         // msb = 1, if r was 0x0000
        r = c >> 15;        // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetge2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset2.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(b));
        c = vavrg2(a, b);   // (a + ~b + 1) / 2 = (a - b) / 2
        c = c & 0x80008000; // msb = carry-outs
        r = c >> 15;        // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpge2(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetge2(a, b);
        c = r << 16;        // convert bool
        r = c - r;          // into mask
    #else
        asm("not.b32 %0, %0;" : "+r"(b));
        c = vavrg2(a, b);   // (a + ~b + 1) / 2 = (a - b) / 2
        c = c & 0x80008000; // msb = carry-outs
        r = c >> 15;        // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetgt2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset2.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(b));
        c = vavg2(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]
        c = c & 0x80008000; // msbs = carry-outs
        r = c >> 15;        // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpgt2(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetgt2(a, b);
        c = r << 16;        // convert bool
        r = c - r;          // into mask
    #else
        asm("not.b32 %0, %0;" : "+r"(b));
        c = vavg2(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]
        c = c & 0x80008000; // msbs = carry-outs
        r = c >> 15;        // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetle2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset2.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavrg2(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2
        c = c & 0x80008000; // msb = carry-outs
        r = c >> 15;        // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmple2(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetle2(a, b);
        c = r << 16;        // convert bool
        r = c - r;          // into mask
    #else
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavrg2(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2
        c = c & 0x80008000; // msb = carry-outs
        r = c >> 15;        // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetlt2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset2.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavg2(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]
        c = c & 0x80008000; // msb = carry-outs
        r = c >> 15;        // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmplt2(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetlt2(a, b);
        c = r << 16;        // convert bool
        r = c - r;          // into mask
    #else
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavg2(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]
        c = c & 0x80008000; // msb = carry-outs
        r = c >> 15;        // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetne2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm ("vset2.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        unsigned int c;
        r = a ^ b;          // 0x0000 if a == b
        c = r | 0x80008000; // set msbs, to catch carry out
        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
        c = r | c;          // msb = 1, if r was not 0x0000
        c = c & 0x80008000; // extract msbs
        r = c >> 15;        // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpne2(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetne2(a, b);
        c = r << 16;        // convert bool
        r = c - r;          // into mask
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        r = a ^ b;          // 0x0000 if a == b
        c = r | 0x80008000; // set msbs, to catch carry out
        c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
        c = r | c;          // msb = 1, if r was not 0x0000
        c = c & 0x80008000; // extract msbs
        r = c >> 15;        // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vmax2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vmax2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vmax.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmax.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s, t, u;
        r = a & 0x0000ffff; // extract low halfword
        s = b & 0x0000ffff; // extract low halfword
        t = ::max(r, s);    // maximum of low halfwords
        r = a & 0xffff0000; // extract high halfword
        s = b & 0xffff0000; // extract high halfword
        u = ::max(r, s);    // maximum of high halfwords
        r = t | u;          // combine halfword maximums
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vmin2(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vmin2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vmin.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmin.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s, t, u;
        r = a & 0x0000ffff; // extract low halfword
        s = b & 0x0000ffff; // extract low halfword
        t = ::min(r, s);    // minimum of low halfwords
        r = a & 0xffff0000; // extract high halfword
        s = b & 0xffff0000; // extract high halfword
        u = ::min(r, s);    // minimum of high halfwords
        r = t | u;          // combine halfword minimums
    #endif

        return r;
    }

    // 4

    static __device__ __forceinline__ unsigned int vadd4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vadd4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vadd.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vadd.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vadd.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vadd.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s, t;
        s = a ^ b;          // sum bits
        r = a & 0x7f7f7f7f; // clear msbs
        t = b & 0x7f7f7f7f; // clear msbs
        s = s & 0x80808080; // msb sum bits
        r = r + t;          // add without msbs, record carry-out in msbs
        r = r ^ s;          // sum of msb sum and carry-in bits, w/o carry-out
    #endif /* __CUDA_ARCH__ >= 300 */

        return r;
    }

    static __device__ __forceinline__ unsigned int vsub4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vsub4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vsub.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vsub.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vsub.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vsub.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s, t;
        s = a ^ ~b;         // inverted sum bits
        r = a | 0x80808080; // set msbs
        t = b & 0x7f7f7f7f; // clear msbs
        s = s & 0x80808080; // inverted msb sum bits
        r = r - t;          // subtract w/o msbs, record inverted borrows in msb
        r = r ^ s;          // combine inverted msb sum bits and borrows
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vavg4(unsigned int a, unsigned int b)
    {
        unsigned int r, s;

        // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==>
        // (a + b) / 2 = (a & b) + ((a ^ b) >> 1)
        s = a ^ b;
        r = a & b;
        s = s & 0xfefefefe; // ensure following shift doesn't cross byte boundaries
        s = s >> 1;
        s = r + s;

        return s;
    }

    static __device__ __forceinline__ unsigned int vavrg4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vavrg4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==>
        // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1)
        unsigned int c;
        c = a ^ b;
        r = a | b;
        c = c & 0xfefefefe; // ensure following shift doesn't cross byte boundaries
        c = c >> 1;
        r = r - c;
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vseteq4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset4.u32.u32.eq %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        unsigned int c;
        r = a ^ b;          // 0x00 if a == b
        c = r | 0x80808080; // set msbs, to catch carry out
        r = r ^ c;          // extract msbs, msb = 1 if r < 0x80
        c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80
        c = r & ~c;         // msb = 1, if r was 0x00
        r = c >> 7;         // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpeq4(unsigned int a, unsigned int b)
    {
        unsigned int r, t;

    #if __CUDA_ARCH__ >= 300
        r = vseteq4(a, b);
        t = r << 8;         // convert bool
        r = t - r;          // to mask
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        t = a ^ b;          // 0x00 if a == b
        r = t | 0x80808080; // set msbs, to catch carry out
        t = t ^ r;          // extract msbs, msb = 1 if t < 0x80
        r = r - 0x01010101; // msb = 0, if t was 0x00 or 0x80
        r = t & ~r;         // msb = 1, if t was 0x00
        t = r >> 7;         // build mask
        t = r - t;          // from
        r = t | r;          // msbs
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetle4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset4.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavrg4(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2
        c = c & 0x80808080; // msb = carry-outs
        r = c >> 7;         // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmple4(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetle4(a, b);
        c = r << 8;         // convert bool
        r = c - r;          // to mask
    #else
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavrg4(a, b);   // (b + ~a + 1) / 2 = (b - a) / 2
        c = c & 0x80808080; // msbs = carry-outs
        r = c >> 7;         // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetlt4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset4.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavg4(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]
        c = c & 0x80808080; // msb = carry-outs
        r = c >> 7;         // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmplt4(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetlt4(a, b);
        c = r << 8;         // convert bool
        r = c - r;          // to mask
    #else
        asm("not.b32 %0, %0;" : "+r"(a));
        c = vavg4(a, b);    // (b + ~a) / 2 = (b - a) / 2 [rounded down]
        c = c & 0x80808080; // msbs = carry-outs
        r = c >> 7;         // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetge4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset4.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(b));
        c = vavrg4(a, b);   // (a + ~b + 1) / 2 = (a - b) / 2
        c = c & 0x80808080; // msb = carry-outs
        r = c >> 7;         // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpge4(unsigned int a, unsigned int b)
    {
        unsigned int r, s;

    #if __CUDA_ARCH__ >= 300
        r = vsetge4(a, b);
        s = r << 8;         // convert bool
        r = s - r;          // to mask
    #else
        asm ("not.b32 %0,%0;" : "+r"(b));
        r = vavrg4 (a, b);  // (a + ~b + 1) / 2 = (a - b) / 2
        r = r & 0x80808080; // msb = carry-outs
        s = r >> 7;         // build mask
        s = r - s;          // from
        r = s | r;          // msbs
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetgt4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset4.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int c;
        asm("not.b32 %0, %0;" : "+r"(b));
        c = vavg4(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]
        c = c & 0x80808080; // msb = carry-outs
        r = c >> 7;         // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpgt4(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetgt4(a, b);
        c = r << 8;         // convert bool
        r = c - r;          // to mask
    #else
        asm("not.b32 %0, %0;" : "+r"(b));
        c = vavg4(a, b);    // (a + ~b) / 2 = (a - b) / 2 [rounded down]
        c = c & 0x80808080; // msb = carry-outs
        r = c >> 7;         // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vsetne4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vset4.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        unsigned int c;
        r = a ^ b;          // 0x00 if a == b
        c = r | 0x80808080; // set msbs, to catch carry out
        c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80
        c = r | c;          // msb = 1, if r was not 0x00
        c = c & 0x80808080; // extract msbs
        r = c >> 7;         // convert to bool
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vcmpne4(unsigned int a, unsigned int b)
    {
        unsigned int r, c;

    #if __CUDA_ARCH__ >= 300
        r = vsetne4(a, b);
        c = r << 8;         // convert bool
        r = c - r;          // to mask
    #else
        // inspired by Alan Mycroft's null-byte detection algorithm:
        // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
        r = a ^ b;          // 0x00 if a == b
        c = r | 0x80808080; // set msbs, to catch carry out
        c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80
        c = r | c;          // msb = 1, if r was not 0x00
        c = c & 0x80808080; // extract msbs
        r = c >> 7;         // convert
        r = c - r;          // msbs to
        r = c | r;          // mask
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vabsdiff4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vabsdiff4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vabsdiff.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vabsdiff.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vabsdiff.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vabsdiff.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s;
        s = vcmpge4(a, b);  // mask = 0xff if a >= b
        r = a ^ b;          //
        s = (r & s) ^ b;    // select a when a >= b, else select b => max(a,b)
        r = s ^ r;          // select a when b >= a, else select b => min(a,b)
        r = s - r;          // |a - b| = max(a,b) - min(a,b);
    #endif

        return r;
    }

    static __device__ __forceinline__ unsigned int vmax4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vmax4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vmax.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmax.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmax.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmax.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s;
        s = vcmpge4(a, b);  // mask = 0xff if a >= b
        r = a & s;          // select a when b >= a
        s = b & ~s;         // select b when b < a
        r = r | s;          // combine byte selections
    #endif

        return r;           // byte-wise unsigned maximum
    }

    static __device__ __forceinline__ unsigned int vmin4(unsigned int a, unsigned int b)
    {
        unsigned int r = 0;

    #if __CUDA_ARCH__ >= 300
        asm("vmin4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #elif __CUDA_ARCH__ >= 200
        asm("vmin.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmin.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmin.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
        asm("vmin.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
    #else
        unsigned int s;
        s = vcmpge4(b, a);  // mask = 0xff if a >= b
        r = a & s;          // select a when b >= a
        s = b & ~s;         // select b when b < a
        r = r | s;          // combine byte selections
    #endif

        return r;
    }
}}}

//! @endcond

#endif // OPENCV_CUDA_SIMD_FUNCTIONS_HPP
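These intrinsics operate on two halfwords or four bytes packed into one 32-bit register, with inline-PTX fast paths per architecture and portable bit-trick fallbacks. A small illustrative kernel (hypothetical names, not part of the header) that computes a byte-wise absolute difference of two 8-bit images four pixels at a time:

```cpp
#include <opencv2/core/cuda/simd_functions.hpp>

using namespace cv::cuda::device;

// Hypothetical kernel: byte-wise |a - b| over images stored as packed
// 32-bit words (4 uchar pixels per word). 'n' is the word count.
__global__ void absDiff8u(const unsigned int* a, const unsigned int* b,
                          unsigned int* dst, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        dst[i] = vabsdiff4(a[i], b[i]);   // 4 pixels per call
}
```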
3rdparty/opencv/include/opencv2/core/cuda/transform.hpp
ADDED
@@ -0,0 +1,75 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_TRANSFORM_HPP
#define OPENCV_CUDA_TRANSFORM_HPP

#include "common.hpp"
#include "utility.hpp"
#include "detail/transform_detail.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    template <typename T, typename D, typename UnOp, typename Mask>
    static inline void transform(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, const Mask& mask, cudaStream_t stream)
    {
        typedef TransformFunctorTraits<UnOp> ft;
        transform_detail::TransformDispatcher<VecTraits<T>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src, dst, op, mask, stream);
    }

    template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
    static inline void transform(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, const Mask& mask, cudaStream_t stream)
    {
        typedef TransformFunctorTraits<BinOp> ft;
        transform_detail::TransformDispatcher<VecTraits<T1>::cn == 1 && VecTraits<T2>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src1, src2, dst, op, mask, stream);
    }
}}}

//! @endcond

#endif // OPENCV_CUDA_TRANSFORM_HPP
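The two overloads dispatch an element-wise unary or binary functor over whole images; the dispatcher picks a vectorized path when both source and destination are single-channel and the functor's traits allow it. A minimal host-side sketch, assuming the default `TransformFunctorTraits` and `unary_function` base from `functional.hpp` and the `WithOutMask` helper from `utility.hpp`; the functor and function names are illustrative only:

```cpp
#include <opencv2/core/cuda/functional.hpp>   // unary_function, TransformFunctorTraits
#include <opencv2/core/cuda/transform.hpp>

using namespace cv::cuda;

// Hypothetical element-wise op: scale a float image by a constant.
struct ScaleOp : device::unary_function<float, float>
{
    float s;
    explicit ScaleOp(float s_) : s(s_) {}
    __device__ __forceinline__ float operator()(float x) const { return s * x; }
};

// src and dst are device buffers already wrapped as PtrStepSz<float>.
void scaleImage(PtrStepSz<float> src, PtrStepSz<float> dst, float s, cudaStream_t stream)
{
    device::transform(src, dst, ScaleOp(s), device::WithOutMask(), stream);
}
```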
3rdparty/opencv/include/opencv2/core/cuda/type_traits.hpp
ADDED
@@ -0,0 +1,90 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_TYPE_TRAITS_HPP
#define OPENCV_CUDA_TYPE_TRAITS_HPP

#include "detail/type_traits_detail.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    template <typename T> struct IsSimpleParameter
    {
        enum {value = type_traits_detail::IsIntegral<T>::value || type_traits_detail::IsFloat<T>::value ||
            type_traits_detail::PointerTraits<typename type_traits_detail::ReferenceTraits<T>::type>::value};
    };

    template <typename T> struct TypeTraits
    {
        typedef typename type_traits_detail::UnConst<T>::type NonConstType;
        typedef typename type_traits_detail::UnVolatile<T>::type NonVolatileType;
        typedef typename type_traits_detail::UnVolatile<typename type_traits_detail::UnConst<T>::type>::type UnqualifiedType;
        typedef typename type_traits_detail::PointerTraits<UnqualifiedType>::type PointeeType;
        typedef typename type_traits_detail::ReferenceTraits<T>::type ReferredType;

        enum { isConst = type_traits_detail::UnConst<T>::value };
        enum { isVolatile = type_traits_detail::UnVolatile<T>::value };

        enum { isReference = type_traits_detail::ReferenceTraits<UnqualifiedType>::value };
        enum { isPointer = type_traits_detail::PointerTraits<typename type_traits_detail::ReferenceTraits<UnqualifiedType>::type>::value };

        enum { isUnsignedInt = type_traits_detail::IsUnsignedIntegral<UnqualifiedType>::value };
        enum { isSignedInt = type_traits_detail::IsSignedIntergral<UnqualifiedType>::value };
        enum { isIntegral = type_traits_detail::IsIntegral<UnqualifiedType>::value };
        enum { isFloat = type_traits_detail::IsFloat<UnqualifiedType>::value };
        enum { isArith = isIntegral || isFloat };
        enum { isVec = type_traits_detail::IsVec<UnqualifiedType>::value };

        typedef typename type_traits_detail::Select<IsSimpleParameter<UnqualifiedType>::value,
            T, typename type_traits_detail::AddParameterType<T>::type>::type ParameterType;
    };
}}}

//! @endcond

#endif // OPENCV_CUDA_TYPE_TRAITS_HPP
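`TypeTraits` exposes compile-time classifications and a `ParameterType` that passes "simple" types by value and everything else via `AddParameterType`. A short compile-time sketch of how the enums read (assuming a C++11 host compiler for `static_assert`; nothing here runs at runtime):

```cpp
#include <opencv2/core/cuda/type_traits.hpp>

using namespace cv::cuda::device;

static_assert(TypeTraits<float>::isFloat,               "float is a floating-point type");
static_assert(TypeTraits<unsigned int>::isUnsignedInt,  "unsigned int is an unsigned integral");
static_assert(!TypeTraits<int*>::isArith,               "a pointer is neither integral nor float");

// ParameterType: simple types (integrals, floats, pointers) stay by value,
// everything else is wrapped by AddParameterType.
typedef TypeTraits<double>::ParameterType DoubleParam;  // double, passed by value
```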
3rdparty/opencv/include/opencv2/core/cuda/utility.hpp
ADDED
@@ -0,0 +1,230 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_UTILITY_HPP
#define OPENCV_CUDA_UTILITY_HPP

#include "saturate_cast.hpp"
#include "datamov_utils.hpp"

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    struct CV_EXPORTS ThrustAllocator
    {
        typedef uchar value_type;
        virtual ~ThrustAllocator();
        virtual __device__ __host__ uchar* allocate(size_t numBytes) = 0;
        virtual __device__ __host__ void deallocate(uchar* ptr, size_t numBytes) = 0;
        static ThrustAllocator& getAllocator();
        static void setAllocator(ThrustAllocator* allocator);
    };
    #define OPENCV_CUDA_LOG_WARP_SIZE (5)
    #define OPENCV_CUDA_WARP_SIZE (1 << OPENCV_CUDA_LOG_WARP_SIZE)
    #define OPENCV_CUDA_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 5 : 4) // 32 banks on fermi, 16 on tesla
    #define OPENCV_CUDA_MEM_BANKS (1 << OPENCV_CUDA_LOG_MEM_BANKS)

    ///////////////////////////////////////////////////////////////////////////////
    // swap

    template <typename T> void __device__ __host__ __forceinline__ swap(T& a, T& b)
    {
        const T temp = a;
        a = b;
        b = temp;
    }

    ///////////////////////////////////////////////////////////////////////////////
    // Mask Reader

    struct SingleMask
    {
        explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {}
        __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_): mask(mask_.mask){}

        __device__ __forceinline__ bool operator()(int y, int x) const
        {
            return mask.ptr(y)[x] != 0;
        }

        PtrStepb mask;
    };

    struct SingleMaskChannels
    {
        __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_)
        : mask(mask_), channels(channels_) {}
        __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_)
        :mask(mask_.mask), channels(mask_.channels){}

        __device__ __forceinline__ bool operator()(int y, int x) const
        {
            return mask.ptr(y)[x / channels] != 0;
        }

        PtrStepb mask;
        int channels;
    };

    struct MaskCollection
    {
        explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_)
        : maskCollection(maskCollection_) {}

        __device__ __forceinline__ MaskCollection(const MaskCollection& masks_)
        : maskCollection(masks_.maskCollection), curMask(masks_.curMask){}

        __device__ __forceinline__ void next()
        {
            curMask = *maskCollection++;
        }
        __device__ __forceinline__ void setMask(int z)
        {
            curMask = maskCollection[z];
        }

        __device__ __forceinline__ bool operator()(int y, int x) const
        {
            uchar val;
            return curMask.data == 0 || (ForceGlob<uchar>::Load(curMask.ptr(y), x, val), (val != 0));
        }

        const PtrStepb* maskCollection;
        PtrStepb curMask;
    };

    struct WithOutMask
    {
        __host__ __device__ __forceinline__ WithOutMask(){}
        __host__ __device__ __forceinline__ WithOutMask(const WithOutMask&){}

        __device__ __forceinline__ void next() const
        {
        }
        __device__ __forceinline__ void setMask(int) const
        {
        }

        __device__ __forceinline__ bool operator()(int, int) const
        {
            return true;
        }

        __device__ __forceinline__ bool operator()(int, int, int) const
        {
            return true;
        }

        static __device__ __forceinline__ bool check(int, int)
        {
            return true;
        }

        static __device__ __forceinline__ bool check(int, int, int)
        {
            return true;
        }
    };

    ///////////////////////////////////////////////////////////////////////////////
    // Solve linear system

    // solve 2x2 linear system Ax=b
    template <typename T> __device__ __forceinline__ bool solve2x2(const T A[2][2], const T b[2], T x[2])
    {
        T det = A[0][0] * A[1][1] - A[1][0] * A[0][1];

        if (det != 0)
        {
            double invdet = 1.0 / det;

            x[0] = saturate_cast<T>(invdet * (b[0] * A[1][1] - b[1] * A[0][1]));

            x[1] = saturate_cast<T>(invdet * (A[0][0] * b[1] - A[1][0] * b[0]));

            return true;
        }

        return false;
    }

    // solve 3x3 linear system Ax=b
    template <typename T> __device__ __forceinline__ bool solve3x3(const T A[3][3], const T b[3], T x[3])
    {
        T det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1])
              - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0])
              + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]);

        if (det != 0)
        {
            double invdet = 1.0 / det;

            x[0] = saturate_cast<T>(invdet *
                (b[0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) -
                 A[0][1] * (b[1] * A[2][2] - A[1][2] * b[2]) +
                 A[0][2] * (b[1] * A[2][1] - A[1][1] * b[2])));

            x[1] = saturate_cast<T>(invdet *
                (A[0][0] * (b[1] * A[2][2] - A[1][2] * b[2]) -
                 b[0] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) +
                 A[0][2] * (A[1][0] * b[2] - b[1] * A[2][0])));

            x[2] = saturate_cast<T>(invdet *
                (A[0][0] * (A[1][1] * b[2] - b[1] * A[2][1]) -
                 A[0][1] * (A[1][0] * b[2] - b[1] * A[2][0]) +
                 b[0] * (A[1][0] * A[2][1] - A[1][1] * A[2][0])));

            return true;
        }

        return false;
    }
}}} // namespace cv { namespace cuda { namespace cudev

//! @endcond

#endif // OPENCV_CUDA_UTILITY_HPP
3rdparty/opencv/include/opencv2/core/cuda/vec_distance.hpp
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_VEC_DISTANCE_HPP
|
| 44 |
+
#define OPENCV_CUDA_VEC_DISTANCE_HPP
|
| 45 |
+
|
| 46 |
+
#include "reduce.hpp"
|
| 47 |
+
#include "functional.hpp"
|
| 48 |
+
#include "detail/vec_distance_detail.hpp"
|
| 49 |
+
|
| 50 |
+
/** @file
|
| 51 |
+
* @deprecated Use @ref cudev instead.
|
| 52 |
+
*/
|
| 53 |
+
|
| 54 |
+
//! @cond IGNORED
|
| 55 |
+
|
| 56 |
+
namespace cv { namespace cuda { namespace device
|
| 57 |
+
{
|
| 58 |
+
template <typename T> struct L1Dist
|
| 59 |
+
{
|
| 60 |
+
typedef int value_type;
|
| 61 |
+
typedef int result_type;
|
| 62 |
+
|
| 63 |
+
__device__ __forceinline__ L1Dist() : mySum(0) {}
|
| 64 |
+
|
| 65 |
+
__device__ __forceinline__ void reduceIter(int val1, int val2)
|
| 66 |
+
{
|
| 67 |
+
mySum = __sad(val1, val2, mySum);
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(int* smem, int tid)
|
| 71 |
+
{
|
| 72 |
+
reduce<THREAD_DIM>(smem, mySum, tid, plus<int>());
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
__device__ __forceinline__ operator int() const
|
| 76 |
+
{
|
| 77 |
+
return mySum;
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
int mySum;
|
| 81 |
+
};
|
| 82 |
+
template <> struct L1Dist<float>
|
| 83 |
+
{
|
| 84 |
+
typedef float value_type;
|
| 85 |
+
typedef float result_type;
|
| 86 |
+
|
| 87 |
+
__device__ __forceinline__ L1Dist() : mySum(0.0f) {}
|
| 88 |
+
|
| 89 |
+
__device__ __forceinline__ void reduceIter(float val1, float val2)
|
| 90 |
+
{
|
| 91 |
+
mySum += ::fabs(val1 - val2);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(float* smem, int tid)
|
| 95 |
+
{
|
| 96 |
+
reduce<THREAD_DIM>(smem, mySum, tid, plus<float>());
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
__device__ __forceinline__ operator float() const
|
| 100 |
+
{
|
| 101 |
+
return mySum;
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
float mySum;
|
| 105 |
+
};
|
| 106 |
+
|
| 107 |
+
struct L2Dist
|
| 108 |
+
{
|
| 109 |
+
typedef float value_type;
|
| 110 |
+
typedef float result_type;
|
| 111 |
+
|
| 112 |
+
__device__ __forceinline__ L2Dist() : mySum(0.0f) {}
|
| 113 |
+
|
| 114 |
+
__device__ __forceinline__ void reduceIter(float val1, float val2)
|
| 115 |
+
{
|
| 116 |
+
float reg = val1 - val2;
|
| 117 |
+
mySum += reg * reg;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(float* smem, int tid)
|
| 121 |
+
{
|
| 122 |
+
reduce<THREAD_DIM>(smem, mySum, tid, plus<float>());
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
__device__ __forceinline__ operator float() const
|
| 126 |
+
{
|
| 127 |
+
return sqrtf(mySum);
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
float mySum;
|
| 131 |
+
};
|
| 132 |
+
|
| 133 |
+
struct HammingDist
|
| 134 |
+
{
|
| 135 |
+
typedef int value_type;
|
| 136 |
+
typedef int result_type;
|
| 137 |
+
|
| 138 |
+
__device__ __forceinline__ HammingDist() : mySum(0) {}
|
| 139 |
+
|
| 140 |
+
__device__ __forceinline__ void reduceIter(int val1, int val2)
|
| 141 |
+
{
|
| 142 |
+
mySum += __popc(val1 ^ val2);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(int* smem, int tid)
|
| 146 |
+
{
|
| 147 |
+
reduce<THREAD_DIM>(smem, mySum, tid, plus<int>());
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
__device__ __forceinline__ operator int() const
|
| 151 |
+
{
|
| 152 |
+
return mySum;
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
int mySum;
|
| 156 |
+
};
|
| 157 |
+
|
| 158 |
+
// calc distance between two vectors in global memory
|
| 159 |
+
template <int THREAD_DIM, typename Dist, typename T1, typename T2>
|
| 160 |
+
__device__ void calcVecDiffGlobal(const T1* vec1, const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid)
|
| 161 |
+
{
|
| 162 |
+
for (int i = tid; i < len; i += THREAD_DIM)
|
| 163 |
+
{
|
| 164 |
+
T1 val1;
|
| 165 |
+
ForceGlob<T1>::Load(vec1, i, val1);
|
| 166 |
+
|
| 167 |
+
T2 val2;
|
| 168 |
+
ForceGlob<T2>::Load(vec2, i, val2);
|
| 169 |
+
|
| 170 |
+
dist.reduceIter(val1, val2);
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
dist.reduceAll<THREAD_DIM>(smem, tid);
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
// calc distance between two vectors, first vector is cached in register or shared memory, second vector is in global memory
|
| 177 |
+
template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T1, typename T2>
|
| 178 |
+
__device__ __forceinline__ void calcVecDiffCached(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, typename Dist::result_type* smem, int tid)
|
| 179 |
+
{
|
| 180 |
+
vec_distance_detail::VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, LEN_EQ_MAX_LEN>::calc(vecCached, vecGlob, len, dist, tid);
|
| 181 |
+
|
| 182 |
+
dist.reduceAll<THREAD_DIM>(smem, tid);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
// calc distance between two vectors in global memory
|
| 186 |
+
template <int THREAD_DIM, typename T1> struct VecDiffGlobal
|
| 187 |
+
{
|
| 188 |
+
explicit __device__ __forceinline__ VecDiffGlobal(const T1* vec1_, int = 0, void* = 0, int = 0, int = 0)
|
| 189 |
+
{
|
| 190 |
+
vec1 = vec1_;
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
template <typename T2, typename Dist>
|
| 194 |
+
__device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const
|
| 195 |
+
{
|
| 196 |
+
calcVecDiffGlobal<THREAD_DIM>(vec1, vec2, len, dist, smem, tid);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
const T1* vec1;
|
| 200 |
+
};
|
| 201 |
+
|
| 202 |
+
// calc distance between two vectors, first vector is cached in register memory, second vector is in global memory
|
| 203 |
+
template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename U> struct VecDiffCachedRegister
|
| 204 |
+
{
|
| 205 |
+
template <typename T1> __device__ __forceinline__ VecDiffCachedRegister(const T1* vec1, int len, U* smem, int glob_tid, int tid)
|
| 206 |
+
{
|
| 207 |
+
if (glob_tid < len)
|
| 208 |
+
smem[glob_tid] = vec1[glob_tid];
|
| 209 |
+
__syncthreads();
|
| 210 |
+
|
| 211 |
+
U* vec1ValsPtr = vec1Vals;
|
| 212 |
+
|
| 213 |
+
#pragma unroll
|
| 214 |
+
for (int i = tid; i < MAX_LEN; i += THREAD_DIM)
|
| 215 |
+
*vec1ValsPtr++ = smem[i];
|
| 216 |
+
|
| 217 |
+
__syncthreads();
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
template <typename T2, typename Dist>
|
| 221 |
+
__device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const
|
| 222 |
+
{
|
| 223 |
+
calcVecDiffCached<THREAD_DIM, MAX_LEN, LEN_EQ_MAX_LEN>(vec1Vals, vec2, len, dist, smem, tid);
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
U vec1Vals[MAX_LEN / THREAD_DIM];
|
| 227 |
+
};
|
| 228 |
+
}}} // namespace cv { namespace cuda { namespace cudev
|
| 229 |
+
|
| 230 |
+
//! @endcond
|
| 231 |
+
|
| 232 |
+
#endif // OPENCV_CUDA_VEC_DISTANCE_HPP
|
3rdparty/opencv/include/opencv2/core/cuda/vec_math.hpp
ADDED
|
@@ -0,0 +1,923 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_VECMATH_HPP
|
| 44 |
+
#define OPENCV_CUDA_VECMATH_HPP
|
| 45 |
+
|
| 46 |
+
#include "vec_traits.hpp"
|
| 47 |
+
#include "saturate_cast.hpp"
|
| 48 |
+
|
| 49 |
+
/** @file
|
| 50 |
+
* @deprecated Use @ref cudev instead.
|
| 51 |
+
*/
|
| 52 |
+
|
| 53 |
+
//! @cond IGNORED
|
| 54 |
+
|
| 55 |
+
namespace cv { namespace cuda { namespace device
|
| 56 |
+
{
|
| 57 |
+
|
| 58 |
+
// saturate_cast
|
| 59 |
+
|
| 60 |
+
namespace vec_math_detail
|
| 61 |
+
{
|
| 62 |
+
template <int cn, typename VecD> struct SatCastHelper;
|
| 63 |
+
template <typename VecD> struct SatCastHelper<1, VecD>
|
| 64 |
+
{
|
| 65 |
+
template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
|
| 66 |
+
{
|
| 67 |
+
typedef typename VecTraits<VecD>::elem_type D;
|
| 68 |
+
return VecTraits<VecD>::make(saturate_cast<D>(v.x));
|
| 69 |
+
}
|
| 70 |
+
};
|
| 71 |
+
template <typename VecD> struct SatCastHelper<2, VecD>
|
| 72 |
+
{
|
| 73 |
+
template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
|
| 74 |
+
{
|
| 75 |
+
typedef typename VecTraits<VecD>::elem_type D;
|
| 76 |
+
return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y));
|
| 77 |
+
}
|
| 78 |
+
};
|
| 79 |
+
template <typename VecD> struct SatCastHelper<3, VecD>
|
| 80 |
+
{
|
| 81 |
+
template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
|
| 82 |
+
{
|
| 83 |
+
typedef typename VecTraits<VecD>::elem_type D;
|
| 84 |
+
return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y), saturate_cast<D>(v.z));
|
| 85 |
+
}
|
| 86 |
+
};
|
| 87 |
+
template <typename VecD> struct SatCastHelper<4, VecD>
|
| 88 |
+
{
|
| 89 |
+
template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
|
| 90 |
+
{
|
| 91 |
+
typedef typename VecTraits<VecD>::elem_type D;
|
| 92 |
+
return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y), saturate_cast<D>(v.z), saturate_cast<D>(v.w));
|
| 93 |
+
}
|
| 94 |
+
};
|
| 95 |
+
|
| 96 |
+
template <typename VecD, typename VecS> static __device__ __forceinline__ VecD saturate_cast_helper(const VecS& v)
|
| 97 |
+
{
|
| 98 |
+
return SatCastHelper<VecTraits<VecD>::cn, VecD>::cast(v);
|
| 99 |
+
}
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 103 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const char1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 104 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 105 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const short1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 106 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uint1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 107 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const int1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 108 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const float1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 109 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const double1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 110 |
+
|
| 111 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 112 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const char2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 113 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 114 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const short2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 115 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uint2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 116 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const int2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 117 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const float2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 118 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const double2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 119 |
+
|
| 120 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 121 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const char3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 122 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 123 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const short3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 124 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uint3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 125 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const int3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 126 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const float3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 127 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const double3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 128 |
+
|
| 129 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 130 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const char4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 131 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 132 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const short4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 133 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const uint4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 134 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const int4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 135 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const float4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 136 |
+
template<typename T> static __device__ __forceinline__ T saturate_cast(const double4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
|
| 137 |
+
|
| 138 |
+
// unary operators
|
| 139 |
+
|
| 140 |
+
#define CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(op, input_type, output_type) \
|
| 141 |
+
__device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a) \
|
| 142 |
+
{ \
|
| 143 |
+
return VecTraits<output_type ## 1>::make(op (a.x)); \
|
| 144 |
+
} \
|
| 145 |
+
__device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a) \
|
| 146 |
+
{ \
|
| 147 |
+
return VecTraits<output_type ## 2>::make(op (a.x), op (a.y)); \
|
| 148 |
+
} \
|
| 149 |
+
__device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a) \
|
| 150 |
+
{ \
|
| 151 |
+
return VecTraits<output_type ## 3>::make(op (a.x), op (a.y), op (a.z)); \
|
| 152 |
+
} \
|
| 153 |
+
__device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a) \
|
| 154 |
+
{ \
|
| 155 |
+
return VecTraits<output_type ## 4>::make(op (a.x), op (a.y), op (a.z), op (a.w)); \
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, char, char)
|
| 159 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, short, short)
|
| 160 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, int, int)
|
| 161 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, float, float)
|
| 162 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, double, double)
|
| 163 |
+
|
| 164 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uchar, uchar)
|
| 165 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, char, uchar)
|
| 166 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, ushort, uchar)
|
| 167 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, short, uchar)
|
| 168 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, int, uchar)
|
| 169 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uint, uchar)
|
| 170 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, float, uchar)
|
| 171 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, double, uchar)
|
| 172 |
+
|
| 173 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uchar, uchar)
|
| 174 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, char, char)
|
| 175 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, ushort, ushort)
|
| 176 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, short, short)
|
| 177 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, int, int)
|
| 178 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uint, uint)
|
| 179 |
+
|
| 180 |
+
#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_OP
|
| 181 |
+
|
| 182 |
+
// unary functions
|
| 183 |
+
|
| 184 |
+
#define CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(func_name, func, input_type, output_type) \
|
| 185 |
+
__device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a) \
|
| 186 |
+
{ \
|
| 187 |
+
return VecTraits<output_type ## 1>::make(func (a.x)); \
|
| 188 |
+
} \
|
| 189 |
+
__device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a) \
|
| 190 |
+
{ \
|
| 191 |
+
return VecTraits<output_type ## 2>::make(func (a.x), func (a.y)); \
|
| 192 |
+
} \
|
| 193 |
+
__device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a) \
|
| 194 |
+
{ \
|
| 195 |
+
return VecTraits<output_type ## 3>::make(func (a.x), func (a.y), func (a.z)); \
|
| 196 |
+
} \
|
| 197 |
+
__device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a) \
|
| 198 |
+
{ \
|
| 199 |
+
return VecTraits<output_type ## 4>::make(func (a.x), func (a.y), func (a.z), func (a.w)); \
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabsf, float, float)
|
| 203 |
+
|
| 204 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uchar, float)
|
| 205 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, char, float)
|
| 206 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, ushort, float)
|
| 207 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, short, float)
|
| 208 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, int, float)
|
| 209 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uint, float)
|
| 210 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, float, float)
|
| 211 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrt, double, double)
|
| 212 |
+
|
| 213 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uchar, float)
|
| 214 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, char, float)
|
| 215 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, ushort, float)
|
| 216 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, short, float)
|
| 217 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, int, float)
|
| 218 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uint, float)
|
| 219 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, float, float)
|
| 220 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::exp, double, double)
|
| 221 |
+
|
| 222 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uchar, float)
|
| 223 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, char, float)
|
| 224 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, ushort, float)
|
| 225 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, short, float)
|
| 226 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, int, float)
|
| 227 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uint, float)
|
| 228 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, float, float)
|
| 229 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2, double, double)
|
| 230 |
+
|
| 231 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uchar, float)
|
| 232 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, char, float)
|
| 233 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, ushort, float)
|
| 234 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, short, float)
|
| 235 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, int, float)
|
| 236 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uint, float)
|
| 237 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, float, float)
|
| 238 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10, double, double)
|
| 239 |
+
|
| 240 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uchar, float)
|
| 241 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, char, float)
|
| 242 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, ushort, float)
|
| 243 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, short, float)
|
| 244 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, int, float)
|
| 245 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uint, float)
|
| 246 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, float, float)
|
| 247 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::log, double, double)
|
| 248 |
+
|
| 249 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uchar, float)
|
| 250 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, char, float)
|
| 251 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, ushort, float)
|
| 252 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, short, float)
|
| 253 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, int, float)
|
| 254 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uint, float)
|
| 255 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, float, float)
|
| 256 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2, double, double)
|
| 257 |
+
|
| 258 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uchar, float)
|
| 259 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, char, float)
|
| 260 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, ushort, float)
|
| 261 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, short, float)
|
| 262 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, int, float)
|
| 263 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uint, float)
|
| 264 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, float, float)
|
| 265 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10, double, double)
|
| 266 |
+
|
| 267 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uchar, float)
|
| 268 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, char, float)
|
| 269 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, ushort, float)
|
| 270 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, short, float)
|
| 271 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, int, float)
|
| 272 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uint, float)
|
| 273 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, float, float)
|
| 274 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sin, double, double)
|
| 275 |
+
|
| 276 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uchar, float)
|
| 277 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, char, float)
|
| 278 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, ushort, float)
|
| 279 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, short, float)
|
| 280 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, int, float)
|
| 281 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uint, float)
|
| 282 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, float, float)
|
| 283 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cos, double, double)
|
| 284 |
+
|
| 285 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uchar, float)
|
| 286 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, char, float)
|
| 287 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, ushort, float)
|
| 288 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, short, float)
|
| 289 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, int, float)
|
| 290 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uint, float)
|
| 291 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, float, float)
|
| 292 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tan, double, double)
|
| 293 |
+
|
| 294 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uchar, float)
|
| 295 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, char, float)
|
| 296 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, ushort, float)
|
| 297 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, short, float)
|
| 298 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, int, float)
|
| 299 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uint, float)
|
| 300 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, float, float)
|
| 301 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asin, double, double)
|
| 302 |
+
|
| 303 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uchar, float)
|
| 304 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, char, float)
|
| 305 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, ushort, float)
|
| 306 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, short, float)
|
| 307 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, int, float)
|
| 308 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uint, float)
|
| 309 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, float, float)
|
| 310 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acos, double, double)
|
| 311 |
+
|
| 312 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uchar, float)
|
| 313 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, char, float)
|
| 314 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, ushort, float)
|
| 315 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, short, float)
|
| 316 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, int, float)
|
| 317 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uint, float)
|
| 318 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, float, float)
|
| 319 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atan, double, double)
|
| 320 |
+
|
| 321 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uchar, float)
|
| 322 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, char, float)
|
| 323 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, ushort, float)
|
| 324 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, short, float)
|
| 325 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, int, float)
|
| 326 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uint, float)
|
| 327 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, float, float)
|
| 328 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinh, double, double)
|
| 329 |
+
|
| 330 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uchar, float)
|
| 331 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, char, float)
|
| 332 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, ushort, float)
|
| 333 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, short, float)
|
| 334 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, int, float)
|
| 335 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uint, float)
|
| 336 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, float, float)
|
| 337 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::cosh, double, double)
|
| 338 |
+
|
| 339 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uchar, float)
|
| 340 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, char, float)
|
| 341 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, ushort, float)
|
| 342 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, short, float)
|
| 343 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, int, float)
|
| 344 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uint, float)
|
| 345 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, float, float)
|
| 346 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanh, double, double)
|
| 347 |
+
|
| 348 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uchar, float)
|
| 349 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, char, float)
|
| 350 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, ushort, float)
|
| 351 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, short, float)
|
| 352 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, int, float)
|
| 353 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uint, float)
|
| 354 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, float, float)
|
| 355 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinh, double, double)
|
| 356 |
+
|
| 357 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uchar, float)
|
| 358 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, char, float)
|
| 359 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, ushort, float)
|
| 360 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, short, float)
|
| 361 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, int, float)
|
| 362 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uint, float)
|
| 363 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, float, float)
|
| 364 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acosh, double, double)
|
| 365 |
+
|
| 366 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uchar, float)
|
| 367 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, char, float)
|
| 368 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, ushort, float)
|
| 369 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, short, float)
|
| 370 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, int, float)
|
| 371 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uint, float)
|
| 372 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, float, float)
|
| 373 |
+
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanh, double, double)
|
| 374 |
+
|
| 375 |
+
#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC
|
| 376 |
+
|
| 377 |
+
// binary operators (vec & vec)
|
| 378 |
+
|
| 379 |
+
#define CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(op, input_type, output_type) \
|
| 380 |
+
__device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, const input_type ## 1 & b) \
|
| 381 |
+
{ \
|
| 382 |
+
return VecTraits<output_type ## 1>::make(a.x op b.x); \
|
| 383 |
+
} \
|
| 384 |
+
__device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, const input_type ## 2 & b) \
|
| 385 |
+
{ \
|
| 386 |
+
return VecTraits<output_type ## 2>::make(a.x op b.x, a.y op b.y); \
|
| 387 |
+
} \
|
| 388 |
+
__device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, const input_type ## 3 & b) \
|
| 389 |
+
{ \
|
| 390 |
+
return VecTraits<output_type ## 3>::make(a.x op b.x, a.y op b.y, a.z op b.z); \
|
| 391 |
+
} \
|
| 392 |
+
__device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, const input_type ## 4 & b) \
|
| 393 |
+
{ \
|
| 394 |
+
return VecTraits<output_type ## 4>::make(a.x op b.x, a.y op b.y, a.z op b.z, a.w op b.w); \
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uchar, int)
|
| 398 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, char, int)
|
| 399 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, ushort, int)
|
| 400 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, short, int)
|
| 401 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, int, int)
|
| 402 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uint, uint)
|
| 403 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, float, float)
|
| 404 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, double, double)
|
| 405 |
+
|
| 406 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uchar, int)
|
| 407 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, char, int)
|
| 408 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, ushort, int)
|
| 409 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, short, int)
|
| 410 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, int, int)
|
| 411 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uint, uint)
|
| 412 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, float, float)
|
| 413 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, double, double)
|
| 414 |
+
|
| 415 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uchar, int)
|
| 416 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, char, int)
|
| 417 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, ushort, int)
|
| 418 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, short, int)
|
| 419 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, int, int)
|
| 420 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uint, uint)
|
| 421 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, float, float)
|
| 422 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, double, double)
|
| 423 |
+
|
| 424 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uchar, int)
|
| 425 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, char, int)
|
| 426 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, ushort, int)
|
| 427 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, short, int)
|
| 428 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, int, int)
|
| 429 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uint, uint)
|
| 430 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, float, float)
|
| 431 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, double, double)
|
| 432 |
+
|
| 433 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uchar, uchar)
|
| 434 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, char, uchar)
|
| 435 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, ushort, uchar)
|
| 436 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, short, uchar)
|
| 437 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, int, uchar)
|
| 438 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uint, uchar)
|
| 439 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, float, uchar)
|
| 440 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, double, uchar)
|
| 441 |
+
|
| 442 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uchar, uchar)
|
| 443 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, char, uchar)
|
| 444 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, ushort, uchar)
|
| 445 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, short, uchar)
|
| 446 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, int, uchar)
|
| 447 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uint, uchar)
|
| 448 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, float, uchar)
|
| 449 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, double, uchar)
|
| 450 |
+
|
| 451 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uchar, uchar)
|
| 452 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, char, uchar)
|
| 453 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, ushort, uchar)
|
| 454 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, short, uchar)
|
| 455 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, int, uchar)
|
| 456 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uint, uchar)
|
| 457 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, float, uchar)
|
| 458 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, double, uchar)
|
| 459 |
+
|
| 460 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uchar, uchar)
|
| 461 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, char, uchar)
|
| 462 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, ushort, uchar)
|
| 463 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, short, uchar)
|
| 464 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, int, uchar)
|
| 465 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uint, uchar)
|
| 466 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, float, uchar)
|
| 467 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, double, uchar)
|
| 468 |
+
|
| 469 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uchar, uchar)
|
| 470 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, char, uchar)
|
| 471 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, ushort, uchar)
|
| 472 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, short, uchar)
|
| 473 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, int, uchar)
|
| 474 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uint, uchar)
|
| 475 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, float, uchar)
|
| 476 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, double, uchar)
|
| 477 |
+
|
| 478 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uchar, uchar)
|
| 479 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, char, uchar)
|
| 480 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, ushort, uchar)
|
| 481 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, short, uchar)
|
| 482 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, int, uchar)
|
| 483 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uint, uchar)
|
| 484 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, float, uchar)
|
| 485 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, double, uchar)
|
| 486 |
+
|
| 487 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uchar, uchar)
|
| 488 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, char, uchar)
|
| 489 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, ushort, uchar)
|
| 490 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, short, uchar)
|
| 491 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, int, uchar)
|
| 492 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uint, uchar)
|
| 493 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, float, uchar)
|
| 494 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, double, uchar)
|
| 495 |
+
|
| 496 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uchar, uchar)
|
| 497 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, char, uchar)
|
| 498 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, ushort, uchar)
|
| 499 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, short, uchar)
|
| 500 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, int, uchar)
|
| 501 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uint, uchar)
|
| 502 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, float, uchar)
|
| 503 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, double, uchar)
|
| 504 |
+
|
| 505 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uchar, uchar)
|
| 506 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, char, char)
|
| 507 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, ushort, ushort)
|
| 508 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, short, short)
|
| 509 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, int, int)
|
| 510 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uint, uint)
|
| 511 |
+
|
| 512 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uchar, uchar)
|
| 513 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, char, char)
|
| 514 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, ushort, ushort)
|
| 515 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, short, short)
|
| 516 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, int, int)
|
| 517 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uint, uint)
|
| 518 |
+
|
| 519 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uchar, uchar)
|
| 520 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, char, char)
|
| 521 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, ushort, ushort)
|
| 522 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, short, short)
|
| 523 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, int, int)
|
| 524 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uint, uint)
|
| 525 |
+
|
| 526 |
+
#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_OP
|
| 527 |
+
|
| 528 |
+
// binary operators (vec & scalar)
|
| 529 |
+
|
| 530 |
+
#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(op, input_type, scalar_type, output_type) \
|
| 531 |
+
__device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, scalar_type s) \
|
| 532 |
+
{ \
|
| 533 |
+
return VecTraits<output_type ## 1>::make(a.x op s); \
|
| 534 |
+
} \
|
| 535 |
+
__device__ __forceinline__ output_type ## 1 operator op(scalar_type s, const input_type ## 1 & b) \
|
| 536 |
+
{ \
|
| 537 |
+
return VecTraits<output_type ## 1>::make(s op b.x); \
|
| 538 |
+
} \
|
| 539 |
+
__device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, scalar_type s) \
|
| 540 |
+
{ \
|
| 541 |
+
return VecTraits<output_type ## 2>::make(a.x op s, a.y op s); \
|
| 542 |
+
} \
|
| 543 |
+
__device__ __forceinline__ output_type ## 2 operator op(scalar_type s, const input_type ## 2 & b) \
|
| 544 |
+
{ \
|
| 545 |
+
return VecTraits<output_type ## 2>::make(s op b.x, s op b.y); \
|
| 546 |
+
} \
|
| 547 |
+
__device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, scalar_type s) \
|
| 548 |
+
{ \
|
| 549 |
+
return VecTraits<output_type ## 3>::make(a.x op s, a.y op s, a.z op s); \
|
| 550 |
+
} \
|
| 551 |
+
__device__ __forceinline__ output_type ## 3 operator op(scalar_type s, const input_type ## 3 & b) \
|
| 552 |
+
{ \
|
| 553 |
+
return VecTraits<output_type ## 3>::make(s op b.x, s op b.y, s op b.z); \
|
| 554 |
+
} \
|
| 555 |
+
__device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, scalar_type s) \
|
| 556 |
+
{ \
|
| 557 |
+
return VecTraits<output_type ## 4>::make(a.x op s, a.y op s, a.z op s, a.w op s); \
|
| 558 |
+
} \
|
| 559 |
+
__device__ __forceinline__ output_type ## 4 operator op(scalar_type s, const input_type ## 4 & b) \
|
| 560 |
+
{ \
|
| 561 |
+
return VecTraits<output_type ## 4>::make(s op b.x, s op b.y, s op b.z, s op b.w); \
|
| 562 |
+
}
|
| 563 |
+
|
| 564 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, int, int)
|
| 565 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, float, float)
|
| 566 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, double, double)
|
| 567 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, int, int)
|
| 568 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, float, float)
|
| 569 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, double, double)
|
| 570 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, int, int)
|
| 571 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, float, float)
|
| 572 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, double, double)
|
| 573 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, int, int)
|
| 574 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, float, float)
|
| 575 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, double, double)
|
| 576 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, int, int)
|
| 577 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, float, float)
|
| 578 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, double, double)
|
| 579 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, uint, uint)
|
| 580 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, float, float)
|
| 581 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, double, double)
|
| 582 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, float, float)
|
| 583 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, double, double)
|
| 584 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, double, double, double)
|
| 585 |
+
|
| 586 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, int, int)
|
| 587 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, float, float)
|
| 588 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, double, double)
|
| 589 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, int, int)
|
| 590 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, float, float)
|
| 591 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, double, double)
|
| 592 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, int, int)
|
| 593 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, float, float)
|
| 594 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, double, double)
|
| 595 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, int, int)
|
| 596 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, float, float)
|
| 597 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, double, double)
|
| 598 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, int, int)
|
| 599 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, float, float)
|
| 600 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, double, double)
|
| 601 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, uint, uint)
|
| 602 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, float, float)
|
| 603 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, double, double)
|
| 604 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, float, float)
|
| 605 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, double, double)
|
| 606 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, double, double, double)
|
| 607 |
+
|
| 608 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, int, int)
|
| 609 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, float, float)
|
| 610 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, double, double)
|
| 611 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, int, int)
|
| 612 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, float, float)
|
| 613 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, double, double)
|
| 614 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, int, int)
|
| 615 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, float, float)
|
| 616 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, double, double)
|
| 617 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, int, int)
|
| 618 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, float, float)
|
| 619 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, double, double)
|
| 620 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, int, int)
|
| 621 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, float, float)
|
| 622 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, double, double)
|
| 623 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, uint, uint)
|
| 624 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, float, float)
|
| 625 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, double, double)
|
| 626 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, float, float)
|
| 627 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, double, double)
|
| 628 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, double, double, double)
|
| 629 |
+
|
| 630 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, int, int)
|
| 631 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, float, float)
|
| 632 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, double, double)
|
| 633 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, int, int)
|
| 634 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, float, float)
|
| 635 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, double, double)
|
| 636 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, int, int)
|
| 637 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, float, float)
|
| 638 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, double, double)
|
| 639 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, int, int)
|
| 640 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, float, float)
|
| 641 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, double, double)
|
| 642 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, int, int)
|
| 643 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, float, float)
|
| 644 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, double, double)
|
| 645 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, uint, uint)
|
| 646 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, float, float)
|
| 647 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, double, double)
|
| 648 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, float, float)
|
| 649 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, double, double)
|
| 650 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, double, double, double)
|
| 651 |
+
|
| 652 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uchar, uchar, uchar)
|
| 653 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, char, char, uchar)
|
| 654 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, ushort, ushort, uchar)
|
| 655 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, short, short, uchar)
|
| 656 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, int, int, uchar)
|
| 657 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uint, uint, uchar)
|
| 658 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, float, float, uchar)
|
| 659 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, double, double, uchar)
|
| 660 |
+
|
| 661 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uchar, uchar, uchar)
|
| 662 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, char, char, uchar)
|
| 663 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, ushort, ushort, uchar)
|
| 664 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, short, short, uchar)
|
| 665 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, int, int, uchar)
|
| 666 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uint, uint, uchar)
|
| 667 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, float, float, uchar)
|
| 668 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, double, double, uchar)
|
| 669 |
+
|
| 670 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uchar, uchar, uchar)
|
| 671 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, char, char, uchar)
|
| 672 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, ushort, ushort, uchar)
|
| 673 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, short, short, uchar)
|
| 674 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, int, int, uchar)
|
| 675 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uint, uint, uchar)
|
| 676 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, float, float, uchar)
|
| 677 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, double, double, uchar)
|
| 678 |
+
|
| 679 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uchar, uchar, uchar)
|
| 680 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, char, char, uchar)
|
| 681 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, ushort, ushort, uchar)
|
| 682 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, short, short, uchar)
|
| 683 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, int, int, uchar)
|
| 684 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uint, uint, uchar)
|
| 685 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, float, float, uchar)
|
| 686 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, double, double, uchar)
|
| 687 |
+
|
| 688 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uchar, uchar, uchar)
|
| 689 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, char, char, uchar)
|
| 690 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, ushort, ushort, uchar)
|
| 691 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, short, short, uchar)
|
| 692 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, int, int, uchar)
|
| 693 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uint, uint, uchar)
|
| 694 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, float, float, uchar)
|
| 695 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, double, double, uchar)
|
| 696 |
+
|
| 697 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uchar, uchar, uchar)
|
| 698 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, char, char, uchar)
|
| 699 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, ushort, ushort, uchar)
|
| 700 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, short, short, uchar)
|
| 701 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, int, int, uchar)
|
| 702 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uint, uint, uchar)
|
| 703 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, float, float, uchar)
|
| 704 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, double, double, uchar)
|
| 705 |
+
|
| 706 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uchar, uchar, uchar)
|
| 707 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, char, char, uchar)
|
| 708 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, ushort, ushort, uchar)
|
| 709 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, short, short, uchar)
|
| 710 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, int, int, uchar)
|
| 711 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uint, uint, uchar)
|
| 712 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, float, float, uchar)
|
| 713 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, double, double, uchar)
|
| 714 |
+
|
| 715 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uchar, uchar, uchar)
|
| 716 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, char, char, uchar)
|
| 717 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, ushort, ushort, uchar)
|
| 718 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, short, short, uchar)
|
| 719 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, int, int, uchar)
|
| 720 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uint, uint, uchar)
|
| 721 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, float, float, uchar)
|
| 722 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, double, double, uchar)
|
| 723 |
+
|
| 724 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uchar, uchar, uchar)
|
| 725 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, char, char, char)
|
| 726 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, ushort, ushort, ushort)
|
| 727 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, short, short, short)
|
| 728 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, int, int, int)
|
| 729 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uint, uint, uint)
|
| 730 |
+
|
| 731 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uchar, uchar, uchar)
|
| 732 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, char, char, char)
|
| 733 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, ushort, ushort, ushort)
|
| 734 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, short, short, short)
|
| 735 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, int, int, int)
|
| 736 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uint, uint, uint)
|
| 737 |
+
|
| 738 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uchar, uchar, uchar)
|
| 739 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, char, char, char)
|
| 740 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, ushort, ushort, ushort)
|
| 741 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, short, short, short)
|
| 742 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, int, int, int)
|
| 743 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uint, uint, uint)
|
| 744 |
+
|
| 745 |
+
#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP
|
| 746 |
+
|
| 747 |
+
// binary function (vec & vec)
|
| 748 |
+
|
| 749 |
+
#define CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(func_name, func, input_type, output_type) \
|
| 750 |
+
__device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, const input_type ## 1 & b) \
|
| 751 |
+
{ \
|
| 752 |
+
return VecTraits<output_type ## 1>::make(func (a.x, b.x)); \
|
| 753 |
+
} \
|
| 754 |
+
__device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, const input_type ## 2 & b) \
|
| 755 |
+
{ \
|
| 756 |
+
return VecTraits<output_type ## 2>::make(func (a.x, b.x), func (a.y, b.y)); \
|
| 757 |
+
} \
|
| 758 |
+
__device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, const input_type ## 3 & b) \
|
| 759 |
+
{ \
|
| 760 |
+
return VecTraits<output_type ## 3>::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z)); \
|
| 761 |
+
} \
|
| 762 |
+
__device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, const input_type ## 4 & b) \
|
| 763 |
+
{ \
|
| 764 |
+
return VecTraits<output_type ## 4>::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z), func (a.w, b.w)); \
|
| 765 |
+
}
|
| 766 |
+
|
| 767 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uchar, uchar)
|
| 768 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, char, char)
|
| 769 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, ushort, ushort)
|
| 770 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, short, short)
|
| 771 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uint, uint)
|
| 772 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, int, int)
|
| 773 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmaxf, float, float)
|
| 774 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmax, double, double)
|
| 775 |
+
|
| 776 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uchar, uchar)
|
| 777 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, char, char)
|
| 778 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, ushort, ushort)
|
| 779 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, short, short)
|
| 780 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uint, uint)
|
| 781 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, int, int)
|
| 782 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fminf, float, float)
|
| 783 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fmin, double, double)
|
| 784 |
+
|
| 785 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uchar, float)
|
| 786 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, char, float)
|
| 787 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, ushort, float)
|
| 788 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, short, float)
|
| 789 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uint, float)
|
| 790 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, int, float)
|
| 791 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, float, float)
|
| 792 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypot, double, double)
|
| 793 |
+
|
| 794 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uchar, float)
|
| 795 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, char, float)
|
| 796 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, ushort, float)
|
| 797 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, short, float)
|
| 798 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uint, float)
|
| 799 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, int, float)
|
| 800 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, float, float)
|
| 801 |
+
CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2, double, double)
|
| 802 |
+
|
| 803 |
+
#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC
|
| 804 |
+
|
| 805 |
+
// binary function (vec & scalar)
|
| 806 |
+
|
| 807 |
+
#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(func_name, func, input_type, scalar_type, output_type) \
|
| 808 |
+
__device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, scalar_type s) \
|
| 809 |
+
{ \
|
| 810 |
+
return VecTraits<output_type ## 1>::make(func ((output_type) a.x, (output_type) s)); \
|
| 811 |
+
} \
|
| 812 |
+
__device__ __forceinline__ output_type ## 1 func_name(scalar_type s, const input_type ## 1 & b) \
|
| 813 |
+
{ \
|
| 814 |
+
return VecTraits<output_type ## 1>::make(func ((output_type) s, (output_type) b.x)); \
|
| 815 |
+
} \
|
| 816 |
+
__device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, scalar_type s) \
|
| 817 |
+
{ \
|
| 818 |
+
return VecTraits<output_type ## 2>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s)); \
|
| 819 |
+
} \
|
| 820 |
+
__device__ __forceinline__ output_type ## 2 func_name(scalar_type s, const input_type ## 2 & b) \
|
| 821 |
+
{ \
|
| 822 |
+
return VecTraits<output_type ## 2>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y)); \
|
| 823 |
+
} \
|
| 824 |
+
__device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, scalar_type s) \
|
| 825 |
+
{ \
|
| 826 |
+
return VecTraits<output_type ## 3>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s)); \
|
| 827 |
+
} \
|
| 828 |
+
__device__ __forceinline__ output_type ## 3 func_name(scalar_type s, const input_type ## 3 & b) \
|
| 829 |
+
{ \
|
| 830 |
+
return VecTraits<output_type ## 3>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z)); \
|
| 831 |
+
} \
|
| 832 |
+
__device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, scalar_type s) \
|
| 833 |
+
{ \
|
| 834 |
+
return VecTraits<output_type ## 4>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s), func ((output_type) a.w, (output_type) s)); \
|
| 835 |
+
} \
|
| 836 |
+
__device__ __forceinline__ output_type ## 4 func_name(scalar_type s, const input_type ## 4 & b) \
|
| 837 |
+
{ \
|
| 838 |
+
return VecTraits<output_type ## 4>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z), func ((output_type) s, (output_type) b.w)); \
|
| 839 |
+
}
|
| 840 |
+
|
| 841 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uchar, uchar, uchar)
|
| 842 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uchar, float, float)
|
| 843 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uchar, double, double)
|
| 844 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, char, char, char)
|
| 845 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, char, float, float)
|
| 846 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, char, double, double)
|
| 847 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, ushort, ushort, ushort)
|
| 848 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, ushort, float, float)
|
| 849 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, ushort, double, double)
|
| 850 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, short, short, short)
|
| 851 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, short, float, float)
|
| 852 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, short, double, double)
|
| 853 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uint, uint, uint)
|
| 854 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uint, float, float)
|
| 855 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uint, double, double)
|
| 856 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, int, int, int)
|
| 857 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, int, float, float)
|
| 858 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, int, double, double)
|
| 859 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, float, float, float)
|
| 860 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, float, double, double)
|
| 861 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, double, double, double)
|
| 862 |
+
|
| 863 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uchar, uchar, uchar)
|
| 864 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uchar, float, float)
|
| 865 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uchar, double, double)
|
| 866 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, char, char, char)
|
| 867 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, char, float, float)
|
| 868 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, char, double, double)
|
| 869 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, ushort, ushort, ushort)
|
| 870 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, ushort, float, float)
|
| 871 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, ushort, double, double)
|
| 872 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, short, short, short)
|
| 873 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, short, float, float)
|
| 874 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, short, double, double)
|
| 875 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uint, uint, uint)
|
| 876 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uint, float, float)
|
| 877 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uint, double, double)
|
| 878 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, int, int, int)
|
| 879 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, int, float, float)
|
| 880 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, int, double, double)
|
| 881 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, float, float, float)
|
| 882 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, float, double, double)
|
| 883 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, double, double, double)
|
| 884 |
+
|
| 885 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uchar, float, float)
|
| 886 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uchar, double, double)
|
| 887 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, char, float, float)
|
| 888 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, char, double, double)
|
| 889 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, ushort, float, float)
|
| 890 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, ushort, double, double)
|
| 891 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, short, float, float)
|
| 892 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, short, double, double)
|
| 893 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uint, float, float)
|
| 894 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uint, double, double)
|
| 895 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, int, float, float)
|
| 896 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, int, double, double)
|
| 897 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, float, float, float)
|
| 898 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, float, double, double)
|
| 899 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, double, double, double)
|
| 900 |
+
|
| 901 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uchar, float, float)
|
| 902 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uchar, double, double)
|
| 903 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, char, float, float)
|
| 904 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, char, double, double)
|
| 905 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, ushort, float, float)
|
| 906 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, ushort, double, double)
|
| 907 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, short, float, float)
|
| 908 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, short, double, double)
|
| 909 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uint, float, float)
|
| 910 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uint, double, double)
|
| 911 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, int, float, float)
|
| 912 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, int, double, double)
|
| 913 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, float, float, float)
|
| 914 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, float, double, double)
|
| 915 |
+
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, double, double, double)
|
| 916 |
+
|
| 917 |
+
#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC
|
| 918 |
+
|
| 919 |
+
}}} // namespace cv { namespace cuda { namespace device
|
| 920 |
+
|
| 921 |
+
//! @endcond
|
| 922 |
+
|
| 923 |
+
#endif // OPENCV_CUDA_VECMATH_HPP
|
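The operators and functions added above in vec_math.hpp give CUDA's built-in vector types (uchar3, float3, ...) element-wise semantics. A minimal usage sketch follows; it is not part of the upstream diff, the kernel name is hypothetical, and it assumes this 3rdparty include directory is on the compiler's include path. Per the macro instantiations above, an arithmetic op between an integer vector and a float scalar widens to the float vector type, and comparisons yield uchar vectors holding 0/1 per channel.

// Illustration only -- hypothetical kernel using the operators defined in vec_math.hpp.
#include "opencv2/core/cuda/vec_math.hpp"

using namespace cv::cuda::device;

__global__ void bias_and_mask(const uchar3* src, float3* dst, uchar3* mask,
                              float bias, float thr, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    float3 v = src[i] + bias;   // uchar3 + float -> float3 (widening rule from the macros above)
    v = max(v, 0.0f);           // per-channel fmaxf against the scalar 0.0f
    dst[i]  = v;
    mask[i] = v > thr;          // float3 > float -> uchar3 holding 0/1 per channel
}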
3rdparty/opencv/include/opencv2/core/cuda/vec_traits.hpp
ADDED
|
@@ -0,0 +1,288 @@
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Third party copyrights are property of their respective owners.
|
| 16 |
+
//
|
| 17 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 18 |
+
// are permitted provided that the following conditions are met:
|
| 19 |
+
//
|
| 20 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 21 |
+
// this list of conditions and the following disclaimer.
|
| 22 |
+
//
|
| 23 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 24 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 25 |
+
// and/or other materials provided with the distribution.
|
| 26 |
+
//
|
| 27 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 28 |
+
// derived from this software without specific prior written permission.
|
| 29 |
+
//
|
| 30 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 31 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 32 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 33 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 34 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 35 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 36 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 37 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 38 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 39 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 40 |
+
//
|
| 41 |
+
//M*/
|
| 42 |
+
|
| 43 |
+
#ifndef OPENCV_CUDA_VEC_TRAITS_HPP
|
| 44 |
+
#define OPENCV_CUDA_VEC_TRAITS_HPP
|
| 45 |
+
|
| 46 |
+
#include "common.hpp"
|
| 47 |
+
|
| 48 |
+
/** @file
|
| 49 |
+
* @deprecated Use @ref cudev instead.
|
| 50 |
+
*/
|
| 51 |
+
|
| 52 |
+
//! @cond IGNORED
|
| 53 |
+
|
| 54 |
+
namespace cv { namespace cuda { namespace device
|
| 55 |
+
{
|
| 56 |
+
template<typename T, int N> struct TypeVec;
|
| 57 |
+
|
| 58 |
+
struct __align__(8) uchar8
|
| 59 |
+
{
|
| 60 |
+
uchar a0, a1, a2, a3, a4, a5, a6, a7;
|
| 61 |
+
};
|
| 62 |
+
static __host__ __device__ __forceinline__ uchar8 make_uchar8(uchar a0, uchar a1, uchar a2, uchar a3, uchar a4, uchar a5, uchar a6, uchar a7)
|
| 63 |
+
{
|
| 64 |
+
uchar8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 65 |
+
return val;
|
| 66 |
+
}
|
| 67 |
+
struct __align__(8) char8
|
| 68 |
+
{
|
| 69 |
+
schar a0, a1, a2, a3, a4, a5, a6, a7;
|
| 70 |
+
};
|
| 71 |
+
static __host__ __device__ __forceinline__ char8 make_char8(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7)
|
| 72 |
+
{
|
| 73 |
+
char8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 74 |
+
return val;
|
| 75 |
+
}
|
| 76 |
+
struct __align__(16) ushort8
|
| 77 |
+
{
|
| 78 |
+
ushort a0, a1, a2, a3, a4, a5, a6, a7;
|
| 79 |
+
};
|
| 80 |
+
static __host__ __device__ __forceinline__ ushort8 make_ushort8(ushort a0, ushort a1, ushort a2, ushort a3, ushort a4, ushort a5, ushort a6, ushort a7)
|
| 81 |
+
{
|
| 82 |
+
ushort8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 83 |
+
return val;
|
| 84 |
+
}
|
| 85 |
+
struct __align__(16) short8
|
| 86 |
+
{
|
| 87 |
+
short a0, a1, a2, a3, a4, a5, a6, a7;
|
| 88 |
+
};
|
| 89 |
+
static __host__ __device__ __forceinline__ short8 make_short8(short a0, short a1, short a2, short a3, short a4, short a5, short a6, short a7)
|
| 90 |
+
{
|
| 91 |
+
short8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 92 |
+
return val;
|
| 93 |
+
}
|
| 94 |
+
struct __align__(32) uint8
|
| 95 |
+
{
|
| 96 |
+
uint a0, a1, a2, a3, a4, a5, a6, a7;
|
| 97 |
+
};
|
| 98 |
+
static __host__ __device__ __forceinline__ uint8 make_uint8(uint a0, uint a1, uint a2, uint a3, uint a4, uint a5, uint a6, uint a7)
|
| 99 |
+
{
|
| 100 |
+
uint8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 101 |
+
return val;
|
| 102 |
+
}
|
| 103 |
+
struct __align__(32) int8
|
| 104 |
+
{
|
| 105 |
+
int a0, a1, a2, a3, a4, a5, a6, a7;
|
| 106 |
+
};
|
| 107 |
+
static __host__ __device__ __forceinline__ int8 make_int8(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7)
|
| 108 |
+
{
|
| 109 |
+
int8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 110 |
+
return val;
|
| 111 |
+
}
|
| 112 |
+
struct __align__(32) float8
|
| 113 |
+
{
|
| 114 |
+
float a0, a1, a2, a3, a4, a5, a6, a7;
|
| 115 |
+
};
|
| 116 |
+
static __host__ __device__ __forceinline__ float8 make_float8(float a0, float a1, float a2, float a3, float a4, float a5, float a6, float a7)
|
| 117 |
+
{
|
| 118 |
+
float8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 119 |
+
return val;
|
| 120 |
+
}
|
| 121 |
+
struct double8
|
| 122 |
+
{
|
| 123 |
+
double a0, a1, a2, a3, a4, a5, a6, a7;
|
| 124 |
+
};
|
| 125 |
+
static __host__ __device__ __forceinline__ double8 make_double8(double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7)
|
| 126 |
+
{
|
| 127 |
+
double8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
|
| 128 |
+
return val;
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
#define OPENCV_CUDA_IMPLEMENT_TYPE_VEC(type) \
|
| 132 |
+
template<> struct TypeVec<type, 1> { typedef type vec_type; }; \
|
| 133 |
+
template<> struct TypeVec<type ## 1, 1> { typedef type ## 1 vec_type; }; \
|
| 134 |
+
template<> struct TypeVec<type, 2> { typedef type ## 2 vec_type; }; \
|
| 135 |
+
template<> struct TypeVec<type ## 2, 2> { typedef type ## 2 vec_type; }; \
|
| 136 |
+
template<> struct TypeVec<type, 3> { typedef type ## 3 vec_type; }; \
|
| 137 |
+
template<> struct TypeVec<type ## 3, 3> { typedef type ## 3 vec_type; }; \
|
| 138 |
+
template<> struct TypeVec<type, 4> { typedef type ## 4 vec_type; }; \
|
| 139 |
+
template<> struct TypeVec<type ## 4, 4> { typedef type ## 4 vec_type; }; \
|
| 140 |
+
template<> struct TypeVec<type, 8> { typedef type ## 8 vec_type; }; \
|
| 141 |
+
template<> struct TypeVec<type ## 8, 8> { typedef type ## 8 vec_type; };
|
| 142 |
+
|
| 143 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uchar)
|
| 144 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(char)
|
| 145 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(ushort)
|
| 146 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(short)
|
| 147 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(int)
|
| 148 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uint)
|
| 149 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(float)
|
| 150 |
+
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(double)
|
| 151 |
+
|
| 152 |
+
#undef OPENCV_CUDA_IMPLEMENT_TYPE_VEC
|
| 153 |
+
|
| 154 |
+
template<> struct TypeVec<schar, 1> { typedef schar vec_type; };
|
| 155 |
+
template<> struct TypeVec<schar, 2> { typedef char2 vec_type; };
|
| 156 |
+
template<> struct TypeVec<schar, 3> { typedef char3 vec_type; };
|
| 157 |
+
template<> struct TypeVec<schar, 4> { typedef char4 vec_type; };
|
| 158 |
+
template<> struct TypeVec<schar, 8> { typedef char8 vec_type; };
|
| 159 |
+
|
| 160 |
+
template<> struct TypeVec<bool, 1> { typedef uchar vec_type; };
|
| 161 |
+
template<> struct TypeVec<bool, 2> { typedef uchar2 vec_type; };
|
| 162 |
+
template<> struct TypeVec<bool, 3> { typedef uchar3 vec_type; };
|
| 163 |
+
template<> struct TypeVec<bool, 4> { typedef uchar4 vec_type; };
|
| 164 |
+
template<> struct TypeVec<bool, 8> { typedef uchar8 vec_type; };
|
| 165 |
+
|
| 166 |
+
template<typename T> struct VecTraits;
|
| 167 |
+
|
| 168 |
+
#define OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(type) \
|
| 169 |
+
template<> struct VecTraits<type> \
|
| 170 |
+
{ \
|
| 171 |
+
typedef type elem_type; \
|
| 172 |
+
enum {cn=1}; \
|
| 173 |
+
static __device__ __host__ __forceinline__ type all(type v) {return v;} \
|
| 174 |
+
static __device__ __host__ __forceinline__ type make(type x) {return x;} \
|
| 175 |
+
static __device__ __host__ __forceinline__ type make(const type* v) {return *v;} \
|
| 176 |
+
}; \
|
| 177 |
+
template<> struct VecTraits<type ## 1> \
|
| 178 |
+
{ \
|
| 179 |
+
typedef type elem_type; \
|
| 180 |
+
enum {cn=1}; \
|
| 181 |
+
static __device__ __host__ __forceinline__ type ## 1 all(type v) {return make_ ## type ## 1(v);} \
|
| 182 |
+
static __device__ __host__ __forceinline__ type ## 1 make(type x) {return make_ ## type ## 1(x);} \
|
| 183 |
+
static __device__ __host__ __forceinline__ type ## 1 make(const type* v) {return make_ ## type ## 1(*v);} \
|
| 184 |
+
}; \
|
| 185 |
+
template<> struct VecTraits<type ## 2> \
|
| 186 |
+
{ \
|
| 187 |
+
typedef type elem_type; \
|
| 188 |
+
enum {cn=2}; \
|
| 189 |
+
static __device__ __host__ __forceinline__ type ## 2 all(type v) {return make_ ## type ## 2(v, v);} \
|
| 190 |
+
static __device__ __host__ __forceinline__ type ## 2 make(type x, type y) {return make_ ## type ## 2(x, y);} \
|
| 191 |
+
static __device__ __host__ __forceinline__ type ## 2 make(const type* v) {return make_ ## type ## 2(v[0], v[1]);} \
|
| 192 |
+
}; \
|
| 193 |
+
template<> struct VecTraits<type ## 3> \
|
| 194 |
+
{ \
|
| 195 |
+
typedef type elem_type; \
|
| 196 |
+
enum {cn=3}; \
|
| 197 |
+
static __device__ __host__ __forceinline__ type ## 3 all(type v) {return make_ ## type ## 3(v, v, v);} \
|
| 198 |
+
static __device__ __host__ __forceinline__ type ## 3 make(type x, type y, type z) {return make_ ## type ## 3(x, y, z);} \
|
| 199 |
+
static __device__ __host__ __forceinline__ type ## 3 make(const type* v) {return make_ ## type ## 3(v[0], v[1], v[2]);} \
|
| 200 |
+
}; \
|
| 201 |
+
template<> struct VecTraits<type ## 4> \
|
| 202 |
+
{ \
|
| 203 |
+
typedef type elem_type; \
|
| 204 |
+
enum {cn=4}; \
|
| 205 |
+
static __device__ __host__ __forceinline__ type ## 4 all(type v) {return make_ ## type ## 4(v, v, v, v);} \
|
| 206 |
+
static __device__ __host__ __forceinline__ type ## 4 make(type x, type y, type z, type w) {return make_ ## type ## 4(x, y, z, w);} \
|
| 207 |
+
static __device__ __host__ __forceinline__ type ## 4 make(const type* v) {return make_ ## type ## 4(v[0], v[1], v[2], v[3]);} \
|
| 208 |
+
}; \
|
| 209 |
+
template<> struct VecTraits<type ## 8> \
|
| 210 |
+
{ \
|
| 211 |
+
typedef type elem_type; \
|
| 212 |
+
enum {cn=8}; \
|
| 213 |
+
static __device__ __host__ __forceinline__ type ## 8 all(type v) {return make_ ## type ## 8(v, v, v, v, v, v, v, v);} \
|
| 214 |
+
static __device__ __host__ __forceinline__ type ## 8 make(type a0, type a1, type a2, type a3, type a4, type a5, type a6, type a7) {return make_ ## type ## 8(a0, a1, a2, a3, a4, a5, a6, a7);} \
|
| 215 |
+
static __device__ __host__ __forceinline__ type ## 8 make(const type* v) {return make_ ## type ## 8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} \
|
| 216 |
+
};
|
| 217 |
+
|
| 218 |
+
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uchar)
|
| 219 |
+
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(ushort)
|
| 220 |
+
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(short)
|
| 221 |
+
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(int)
|
| 222 |
+
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uint)
|
| 223 |
+
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(float)
|
| 224 |
+
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(double)
|
| 225 |
+
|
| 226 |
+
#undef OPENCV_CUDA_IMPLEMENT_VEC_TRAITS
|
| 227 |
+
|
| 228 |
+
template<> struct VecTraits<char>
|
| 229 |
+
{
|
| 230 |
+
typedef char elem_type;
|
| 231 |
+
enum {cn=1};
|
| 232 |
+
static __device__ __host__ __forceinline__ char all(char v) {return v;}
|
| 233 |
+
static __device__ __host__ __forceinline__ char make(char x) {return x;}
|
| 234 |
+
static __device__ __host__ __forceinline__ char make(const char* x) {return *x;}
|
| 235 |
+
};
|
| 236 |
+
template<> struct VecTraits<schar>
|
| 237 |
+
{
|
| 238 |
+
typedef schar elem_type;
|
| 239 |
+
enum {cn=1};
|
| 240 |
+
static __device__ __host__ __forceinline__ schar all(schar v) {return v;}
|
| 241 |
+
static __device__ __host__ __forceinline__ schar make(schar x) {return x;}
|
| 242 |
+
static __device__ __host__ __forceinline__ schar make(const schar* x) {return *x;}
|
| 243 |
+
};
|
| 244 |
+
template<> struct VecTraits<char1>
|
| 245 |
+
{
|
| 246 |
+
typedef schar elem_type;
|
| 247 |
+
enum {cn=1};
|
| 248 |
+
static __device__ __host__ __forceinline__ char1 all(schar v) {return make_char1(v);}
|
| 249 |
+
static __device__ __host__ __forceinline__ char1 make(schar x) {return make_char1(x);}
|
| 250 |
+
static __device__ __host__ __forceinline__ char1 make(const schar* v) {return make_char1(v[0]);}
|
| 251 |
+
};
|
| 252 |
+
template<> struct VecTraits<char2>
|
| 253 |
+
{
|
| 254 |
+
typedef schar elem_type;
|
| 255 |
+
enum {cn=2};
|
| 256 |
+
static __device__ __host__ __forceinline__ char2 all(schar v) {return make_char2(v, v);}
|
| 257 |
+
static __device__ __host__ __forceinline__ char2 make(schar x, schar y) {return make_char2(x, y);}
|
| 258 |
+
static __device__ __host__ __forceinline__ char2 make(const schar* v) {return make_char2(v[0], v[1]);}
|
| 259 |
+
};
|
| 260 |
+
template<> struct VecTraits<char3>
|
| 261 |
+
{
|
| 262 |
+
typedef schar elem_type;
|
| 263 |
+
enum {cn=3};
|
| 264 |
+
static __device__ __host__ __forceinline__ char3 all(schar v) {return make_char3(v, v, v);}
|
| 265 |
+
static __device__ __host__ __forceinline__ char3 make(schar x, schar y, schar z) {return make_char3(x, y, z);}
|
| 266 |
+
static __device__ __host__ __forceinline__ char3 make(const schar* v) {return make_char3(v[0], v[1], v[2]);}
|
| 267 |
+
};
|
| 268 |
+
template<> struct VecTraits<char4>
|
| 269 |
+
{
|
| 270 |
+
typedef schar elem_type;
|
| 271 |
+
enum {cn=4};
|
| 272 |
+
static __device__ __host__ __forceinline__ char4 all(schar v) {return make_char4(v, v, v, v);}
|
| 273 |
+
static __device__ __host__ __forceinline__ char4 make(schar x, schar y, schar z, schar w) {return make_char4(x, y, z, w);}
|
| 274 |
+
static __device__ __host__ __forceinline__ char4 make(const schar* v) {return make_char4(v[0], v[1], v[2], v[3]);}
|
| 275 |
+
};
|
| 276 |
+
template<> struct VecTraits<char8>
|
| 277 |
+
{
|
| 278 |
+
typedef schar elem_type;
|
| 279 |
+
enum {cn=8};
|
| 280 |
+
static __device__ __host__ __forceinline__ char8 all(schar v) {return make_char8(v, v, v, v, v, v, v, v);}
|
| 281 |
+
static __device__ __host__ __forceinline__ char8 make(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) {return make_char8(a0, a1, a2, a3, a4, a5, a6, a7);}
|
| 282 |
+
static __device__ __host__ __forceinline__ char8 make(const schar* v) {return make_char8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);}
|
| 283 |
+
};
|
| 284 |
+
}}} // namespace cv { namespace cuda { namespace cudev
|
| 285 |
+
|
| 286 |
+
//! @endcond
|
| 287 |
+
|
| 288 |
+
#endif // OPENCV_CUDA_VEC_TRAITS_HPP
|
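vec_traits.hpp above maps an (element type, channel count) pair to a concrete CUDA vector type (TypeVec) and gives every vector type uniform construction helpers (VecTraits). A short sketch of channel-generic device code, not from the upstream sources; the splat/fill3 names are made up for illustration and the header is assumed to be on the include path.

// Illustration only -- hypothetical helpers built on TypeVec / VecTraits.
#include "opencv2/core/cuda/vec_traits.hpp"

using namespace cv::cuda::device;

// Build an N-channel vector filled with one value; the concrete CUDA vector
// type (e.g. uchar3 for <uchar, 3>) is chosen by TypeVec at compile time.
template <typename T, int cn>
__device__ __forceinline__ typename TypeVec<T, cn>::vec_type splat(T v)
{
    typedef typename TypeVec<T, cn>::vec_type vec_type;
    return VecTraits<vec_type>::all(v);
}

__global__ void fill3(uchar3* dst, int n, uchar value)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        dst[i] = splat<uchar, 3>(value);   // expands to make_uchar3(value, value, value)
}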
3rdparty/opencv/include/opencv2/core/cuda/warp.hpp
ADDED
|
@@ -0,0 +1,139 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_DEVICE_WARP_HPP
#define OPENCV_CUDA_DEVICE_WARP_HPP

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    struct Warp
    {
        enum
        {
            LOG_WARP_SIZE = 5,
            WARP_SIZE = 1 << LOG_WARP_SIZE,
            STRIDE = WARP_SIZE
        };

        /** \brief Returns the warp lane ID of the calling thread. */
        static __device__ __forceinline__ unsigned int laneId()
        {
            unsigned int ret;
            asm("mov.u32 %0, %%laneid;" : "=r"(ret) );
            return ret;
        }

        template<typename It, typename T>
        static __device__ __forceinline__ void fill(It beg, It end, const T& value)
        {
            for(It t = beg + laneId(); t < end; t += STRIDE)
                *t = value;
        }

        template<typename InIt, typename OutIt>
        static __device__ __forceinline__ OutIt copy(InIt beg, InIt end, OutIt out)
        {
            for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE)
                *out = *t;
            return out;
        }

        template<typename InIt, typename OutIt, class UnOp>
        static __device__ __forceinline__ OutIt transform(InIt beg, InIt end, OutIt out, UnOp op)
        {
            for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE)
                *out = op(*t);
            return out;
        }

        template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
        static __device__ __forceinline__ OutIt transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
        {
            unsigned int lane = laneId();

            InIt1 t1 = beg1 + lane;
            InIt2 t2 = beg2 + lane;
            for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, out += STRIDE)
                *out = op(*t1, *t2);
            return out;
        }

        template <class T, class BinOp>
        static __device__ __forceinline__ T reduce(volatile T *ptr, BinOp op)
        {
            const unsigned int lane = laneId();

            if (lane < 16)
            {
                T partial = ptr[lane];

                ptr[lane] = partial = op(partial, ptr[lane + 16]);
                ptr[lane] = partial = op(partial, ptr[lane + 8]);
                ptr[lane] = partial = op(partial, ptr[lane + 4]);
                ptr[lane] = partial = op(partial, ptr[lane + 2]);
                ptr[lane] = partial = op(partial, ptr[lane + 1]);
            }

            return *ptr;
        }

        template<typename OutIt, typename T>
        static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
        {
            unsigned int lane = laneId();
            value += lane;

            for(OutIt t = beg + lane; t < end; t += STRIDE, value += STRIDE)
                *t = value;
        }
    };
}}} // namespace cv { namespace cuda { namespace cudev

//! @endcond

#endif /* OPENCV_CUDA_DEVICE_WARP_HPP */
|
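warp.hpp above wraps per-warp STL-like helpers (fill, copy, transform, reduce) around the hardware lane ID. A sketch of a warp-wide sum with Warp::reduce follows; it is not part of the upstream diff, assumes one 32-thread warp per block, and the kernel and functor names are hypothetical. Warp::reduce is written for the legacy implicit warp-synchronous model and needs a warp-private, 32-element volatile buffer.

// Illustration only -- hypothetical kernel: one 32-thread warp per block sums its values.
#include "opencv2/core/cuda/warp.hpp"

using namespace cv::cuda::device;

struct AddOp { __device__ float operator()(float a, float b) const { return a + b; } };

__global__ void warp_sum(const float* src, float* dst)
{
    __shared__ volatile float buf[Warp::WARP_SIZE];

    const unsigned int lane = Warp::laneId();
    buf[lane] = src[blockIdx.x * Warp::WARP_SIZE + lane];

    const float total = Warp::reduce(buf, AddOp());   // result gathered at buf[0]
    if (lane == 0)
        dst[blockIdx.x] = total;
}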
3rdparty/opencv/include/opencv2/core/cuda/warp_reduce.hpp
ADDED
|
@@ -0,0 +1,76 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_WARP_REDUCE_HPP__
#define OPENCV_CUDA_WARP_REDUCE_HPP__

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
    template <class T>
    __device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x)
    {
        const unsigned int lane = tid & 31; // index of thread in warp (0..31)

        if (lane < 16)
        {
            T partial = ptr[tid];

            ptr[tid] = partial = partial + ptr[tid + 16];
            ptr[tid] = partial = partial + ptr[tid + 8];
            ptr[tid] = partial = partial + ptr[tid + 4];
            ptr[tid] = partial = partial + ptr[tid + 2];
            ptr[tid] = partial = partial + ptr[tid + 1];
        }

        return ptr[tid - lane];
    }
}}} // namespace cv { namespace cuda { namespace cudev {

//! @endcond

#endif /* OPENCV_CUDA_WARP_REDUCE_HPP__ */
|
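warp_reduce above indexes shared memory by the absolute thread index, so each warp of a block can sum its own 32-entry slice in place. A sketch assuming a 256-thread block follows; it is not part of the upstream diff and the kernel name is made up.

// Illustration only -- hypothetical kernel with a 256-thread block; each warp
// sums its own 32-entry slice of shared memory in place via warp_reduce.
#include "opencv2/core/cuda/warp_reduce.hpp"

using namespace cv::cuda::device;

__global__ void block_partial_sums(const float* src, float* warp_sums)
{
    __shared__ volatile float smem[256];   // must span blockDim.x entries

    const unsigned int tid = threadIdx.x;
    smem[tid] = src[blockIdx.x * blockDim.x + tid];
    __syncthreads();

    const float s = warp_reduce(smem, tid);   // sum over this thread's warp
    if ((tid & 31) == 0)
        warp_sums[blockIdx.x * (blockDim.x >> 5) + (tid >> 5)] = s;
}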
3rdparty/opencv/include/opencv2/core/cuda/warp_shuffle.hpp
ADDED
|
@@ -0,0 +1,162 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CUDA_WARP_SHUFFLE_HPP
#define OPENCV_CUDA_WARP_SHUFFLE_HPP

/** @file
 * @deprecated Use @ref cudev instead.
 */

//! @cond IGNORED

namespace cv { namespace cuda { namespace device
{
#if __CUDACC_VER_MAJOR__ >= 9
#  define __shfl(x, y, z) __shfl_sync(0xFFFFFFFFU, x, y, z)
#  define __shfl_up(x, y, z) __shfl_up_sync(0xFFFFFFFFU, x, y, z)
#  define __shfl_down(x, y, z) __shfl_down_sync(0xFFFFFFFFU, x, y, z)
#endif
    template <typename T>
    __device__ __forceinline__ T shfl(T val, int srcLane, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        return __shfl(val, srcLane, width);
    #else
        return T();
    #endif
    }
    __device__ __forceinline__ unsigned int shfl(unsigned int val, int srcLane, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        return (unsigned int) __shfl((int) val, srcLane, width);
    #else
        return 0;
    #endif
    }
    __device__ __forceinline__ double shfl(double val, int srcLane, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        int lo = __double2loint(val);
        int hi = __double2hiint(val);

        lo = __shfl(lo, srcLane, width);
        hi = __shfl(hi, srcLane, width);

        return __hiloint2double(hi, lo);
    #else
        return 0.0;
    #endif
    }

    template <typename T>
    __device__ __forceinline__ T shfl_down(T val, unsigned int delta, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        return __shfl_down(val, delta, width);
    #else
        return T();
    #endif
    }
    __device__ __forceinline__ unsigned int shfl_down(unsigned int val, unsigned int delta, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        return (unsigned int) __shfl_down((int) val, delta, width);
    #else
        return 0;
    #endif
    }
    __device__ __forceinline__ double shfl_down(double val, unsigned int delta, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        int lo = __double2loint(val);
        int hi = __double2hiint(val);

        lo = __shfl_down(lo, delta, width);
        hi = __shfl_down(hi, delta, width);

        return __hiloint2double(hi, lo);
    #else
        return 0.0;
    #endif
    }

    template <typename T>
    __device__ __forceinline__ T shfl_up(T val, unsigned int delta, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        return __shfl_up(val, delta, width);
    #else
        return T();
    #endif
    }
    __device__ __forceinline__ unsigned int shfl_up(unsigned int val, unsigned int delta, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        return (unsigned int) __shfl_up((int) val, delta, width);
    #else
        return 0;
    #endif
    }
    __device__ __forceinline__ double shfl_up(double val, unsigned int delta, int width = warpSize)
    {
    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
        int lo = __double2loint(val);
        int hi = __double2hiint(val);

        lo = __shfl_up(lo, delta, width);
        hi = __shfl_up(hi, delta, width);

        return __hiloint2double(hi, lo);
    #else
        return 0.0;
    #endif
    }
}}}

#  undef __shfl
#  undef __shfl_up
#  undef __shfl_down

//! @endcond

#endif // OPENCV_CUDA_WARP_SHUFFLE_HPP
|
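Editor's note: the shfl/shfl_up/shfl_down wrappers above are the building blocks OpenCV's CUDA kernels use for warp-level reductions and scans. A minimal sketch (not part of the diff) of a warp-wide sum built on the shfl_down() wrapper; warpSum is a hypothetical helper, not an OpenCV API, and it assumes a device of compute capability 3.0 or newer with a full 32-lane warp.

// Sketch: warp-wide sum using cv::cuda::device::shfl_down (hypothetical helper).
__device__ __forceinline__ float warpSum(float val)
{
    // Halve the shuffle distance each step: 16, 8, 4, 2, 1.
    for (int delta = warpSize / 2; delta > 0; delta /= 2)
        val += cv::cuda::device::shfl_down(val, delta);
    return val; // lane 0 now holds the sum over the whole warp
}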
3rdparty/opencv/include/opencv2/core/cuda_stream_accessor.hpp
ADDED
@@ -0,0 +1,86 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP
#define OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP

#ifndef __cplusplus
# error cuda_stream_accessor.hpp header must be compiled as C++
#endif

/** @file cuda_stream_accessor.hpp
* This is only header file that depends on CUDA Runtime API. All other headers are independent.
*/

#include <cuda_runtime.h>
#include "opencv2/core/cuda.hpp"

namespace cv
{
    namespace cuda
    {

//! @addtogroup cudacore_struct
//! @{

        /** @brief Class that enables getting cudaStream_t from cuda::Stream
        */
        struct StreamAccessor
        {
            CV_EXPORTS static cudaStream_t getStream(const Stream& stream);
            CV_EXPORTS static Stream wrapStream(cudaStream_t stream);
        };

        /** @brief Class that enables getting cudaEvent_t from cuda::Event
        */
        struct EventAccessor
        {
            CV_EXPORTS static cudaEvent_t getEvent(const Event& event);
            CV_EXPORTS static Event wrapEvent(cudaEvent_t event);
        };

//! @}

    }
}

#endif /* OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP */
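Editor's note: StreamAccessor and EventAccessor are the bridge between cv::cuda::Stream / cv::cuda::Event and the raw CUDA runtime handles. A minimal sketch (not part of the diff) of enqueuing a user kernel on the same cudaStream_t OpenCV is using; scaleKernel and enqueueOnStream are hypothetical names, not OpenCV APIs.

#include <cuda_runtime.h>
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"

// Sketch: placeholder device work (hypothetical kernel).
__global__ void scaleKernel(float* data, int n, float factor)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

// Sketch: launch the kernel on the stream wrapped by cv::cuda::Stream so it
// runs in order with work OpenCV has already queued there.
void enqueueOnStream(cv::cuda::Stream& stream, float* devData, int n)
{
    cudaStream_t raw = cv::cuda::StreamAccessor::getStream(stream);
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    scaleKernel<<<grid, block, 0, raw>>>(devData, n, 2.0f);
}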
3rdparty/opencv/include/opencv2/core/cuda_types.hpp
ADDED
@@ -0,0 +1,152 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CORE_CUDA_TYPES_HPP
#define OPENCV_CORE_CUDA_TYPES_HPP

#ifndef __cplusplus
# error cuda_types.hpp header must be compiled as C++
#endif

#if defined(__OPENCV_BUILD) && defined(__clang__)
#pragma clang diagnostic ignored "-Winconsistent-missing-override"
#endif
#if defined(__OPENCV_BUILD) && defined(__GNUC__) && __GNUC__ >= 5
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif

/** @file
* @deprecated Use @ref cudev instead.
*/

//! @cond IGNORED

#ifdef __CUDACC__
#define __CV_CUDA_HOST_DEVICE__ __host__ __device__ __forceinline__
#else
#define __CV_CUDA_HOST_DEVICE__
#endif

#include "opencv2/core/cvdef.h"
#include "opencv2/core.hpp"

namespace cv
{
namespace cuda
{

// Simple lightweight structures that encapsulates information about an image on device.
// It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile

template <typename T> struct DevPtr
{
    typedef T elem_type;
    typedef int index_type;

    enum { elem_size = sizeof(elem_type) };

    T* data;

    __CV_CUDA_HOST_DEVICE__ DevPtr() : data(0) {}
    __CV_CUDA_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}

    __CV_CUDA_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
    __CV_CUDA_HOST_DEVICE__ operator T*() { return data; }
    __CV_CUDA_HOST_DEVICE__ operator const T*() const { return data; }
};

template <typename T> struct PtrSz : public DevPtr<T>
{
    __CV_CUDA_HOST_DEVICE__ PtrSz() : size(0) {}
    __CV_CUDA_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}

    size_t size;
};

template <typename T> struct PtrStep : public DevPtr<T>
{
    __CV_CUDA_HOST_DEVICE__ PtrStep() : step(0) {}
    __CV_CUDA_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}

    size_t step;

    __CV_CUDA_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)(((DevPtr<T>*)this)->data) + y * step); }
    __CV_CUDA_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)(((DevPtr<T>*)this)->data) + y * step); }

    __CV_CUDA_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
    __CV_CUDA_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};

template <typename T> struct PtrStepSz : public PtrStep<T>
{
    __CV_CUDA_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
    __CV_CUDA_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
        : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}

    template <typename U>
    explicit PtrStepSz(const PtrStepSz<U>& d) : PtrStep<T>((T*)d.data, d.step), cols(d.cols), rows(d.rows){}

    int cols;
    int rows;

    CV_NODISCARD_STD __CV_CUDA_HOST_DEVICE__ Size size() const { return {cols, rows}; }
    CV_NODISCARD_STD __CV_CUDA_HOST_DEVICE__ T& operator ()(const Point &pos) { return (*this)(pos.y, pos.x); }
    CV_NODISCARD_STD __CV_CUDA_HOST_DEVICE__ const T& operator ()(const Point &pos) const { return (*this)(pos.y, pos.x); }
    using PtrStep<T>::operator();
};

typedef PtrStepSz<unsigned char> PtrStepSzb;
typedef PtrStepSz<unsigned short> PtrStepSzus;
typedef PtrStepSz<float> PtrStepSzf;
typedef PtrStepSz<int> PtrStepSzi;

typedef PtrStep<unsigned char> PtrStepb;
typedef PtrStep<unsigned short> PtrStepus;
typedef PtrStep<float> PtrStepf;
typedef PtrStep<int> PtrStepi;

}
}

//! @endcond

#endif /* OPENCV_CORE_CUDA_TYPES_HPP */
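Editor's note: PtrStep/PtrStepSz above are the lightweight device-side views that OpenCV passes into nvcc-compiled kernels in place of GpuMat. A minimal sketch (not part of the diff); addScalar is a hypothetical kernel, and it relies on a CV_32FC1 cv::cuda::GpuMat converting to cv::cuda::PtrStepSzf when passed as the kernel argument.

#include "opencv2/core/cuda_types.hpp"

// Sketch: add a constant to every pixel of a single-channel float image.
__global__ void addScalar(cv::cuda::PtrStepSzf img, float value)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < img.cols && y < img.rows)
        img(y, x) += value; // operator()(y, x) applies the row step internally
}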
3rdparty/opencv/include/opencv2/core/cv_cpu_dispatch.h
ADDED
@@ -0,0 +1,395 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#if defined __OPENCV_BUILD \

#include "cv_cpu_config.h"
#include "cv_cpu_helper.h"

#ifdef CV_CPU_DISPATCH_MODE
#define CV_CPU_OPTIMIZATION_NAMESPACE __CV_CAT(opt_, CV_CPU_DISPATCH_MODE)
#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace __CV_CAT(opt_, CV_CPU_DISPATCH_MODE) {
#define CV_CPU_OPTIMIZATION_NAMESPACE_END }
#else
#define CV_CPU_OPTIMIZATION_NAMESPACE cpu_baseline
#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace cpu_baseline {
#define CV_CPU_OPTIMIZATION_NAMESPACE_END }
#define CV_CPU_BASELINE_MODE 1
#endif


#define __CV_CPU_DISPATCH_CHAIN_END(fn, args, mode, ...) /* done */
#define __CV_CPU_DISPATCH(fn, args, mode, ...) __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
#define __CV_CPU_DISPATCH_EXPAND(fn, args, ...) __CV_EXPAND(__CV_CPU_DISPATCH(fn, args, __VA_ARGS__))
#define CV_CPU_DISPATCH(fn, args, ...) __CV_CPU_DISPATCH_EXPAND(fn, args, __VA_ARGS__, END) // expand macros


#if defined CV_ENABLE_INTRINSICS \
&& !defined CV_DISABLE_OPTIMIZATION \
&& !defined __CUDACC__ /* do not include SSE/AVX/NEON headers for NVCC compiler */ \

#ifdef CV_CPU_COMPILE_SSE2
# include <emmintrin.h>
# define CV_MMX 1
# define CV_SSE 1
# define CV_SSE2 1
#endif
#ifdef CV_CPU_COMPILE_SSE3
# include <pmmintrin.h>
# define CV_SSE3 1
#endif
#ifdef CV_CPU_COMPILE_SSSE3
# include <tmmintrin.h>
# define CV_SSSE3 1
#endif
#ifdef CV_CPU_COMPILE_SSE4_1
# include <smmintrin.h>
# define CV_SSE4_1 1
#endif
#ifdef CV_CPU_COMPILE_SSE4_2
# include <nmmintrin.h>
# define CV_SSE4_2 1
#endif
#ifdef CV_CPU_COMPILE_POPCNT
# ifdef _MSC_VER
# include <nmmintrin.h>
# if defined(_M_X64)
# define CV_POPCNT_U64 (int)_mm_popcnt_u64
# endif
# define CV_POPCNT_U32 _mm_popcnt_u32
# else
# include <popcntintrin.h>
# if defined(__x86_64__)
# define CV_POPCNT_U64 __builtin_popcountll
# endif
# define CV_POPCNT_U32 __builtin_popcount
# endif
# define CV_POPCNT 1
#endif
#ifdef CV_CPU_COMPILE_AVX
# include <immintrin.h>
# define CV_AVX 1
#endif
#ifdef CV_CPU_COMPILE_FP16
# if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM) || defined(_M_ARM64)
# include <arm_neon.h>
# else
# include <immintrin.h>
# endif
# define CV_FP16 1
#endif
#ifdef CV_CPU_COMPILE_NEON_DOTPROD
# include <arm_neon.h>
# define CV_NEON_DOT 1
#endif
#ifdef CV_CPU_COMPILE_AVX2
# include <immintrin.h>
# define CV_AVX2 1
#endif
#ifdef CV_CPU_COMPILE_AVX_512F
# include <immintrin.h>
# define CV_AVX_512F 1
#endif
#ifdef CV_CPU_COMPILE_AVX512_COMMON
# define CV_AVX512_COMMON 1
# define CV_AVX_512CD 1
#endif
#ifdef CV_CPU_COMPILE_AVX512_KNL
# define CV_AVX512_KNL 1
# define CV_AVX_512ER 1
# define CV_AVX_512PF 1
#endif
#ifdef CV_CPU_COMPILE_AVX512_KNM
# define CV_AVX512_KNM 1
# define CV_AVX_5124FMAPS 1
# define CV_AVX_5124VNNIW 1
# define CV_AVX_512VPOPCNTDQ 1
#endif
#ifdef CV_CPU_COMPILE_AVX512_SKX
# define CV_AVX512_SKX 1
# define CV_AVX_512VL 1
# define CV_AVX_512BW 1
# define CV_AVX_512DQ 1
#endif
#ifdef CV_CPU_COMPILE_AVX512_CNL
# define CV_AVX512_CNL 1
# define CV_AVX_512IFMA 1
# define CV_AVX_512VBMI 1
#endif
#ifdef CV_CPU_COMPILE_AVX512_CLX
# define CV_AVX512_CLX 1
# define CV_AVX_512VNNI 1
#endif
#ifdef CV_CPU_COMPILE_AVX512_ICL
# define CV_AVX512_ICL 1
# undef CV_AVX_512IFMA
# define CV_AVX_512IFMA 1
# undef CV_AVX_512VBMI
# define CV_AVX_512VBMI 1
# undef CV_AVX_512VNNI
# define CV_AVX_512VNNI 1
# define CV_AVX_512VBMI2 1
# define CV_AVX_512BITALG 1
# define CV_AVX_512VPOPCNTDQ 1
#endif
#ifdef CV_CPU_COMPILE_FMA3
# define CV_FMA3 1
#endif

#if defined _WIN32 && (defined(_M_ARM) || defined(_M_ARM64)) && (defined(CV_CPU_COMPILE_NEON) || !defined(_MSC_VER))
# include <Intrin.h>
# include <arm_neon.h>
# define CV_NEON 1
#elif defined(__ARM_NEON)
# include <arm_neon.h>
# define CV_NEON 1
#endif

/* RVV-related macro states with different compiler
// +--------------------+----------+----------+
// | Macro              | Upstream | XuanTie  |
// +--------------------+----------+----------+
// | CV_CPU_COMPILE_RVV | defined  | defined  |
// | CV_RVV             | 1        | 0        |
// | CV_RVV071          | 0        | 1        |
// | CV_TRY_RVV         | 1        | 1        |
// +--------------------+----------+----------+
*/
#ifdef CV_CPU_COMPILE_RVV
# ifdef __riscv_vector_071
# define CV_RVV071 1
# else
# define CV_RVV 1
# endif
#include <riscv_vector.h>
#endif

#ifdef CV_CPU_COMPILE_VSX
# include <altivec.h>
# undef vector
# undef pixel
# undef bool
# define CV_VSX 1
#endif

#ifdef CV_CPU_COMPILE_VSX3
# define CV_VSX3 1
#endif

#ifdef CV_CPU_COMPILE_MSA
# include "hal/msa_macros.h"
# define CV_MSA 1
#endif

#ifdef CV_CPU_COMPILE_LSX
# include <lsxintrin.h>
# define CV_LSX 1
#endif

#ifdef CV_CPU_COMPILE_LASX
# include <lasxintrin.h>
# define CV_LASX 1
#endif

#ifdef __EMSCRIPTEN__
# define CV_WASM_SIMD 1
# include <wasm_simd128.h>
#endif

#endif // CV_ENABLE_INTRINSICS && !CV_DISABLE_OPTIMIZATION && !__CUDACC__

#if defined CV_CPU_COMPILE_AVX && !defined CV_CPU_BASELINE_COMPILE_AVX
struct VZeroUpperGuard {
#ifdef __GNUC__
__attribute__((always_inline))
#endif
inline VZeroUpperGuard() { _mm256_zeroupper(); }
#ifdef __GNUC__
__attribute__((always_inline))
#endif
inline ~VZeroUpperGuard() { _mm256_zeroupper(); }
};
#define __CV_AVX_GUARD VZeroUpperGuard __vzeroupper_guard; CV_UNUSED(__vzeroupper_guard);
#endif

#ifdef __CV_AVX_GUARD
#define CV_AVX_GUARD __CV_AVX_GUARD
#else
#define CV_AVX_GUARD
#endif

#endif // __OPENCV_BUILD



#if !defined __OPENCV_BUILD /* Compatibility code */ \
&& !defined __CUDACC__ /* do not include SSE/AVX/NEON headers for NVCC compiler */
#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
# include <emmintrin.h>
# define CV_MMX 1
# define CV_SSE 1
# define CV_SSE2 1
#elif defined _WIN32 && (defined(_M_ARM) || defined(_M_ARM64)) && (defined(CV_CPU_COMPILE_NEON) || !defined(_MSC_VER))
# include <Intrin.h>
# include <arm_neon.h>
# define CV_NEON 1
#elif defined(__ARM_NEON)
# include <arm_neon.h>
# define CV_NEON 1
#elif defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
# include <altivec.h>
# undef vector
# undef pixel
# undef bool
# define CV_VSX 1
#endif

#ifdef __F16C__
# include <immintrin.h>
# define CV_FP16 1
#endif

#endif // !__OPENCV_BUILD && !__CUDACC (Compatibility code)



#ifndef CV_MMX
# define CV_MMX 0
#endif
#ifndef CV_SSE
# define CV_SSE 0
#endif
#ifndef CV_SSE2
# define CV_SSE2 0
#endif
#ifndef CV_SSE3
# define CV_SSE3 0
#endif
#ifndef CV_SSSE3
# define CV_SSSE3 0
#endif
#ifndef CV_SSE4_1
# define CV_SSE4_1 0
#endif
#ifndef CV_SSE4_2
# define CV_SSE4_2 0
#endif
#ifndef CV_POPCNT
# define CV_POPCNT 0
#endif
#ifndef CV_AVX
# define CV_AVX 0
#endif
#ifndef CV_FP16
# define CV_FP16 0
#endif
#ifndef CV_AVX2
# define CV_AVX2 0
#endif
#ifndef CV_FMA3
# define CV_FMA3 0
#endif
#ifndef CV_AVX_512F
# define CV_AVX_512F 0
#endif
#ifndef CV_AVX_512BW
# define CV_AVX_512BW 0
#endif
#ifndef CV_AVX_512CD
# define CV_AVX_512CD 0
#endif
#ifndef CV_AVX_512DQ
# define CV_AVX_512DQ 0
#endif
#ifndef CV_AVX_512ER
# define CV_AVX_512ER 0
#endif
#ifndef CV_AVX_512IFMA
# define CV_AVX_512IFMA 0
#endif
#define CV_AVX_512IFMA512 CV_AVX_512IFMA // deprecated
#ifndef CV_AVX_512PF
# define CV_AVX_512PF 0
#endif
#ifndef CV_AVX_512VBMI
# define CV_AVX_512VBMI 0
#endif
#ifndef CV_AVX_512VL
# define CV_AVX_512VL 0
#endif
#ifndef CV_AVX_5124FMAPS
# define CV_AVX_5124FMAPS 0
#endif
#ifndef CV_AVX_5124VNNIW
# define CV_AVX_5124VNNIW 0
#endif
#ifndef CV_AVX_512VPOPCNTDQ
# define CV_AVX_512VPOPCNTDQ 0
#endif
#ifndef CV_AVX_512VNNI
# define CV_AVX_512VNNI 0
#endif
#ifndef CV_AVX_512VBMI2
# define CV_AVX_512VBMI2 0
#endif
#ifndef CV_AVX_512BITALG
# define CV_AVX_512BITALG 0
#endif
#ifndef CV_AVX512_COMMON
# define CV_AVX512_COMMON 0
#endif
#ifndef CV_AVX512_KNL
# define CV_AVX512_KNL 0
#endif
#ifndef CV_AVX512_KNM
# define CV_AVX512_KNM 0
#endif
#ifndef CV_AVX512_SKX
# define CV_AVX512_SKX 0
#endif
#ifndef CV_AVX512_CNL
# define CV_AVX512_CNL 0
#endif
#ifndef CV_AVX512_CLX
# define CV_AVX512_CLX 0
#endif
#ifndef CV_AVX512_ICL
# define CV_AVX512_ICL 0
#endif

#ifndef CV_NEON
# define CV_NEON 0
#endif

#ifndef CV_RVV071
# define CV_RVV071 0
#endif

#ifndef CV_VSX
# define CV_VSX 0
#endif

#ifndef CV_VSX3
# define CV_VSX3 0
#endif

#ifndef CV_MSA
# define CV_MSA 0
#endif

#ifndef CV_WASM_SIMD
# define CV_WASM_SIMD 0
#endif

#ifndef CV_RVV
# define CV_RVV 0
#endif

#ifndef CV_LSX
# define CV_LSX 0
#endif

#ifndef CV_LASX
# define CV_LASX 0
#endif
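Editor's note: CV_CPU_DISPATCH above chains through the __CV_CPU_DISPATCH_CHAIN_* macros defined in cv_cpu_helper.h (the next file) to call the best compiled variant of a function at run time. A schematic sketch (not part of the diff) of the pattern OpenCV sources use; the file and function names are hypothetical, and the *.simd_declarations.hpp header and CV_CPU_DISPATCH_MODES_ALL list are generated by OpenCV's own build system, so this only compiles inside such a build.

// mymodule.simd.hpp -- compiled once per enabled instruction set (hypothetical file)
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
void process(const float* src, float* dst, int n); // per-ISA implementation
CV_CPU_OPTIMIZATION_NAMESPACE_END

// mymodule.dispatch.cpp -- baseline translation unit (hypothetical file)
#include "mymodule.simd.hpp"
#include "mymodule.simd_declarations.hpp" // generated opt_* forward declarations

void process(const float* src, float* dst, int n)
{
    // Tries the dispatched variants (AVX2, SSE4_1, ...) in order at run time
    // and falls through to the baseline build if none is supported.
    CV_CPU_DISPATCH(process, (src, dst, n), CV_CPU_DISPATCH_MODES_ALL);
}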
3rdparty/opencv/include/opencv2/core/cv_cpu_helper.h
ADDED
@@ -0,0 +1,613 @@
// AUTOGENERATED, DO NOT EDIT

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE
# define CV_TRY_SSE 1
# define CV_CPU_FORCE_SSE 1
# define CV_CPU_HAS_SUPPORT_SSE 1
# define CV_CPU_CALL_SSE(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_SSE_(fn, args) return (opt_SSE::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE
# define CV_TRY_SSE 1
# define CV_CPU_FORCE_SSE 0
# define CV_CPU_HAS_SUPPORT_SSE (cv::checkHardwareSupport(CV_CPU_SSE))
# define CV_CPU_CALL_SSE(fn, args) if (CV_CPU_HAS_SUPPORT_SSE) return (opt_SSE::fn args)
# define CV_CPU_CALL_SSE_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE) return (opt_SSE::fn args)
#else
# define CV_TRY_SSE 0
# define CV_CPU_FORCE_SSE 0
# define CV_CPU_HAS_SUPPORT_SSE 0
# define CV_CPU_CALL_SSE(fn, args)
# define CV_CPU_CALL_SSE_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_SSE(fn, args, mode, ...) CV_CPU_CALL_SSE(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE2
# define CV_TRY_SSE2 1
# define CV_CPU_FORCE_SSE2 1
# define CV_CPU_HAS_SUPPORT_SSE2 1
# define CV_CPU_CALL_SSE2(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_SSE2_(fn, args) return (opt_SSE2::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE2
# define CV_TRY_SSE2 1
# define CV_CPU_FORCE_SSE2 0
# define CV_CPU_HAS_SUPPORT_SSE2 (cv::checkHardwareSupport(CV_CPU_SSE2))
# define CV_CPU_CALL_SSE2(fn, args) if (CV_CPU_HAS_SUPPORT_SSE2) return (opt_SSE2::fn args)
# define CV_CPU_CALL_SSE2_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE2) return (opt_SSE2::fn args)
#else
# define CV_TRY_SSE2 0
# define CV_CPU_FORCE_SSE2 0
# define CV_CPU_HAS_SUPPORT_SSE2 0
# define CV_CPU_CALL_SSE2(fn, args)
# define CV_CPU_CALL_SSE2_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_SSE2(fn, args, mode, ...) CV_CPU_CALL_SSE2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE3
# define CV_TRY_SSE3 1
# define CV_CPU_FORCE_SSE3 1
# define CV_CPU_HAS_SUPPORT_SSE3 1
# define CV_CPU_CALL_SSE3(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_SSE3_(fn, args) return (opt_SSE3::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE3
# define CV_TRY_SSE3 1
# define CV_CPU_FORCE_SSE3 0
# define CV_CPU_HAS_SUPPORT_SSE3 (cv::checkHardwareSupport(CV_CPU_SSE3))
# define CV_CPU_CALL_SSE3(fn, args) if (CV_CPU_HAS_SUPPORT_SSE3) return (opt_SSE3::fn args)
# define CV_CPU_CALL_SSE3_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE3) return (opt_SSE3::fn args)
#else
# define CV_TRY_SSE3 0
# define CV_CPU_FORCE_SSE3 0
# define CV_CPU_HAS_SUPPORT_SSE3 0
# define CV_CPU_CALL_SSE3(fn, args)
# define CV_CPU_CALL_SSE3_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_SSE3(fn, args, mode, ...) CV_CPU_CALL_SSE3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSSE3
# define CV_TRY_SSSE3 1
# define CV_CPU_FORCE_SSSE3 1
# define CV_CPU_HAS_SUPPORT_SSSE3 1
# define CV_CPU_CALL_SSSE3(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_SSSE3_(fn, args) return (opt_SSSE3::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSSE3
# define CV_TRY_SSSE3 1
# define CV_CPU_FORCE_SSSE3 0
# define CV_CPU_HAS_SUPPORT_SSSE3 (cv::checkHardwareSupport(CV_CPU_SSSE3))
# define CV_CPU_CALL_SSSE3(fn, args) if (CV_CPU_HAS_SUPPORT_SSSE3) return (opt_SSSE3::fn args)
# define CV_CPU_CALL_SSSE3_(fn, args) if (CV_CPU_HAS_SUPPORT_SSSE3) return (opt_SSSE3::fn args)
#else
# define CV_TRY_SSSE3 0
# define CV_CPU_FORCE_SSSE3 0
# define CV_CPU_HAS_SUPPORT_SSSE3 0
# define CV_CPU_CALL_SSSE3(fn, args)
# define CV_CPU_CALL_SSSE3_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_SSSE3(fn, args, mode, ...) CV_CPU_CALL_SSSE3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE4_1
# define CV_TRY_SSE4_1 1
# define CV_CPU_FORCE_SSE4_1 1
# define CV_CPU_HAS_SUPPORT_SSE4_1 1
# define CV_CPU_CALL_SSE4_1(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_SSE4_1_(fn, args) return (opt_SSE4_1::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE4_1
# define CV_TRY_SSE4_1 1
# define CV_CPU_FORCE_SSE4_1 0
# define CV_CPU_HAS_SUPPORT_SSE4_1 (cv::checkHardwareSupport(CV_CPU_SSE4_1))
# define CV_CPU_CALL_SSE4_1(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_1) return (opt_SSE4_1::fn args)
# define CV_CPU_CALL_SSE4_1_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_1) return (opt_SSE4_1::fn args)
#else
# define CV_TRY_SSE4_1 0
# define CV_CPU_FORCE_SSE4_1 0
# define CV_CPU_HAS_SUPPORT_SSE4_1 0
# define CV_CPU_CALL_SSE4_1(fn, args)
# define CV_CPU_CALL_SSE4_1_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_SSE4_1(fn, args, mode, ...) CV_CPU_CALL_SSE4_1(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE4_2
# define CV_TRY_SSE4_2 1
# define CV_CPU_FORCE_SSE4_2 1
# define CV_CPU_HAS_SUPPORT_SSE4_2 1
# define CV_CPU_CALL_SSE4_2(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_SSE4_2_(fn, args) return (opt_SSE4_2::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE4_2
# define CV_TRY_SSE4_2 1
# define CV_CPU_FORCE_SSE4_2 0
# define CV_CPU_HAS_SUPPORT_SSE4_2 (cv::checkHardwareSupport(CV_CPU_SSE4_2))
# define CV_CPU_CALL_SSE4_2(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_2) return (opt_SSE4_2::fn args)
# define CV_CPU_CALL_SSE4_2_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_2) return (opt_SSE4_2::fn args)
#else
# define CV_TRY_SSE4_2 0
# define CV_CPU_FORCE_SSE4_2 0
# define CV_CPU_HAS_SUPPORT_SSE4_2 0
# define CV_CPU_CALL_SSE4_2(fn, args)
# define CV_CPU_CALL_SSE4_2_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_SSE4_2(fn, args, mode, ...) CV_CPU_CALL_SSE4_2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_POPCNT
# define CV_TRY_POPCNT 1
# define CV_CPU_FORCE_POPCNT 1
# define CV_CPU_HAS_SUPPORT_POPCNT 1
# define CV_CPU_CALL_POPCNT(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_POPCNT_(fn, args) return (opt_POPCNT::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_POPCNT
# define CV_TRY_POPCNT 1
# define CV_CPU_FORCE_POPCNT 0
# define CV_CPU_HAS_SUPPORT_POPCNT (cv::checkHardwareSupport(CV_CPU_POPCNT))
# define CV_CPU_CALL_POPCNT(fn, args) if (CV_CPU_HAS_SUPPORT_POPCNT) return (opt_POPCNT::fn args)
# define CV_CPU_CALL_POPCNT_(fn, args) if (CV_CPU_HAS_SUPPORT_POPCNT) return (opt_POPCNT::fn args)
#else
# define CV_TRY_POPCNT 0
# define CV_CPU_FORCE_POPCNT 0
# define CV_CPU_HAS_SUPPORT_POPCNT 0
# define CV_CPU_CALL_POPCNT(fn, args)
# define CV_CPU_CALL_POPCNT_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_POPCNT(fn, args, mode, ...) CV_CPU_CALL_POPCNT(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX
# define CV_TRY_AVX 1
# define CV_CPU_FORCE_AVX 1
# define CV_CPU_HAS_SUPPORT_AVX 1
# define CV_CPU_CALL_AVX(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX_(fn, args) return (opt_AVX::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX
# define CV_TRY_AVX 1
# define CV_CPU_FORCE_AVX 0
# define CV_CPU_HAS_SUPPORT_AVX (cv::checkHardwareSupport(CV_CPU_AVX))
# define CV_CPU_CALL_AVX(fn, args) if (CV_CPU_HAS_SUPPORT_AVX) return (opt_AVX::fn args)
# define CV_CPU_CALL_AVX_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX) return (opt_AVX::fn args)
#else
# define CV_TRY_AVX 0
# define CV_CPU_FORCE_AVX 0
# define CV_CPU_HAS_SUPPORT_AVX 0
# define CV_CPU_CALL_AVX(fn, args)
# define CV_CPU_CALL_AVX_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX(fn, args, mode, ...) CV_CPU_CALL_AVX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_FP16
# define CV_TRY_FP16 1
# define CV_CPU_FORCE_FP16 1
# define CV_CPU_HAS_SUPPORT_FP16 1
# define CV_CPU_CALL_FP16(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_FP16_(fn, args) return (opt_FP16::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_FP16
# define CV_TRY_FP16 1
# define CV_CPU_FORCE_FP16 0
# define CV_CPU_HAS_SUPPORT_FP16 (cv::checkHardwareSupport(CV_CPU_FP16))
# define CV_CPU_CALL_FP16(fn, args) if (CV_CPU_HAS_SUPPORT_FP16) return (opt_FP16::fn args)
# define CV_CPU_CALL_FP16_(fn, args) if (CV_CPU_HAS_SUPPORT_FP16) return (opt_FP16::fn args)
#else
# define CV_TRY_FP16 0
# define CV_CPU_FORCE_FP16 0
# define CV_CPU_HAS_SUPPORT_FP16 0
# define CV_CPU_CALL_FP16(fn, args)
# define CV_CPU_CALL_FP16_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_FP16(fn, args, mode, ...) CV_CPU_CALL_FP16(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX2
# define CV_TRY_AVX2 1
# define CV_CPU_FORCE_AVX2 1
# define CV_CPU_HAS_SUPPORT_AVX2 1
# define CV_CPU_CALL_AVX2(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX2_(fn, args) return (opt_AVX2::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX2
# define CV_TRY_AVX2 1
# define CV_CPU_FORCE_AVX2 0
# define CV_CPU_HAS_SUPPORT_AVX2 (cv::checkHardwareSupport(CV_CPU_AVX2))
# define CV_CPU_CALL_AVX2(fn, args) if (CV_CPU_HAS_SUPPORT_AVX2) return (opt_AVX2::fn args)
# define CV_CPU_CALL_AVX2_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX2) return (opt_AVX2::fn args)
#else
# define CV_TRY_AVX2 0
# define CV_CPU_FORCE_AVX2 0
# define CV_CPU_HAS_SUPPORT_AVX2 0
# define CV_CPU_CALL_AVX2(fn, args)
# define CV_CPU_CALL_AVX2_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX2(fn, args, mode, ...) CV_CPU_CALL_AVX2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_FMA3
# define CV_TRY_FMA3 1
# define CV_CPU_FORCE_FMA3 1
# define CV_CPU_HAS_SUPPORT_FMA3 1
# define CV_CPU_CALL_FMA3(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_FMA3_(fn, args) return (opt_FMA3::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_FMA3
# define CV_TRY_FMA3 1
# define CV_CPU_FORCE_FMA3 0
# define CV_CPU_HAS_SUPPORT_FMA3 (cv::checkHardwareSupport(CV_CPU_FMA3))
# define CV_CPU_CALL_FMA3(fn, args) if (CV_CPU_HAS_SUPPORT_FMA3) return (opt_FMA3::fn args)
# define CV_CPU_CALL_FMA3_(fn, args) if (CV_CPU_HAS_SUPPORT_FMA3) return (opt_FMA3::fn args)
#else
# define CV_TRY_FMA3 0
# define CV_CPU_FORCE_FMA3 0
# define CV_CPU_HAS_SUPPORT_FMA3 0
# define CV_CPU_CALL_FMA3(fn, args)
# define CV_CPU_CALL_FMA3_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_FMA3(fn, args, mode, ...) CV_CPU_CALL_FMA3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX_512F
# define CV_TRY_AVX_512F 1
# define CV_CPU_FORCE_AVX_512F 1
# define CV_CPU_HAS_SUPPORT_AVX_512F 1
# define CV_CPU_CALL_AVX_512F(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX_512F_(fn, args) return (opt_AVX_512F::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX_512F
# define CV_TRY_AVX_512F 1
# define CV_CPU_FORCE_AVX_512F 0
# define CV_CPU_HAS_SUPPORT_AVX_512F (cv::checkHardwareSupport(CV_CPU_AVX_512F))
# define CV_CPU_CALL_AVX_512F(fn, args) if (CV_CPU_HAS_SUPPORT_AVX_512F) return (opt_AVX_512F::fn args)
# define CV_CPU_CALL_AVX_512F_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX_512F) return (opt_AVX_512F::fn args)
#else
# define CV_TRY_AVX_512F 0
# define CV_CPU_FORCE_AVX_512F 0
# define CV_CPU_HAS_SUPPORT_AVX_512F 0
# define CV_CPU_CALL_AVX_512F(fn, args)
# define CV_CPU_CALL_AVX_512F_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX_512F(fn, args, mode, ...) CV_CPU_CALL_AVX_512F(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_COMMON
# define CV_TRY_AVX512_COMMON 1
# define CV_CPU_FORCE_AVX512_COMMON 1
# define CV_CPU_HAS_SUPPORT_AVX512_COMMON 1
# define CV_CPU_CALL_AVX512_COMMON(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX512_COMMON_(fn, args) return (opt_AVX512_COMMON::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_COMMON
# define CV_TRY_AVX512_COMMON 1
# define CV_CPU_FORCE_AVX512_COMMON 0
# define CV_CPU_HAS_SUPPORT_AVX512_COMMON (cv::checkHardwareSupport(CV_CPU_AVX512_COMMON))
# define CV_CPU_CALL_AVX512_COMMON(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_COMMON) return (opt_AVX512_COMMON::fn args)
# define CV_CPU_CALL_AVX512_COMMON_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_COMMON) return (opt_AVX512_COMMON::fn args)
#else
# define CV_TRY_AVX512_COMMON 0
# define CV_CPU_FORCE_AVX512_COMMON 0
# define CV_CPU_HAS_SUPPORT_AVX512_COMMON 0
# define CV_CPU_CALL_AVX512_COMMON(fn, args)
# define CV_CPU_CALL_AVX512_COMMON_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX512_COMMON(fn, args, mode, ...) CV_CPU_CALL_AVX512_COMMON(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_KNL
# define CV_TRY_AVX512_KNL 1
# define CV_CPU_FORCE_AVX512_KNL 1
# define CV_CPU_HAS_SUPPORT_AVX512_KNL 1
# define CV_CPU_CALL_AVX512_KNL(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX512_KNL_(fn, args) return (opt_AVX512_KNL::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_KNL
# define CV_TRY_AVX512_KNL 1
# define CV_CPU_FORCE_AVX512_KNL 0
# define CV_CPU_HAS_SUPPORT_AVX512_KNL (cv::checkHardwareSupport(CV_CPU_AVX512_KNL))
# define CV_CPU_CALL_AVX512_KNL(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNL) return (opt_AVX512_KNL::fn args)
# define CV_CPU_CALL_AVX512_KNL_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNL) return (opt_AVX512_KNL::fn args)
#else
# define CV_TRY_AVX512_KNL 0
# define CV_CPU_FORCE_AVX512_KNL 0
# define CV_CPU_HAS_SUPPORT_AVX512_KNL 0
# define CV_CPU_CALL_AVX512_KNL(fn, args)
# define CV_CPU_CALL_AVX512_KNL_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX512_KNL(fn, args, mode, ...) CV_CPU_CALL_AVX512_KNL(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_KNM
# define CV_TRY_AVX512_KNM 1
# define CV_CPU_FORCE_AVX512_KNM 1
# define CV_CPU_HAS_SUPPORT_AVX512_KNM 1
# define CV_CPU_CALL_AVX512_KNM(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX512_KNM_(fn, args) return (opt_AVX512_KNM::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_KNM
# define CV_TRY_AVX512_KNM 1
# define CV_CPU_FORCE_AVX512_KNM 0
# define CV_CPU_HAS_SUPPORT_AVX512_KNM (cv::checkHardwareSupport(CV_CPU_AVX512_KNM))
# define CV_CPU_CALL_AVX512_KNM(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNM) return (opt_AVX512_KNM::fn args)
# define CV_CPU_CALL_AVX512_KNM_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNM) return (opt_AVX512_KNM::fn args)
#else
# define CV_TRY_AVX512_KNM 0
# define CV_CPU_FORCE_AVX512_KNM 0
# define CV_CPU_HAS_SUPPORT_AVX512_KNM 0
# define CV_CPU_CALL_AVX512_KNM(fn, args)
# define CV_CPU_CALL_AVX512_KNM_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX512_KNM(fn, args, mode, ...) CV_CPU_CALL_AVX512_KNM(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_SKX
# define CV_TRY_AVX512_SKX 1
# define CV_CPU_FORCE_AVX512_SKX 1
# define CV_CPU_HAS_SUPPORT_AVX512_SKX 1
# define CV_CPU_CALL_AVX512_SKX(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX512_SKX_(fn, args) return (opt_AVX512_SKX::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_SKX
# define CV_TRY_AVX512_SKX 1
# define CV_CPU_FORCE_AVX512_SKX 0
# define CV_CPU_HAS_SUPPORT_AVX512_SKX (cv::checkHardwareSupport(CV_CPU_AVX512_SKX))
# define CV_CPU_CALL_AVX512_SKX(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_SKX) return (opt_AVX512_SKX::fn args)
# define CV_CPU_CALL_AVX512_SKX_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_SKX) return (opt_AVX512_SKX::fn args)
#else
# define CV_TRY_AVX512_SKX 0
# define CV_CPU_FORCE_AVX512_SKX 0
# define CV_CPU_HAS_SUPPORT_AVX512_SKX 0
# define CV_CPU_CALL_AVX512_SKX(fn, args)
# define CV_CPU_CALL_AVX512_SKX_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX512_SKX(fn, args, mode, ...) CV_CPU_CALL_AVX512_SKX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_CNL
# define CV_TRY_AVX512_CNL 1
# define CV_CPU_FORCE_AVX512_CNL 1
# define CV_CPU_HAS_SUPPORT_AVX512_CNL 1
# define CV_CPU_CALL_AVX512_CNL(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX512_CNL_(fn, args) return (opt_AVX512_CNL::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_CNL
# define CV_TRY_AVX512_CNL 1
# define CV_CPU_FORCE_AVX512_CNL 0
# define CV_CPU_HAS_SUPPORT_AVX512_CNL (cv::checkHardwareSupport(CV_CPU_AVX512_CNL))
# define CV_CPU_CALL_AVX512_CNL(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CNL) return (opt_AVX512_CNL::fn args)
# define CV_CPU_CALL_AVX512_CNL_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CNL) return (opt_AVX512_CNL::fn args)
#else
# define CV_TRY_AVX512_CNL 0
# define CV_CPU_FORCE_AVX512_CNL 0
# define CV_CPU_HAS_SUPPORT_AVX512_CNL 0
# define CV_CPU_CALL_AVX512_CNL(fn, args)
# define CV_CPU_CALL_AVX512_CNL_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX512_CNL(fn, args, mode, ...) CV_CPU_CALL_AVX512_CNL(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_CLX
# define CV_TRY_AVX512_CLX 1
# define CV_CPU_FORCE_AVX512_CLX 1
# define CV_CPU_HAS_SUPPORT_AVX512_CLX 1
# define CV_CPU_CALL_AVX512_CLX(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX512_CLX_(fn, args) return (opt_AVX512_CLX::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_CLX
# define CV_TRY_AVX512_CLX 1
# define CV_CPU_FORCE_AVX512_CLX 0
# define CV_CPU_HAS_SUPPORT_AVX512_CLX (cv::checkHardwareSupport(CV_CPU_AVX512_CLX))
# define CV_CPU_CALL_AVX512_CLX(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CLX) return (opt_AVX512_CLX::fn args)
# define CV_CPU_CALL_AVX512_CLX_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CLX) return (opt_AVX512_CLX::fn args)
#else
# define CV_TRY_AVX512_CLX 0
# define CV_CPU_FORCE_AVX512_CLX 0
# define CV_CPU_HAS_SUPPORT_AVX512_CLX 0
# define CV_CPU_CALL_AVX512_CLX(fn, args)
# define CV_CPU_CALL_AVX512_CLX_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX512_CLX(fn, args, mode, ...) CV_CPU_CALL_AVX512_CLX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_ICL
# define CV_TRY_AVX512_ICL 1
# define CV_CPU_FORCE_AVX512_ICL 1
# define CV_CPU_HAS_SUPPORT_AVX512_ICL 1
# define CV_CPU_CALL_AVX512_ICL(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_AVX512_ICL_(fn, args) return (opt_AVX512_ICL::fn args)
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_ICL
# define CV_TRY_AVX512_ICL 1
# define CV_CPU_FORCE_AVX512_ICL 0
# define CV_CPU_HAS_SUPPORT_AVX512_ICL (cv::checkHardwareSupport(CV_CPU_AVX512_ICL))
# define CV_CPU_CALL_AVX512_ICL(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_ICL) return (opt_AVX512_ICL::fn args)
# define CV_CPU_CALL_AVX512_ICL_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_ICL) return (opt_AVX512_ICL::fn args)
#else
# define CV_TRY_AVX512_ICL 0
# define CV_CPU_FORCE_AVX512_ICL 0
# define CV_CPU_HAS_SUPPORT_AVX512_ICL 0
# define CV_CPU_CALL_AVX512_ICL(fn, args)
# define CV_CPU_CALL_AVX512_ICL_(fn, args)
#endif
#define __CV_CPU_DISPATCH_CHAIN_AVX512_ICL(fn, args, mode, ...) CV_CPU_CALL_AVX512_ICL(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))

#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_NEON
# define CV_TRY_NEON 1
# define CV_CPU_FORCE_NEON 1
# define CV_CPU_HAS_SUPPORT_NEON 1
# define CV_CPU_CALL_NEON(fn, args) return (cpu_baseline::fn args)
# define CV_CPU_CALL_NEON_(fn, args) return (opt_NEON::fn args)
|
| 408 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_NEON
|
| 409 |
+
# define CV_TRY_NEON 1
|
| 410 |
+
# define CV_CPU_FORCE_NEON 0
|
| 411 |
+
# define CV_CPU_HAS_SUPPORT_NEON (cv::checkHardwareSupport(CV_CPU_NEON))
|
| 412 |
+
# define CV_CPU_CALL_NEON(fn, args) if (CV_CPU_HAS_SUPPORT_NEON) return (opt_NEON::fn args)
|
| 413 |
+
# define CV_CPU_CALL_NEON_(fn, args) if (CV_CPU_HAS_SUPPORT_NEON) return (opt_NEON::fn args)
|
| 414 |
+
#else
|
| 415 |
+
# define CV_TRY_NEON 0
|
| 416 |
+
# define CV_CPU_FORCE_NEON 0
|
| 417 |
+
# define CV_CPU_HAS_SUPPORT_NEON 0
|
| 418 |
+
# define CV_CPU_CALL_NEON(fn, args)
|
| 419 |
+
# define CV_CPU_CALL_NEON_(fn, args)
|
| 420 |
+
#endif
|
| 421 |
+
#define __CV_CPU_DISPATCH_CHAIN_NEON(fn, args, mode, ...) CV_CPU_CALL_NEON(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 422 |
+
|
| 423 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_NEON_DOTPROD
|
| 424 |
+
# define CV_TRY_NEON_DOTPROD 1
|
| 425 |
+
# define CV_CPU_FORCE_NEON_DOTPROD 1
|
| 426 |
+
# define CV_CPU_HAS_SUPPORT_NEON_DOTPROD 1
|
| 427 |
+
# define CV_CPU_CALL_NEON_DOTPROD(fn, args) return (cpu_baseline::fn args)
|
| 428 |
+
# define CV_CPU_CALL_NEON_DOTPROD_(fn, args) return (opt_NEON_DOTPROD::fn args)
|
| 429 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_NEON_DOTPROD
|
| 430 |
+
# define CV_TRY_NEON_DOTPROD 1
|
| 431 |
+
# define CV_CPU_FORCE_NEON_DOTPROD 0
|
| 432 |
+
# define CV_CPU_HAS_SUPPORT_NEON_DOTPROD (cv::checkHardwareSupport(CV_CPU_NEON_DOTPROD))
|
| 433 |
+
# define CV_CPU_CALL_NEON_DOTPROD(fn, args) if (CV_CPU_HAS_SUPPORT_NEON_DOTPROD) return (opt_NEON_DOTPROD::fn args)
|
| 434 |
+
# define CV_CPU_CALL_NEON_DOTPROD_(fn, args) if (CV_CPU_HAS_SUPPORT_NEON_DOTPROD) return (opt_NEON_DOTPROD::fn args)
|
| 435 |
+
#else
|
| 436 |
+
# define CV_TRY_NEON_DOTPROD 0
|
| 437 |
+
# define CV_CPU_FORCE_NEON_DOTPROD 0
|
| 438 |
+
# define CV_CPU_HAS_SUPPORT_NEON_DOTPROD 0
|
| 439 |
+
# define CV_CPU_CALL_NEON_DOTPROD(fn, args)
|
| 440 |
+
# define CV_CPU_CALL_NEON_DOTPROD_(fn, args)
|
| 441 |
+
#endif
|
| 442 |
+
#define __CV_CPU_DISPATCH_CHAIN_NEON_DOTPROD(fn, args, mode, ...) CV_CPU_CALL_NEON_DOTPROD(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 443 |
+
|
| 444 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_NEON_FP16
|
| 445 |
+
# define CV_TRY_NEON_FP16 1
|
| 446 |
+
# define CV_CPU_FORCE_NEON_FP16 1
|
| 447 |
+
# define CV_CPU_HAS_SUPPORT_NEON_FP16 1
|
| 448 |
+
# define CV_CPU_CALL_NEON_FP16(fn, args) return (cpu_baseline::fn args)
|
| 449 |
+
# define CV_CPU_CALL_NEON_FP16_(fn, args) return (opt_NEON_FP16::fn args)
|
| 450 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_NEON_FP16
|
| 451 |
+
# define CV_TRY_NEON_FP16 1
|
| 452 |
+
# define CV_CPU_FORCE_NEON_FP16 0
|
| 453 |
+
# define CV_CPU_HAS_SUPPORT_NEON_FP16 (cv::checkHardwareSupport(CV_CPU_NEON_FP16))
|
| 454 |
+
# define CV_CPU_CALL_NEON_FP16(fn, args) if (CV_CPU_HAS_SUPPORT_NEON_FP16) return (opt_NEON_FP16::fn args)
|
| 455 |
+
# define CV_CPU_CALL_NEON_FP16_(fn, args) if (CV_CPU_HAS_SUPPORT_NEON_FP16) return (opt_NEON_FP16::fn args)
|
| 456 |
+
#else
|
| 457 |
+
# define CV_TRY_NEON_FP16 0
|
| 458 |
+
# define CV_CPU_FORCE_NEON_FP16 0
|
| 459 |
+
# define CV_CPU_HAS_SUPPORT_NEON_FP16 0
|
| 460 |
+
# define CV_CPU_CALL_NEON_FP16(fn, args)
|
| 461 |
+
# define CV_CPU_CALL_NEON_FP16_(fn, args)
|
| 462 |
+
#endif
|
| 463 |
+
#define __CV_CPU_DISPATCH_CHAIN_NEON_FP16(fn, args, mode, ...) CV_CPU_CALL_NEON_FP16(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 464 |
+
|
| 465 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_NEON_BF16
|
| 466 |
+
# define CV_TRY_NEON_BF16 1
|
| 467 |
+
# define CV_CPU_FORCE_NEON_BF16 1
|
| 468 |
+
# define CV_CPU_HAS_SUPPORT_NEON_BF16 1
|
| 469 |
+
# define CV_CPU_CALL_NEON_BF16(fn, args) return (cpu_baseline::fn args)
|
| 470 |
+
# define CV_CPU_CALL_NEON_BF16_(fn, args) return (opt_NEON_BF16::fn args)
|
| 471 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_NEON_BF16
|
| 472 |
+
# define CV_TRY_NEON_BF16 1
|
| 473 |
+
# define CV_CPU_FORCE_NEON_BF16 0
|
| 474 |
+
# define CV_CPU_HAS_SUPPORT_NEON_BF16 (cv::checkHardwareSupport(CV_CPU_NEON_BF16))
|
| 475 |
+
# define CV_CPU_CALL_NEON_BF16(fn, args) if (CV_CPU_HAS_SUPPORT_NEON_BF16) return (opt_NEON_BF16::fn args)
|
| 476 |
+
# define CV_CPU_CALL_NEON_BF16_(fn, args) if (CV_CPU_HAS_SUPPORT_NEON_BF16) return (opt_NEON_BF16::fn args)
|
| 477 |
+
#else
|
| 478 |
+
# define CV_TRY_NEON_BF16 0
|
| 479 |
+
# define CV_CPU_FORCE_NEON_BF16 0
|
| 480 |
+
# define CV_CPU_HAS_SUPPORT_NEON_BF16 0
|
| 481 |
+
# define CV_CPU_CALL_NEON_BF16(fn, args)
|
| 482 |
+
# define CV_CPU_CALL_NEON_BF16_(fn, args)
|
| 483 |
+
#endif
|
| 484 |
+
#define __CV_CPU_DISPATCH_CHAIN_NEON_BF16(fn, args, mode, ...) CV_CPU_CALL_NEON_BF16(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 485 |
+
|
| 486 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_MSA
|
| 487 |
+
# define CV_TRY_MSA 1
|
| 488 |
+
# define CV_CPU_FORCE_MSA 1
|
| 489 |
+
# define CV_CPU_HAS_SUPPORT_MSA 1
|
| 490 |
+
# define CV_CPU_CALL_MSA(fn, args) return (cpu_baseline::fn args)
|
| 491 |
+
# define CV_CPU_CALL_MSA_(fn, args) return (opt_MSA::fn args)
|
| 492 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_MSA
|
| 493 |
+
# define CV_TRY_MSA 1
|
| 494 |
+
# define CV_CPU_FORCE_MSA 0
|
| 495 |
+
# define CV_CPU_HAS_SUPPORT_MSA (cv::checkHardwareSupport(CV_CPU_MSA))
|
| 496 |
+
# define CV_CPU_CALL_MSA(fn, args) if (CV_CPU_HAS_SUPPORT_MSA) return (opt_MSA::fn args)
|
| 497 |
+
# define CV_CPU_CALL_MSA_(fn, args) if (CV_CPU_HAS_SUPPORT_MSA) return (opt_MSA::fn args)
|
| 498 |
+
#else
|
| 499 |
+
# define CV_TRY_MSA 0
|
| 500 |
+
# define CV_CPU_FORCE_MSA 0
|
| 501 |
+
# define CV_CPU_HAS_SUPPORT_MSA 0
|
| 502 |
+
# define CV_CPU_CALL_MSA(fn, args)
|
| 503 |
+
# define CV_CPU_CALL_MSA_(fn, args)
|
| 504 |
+
#endif
|
| 505 |
+
#define __CV_CPU_DISPATCH_CHAIN_MSA(fn, args, mode, ...) CV_CPU_CALL_MSA(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 506 |
+
|
| 507 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_VSX
|
| 508 |
+
# define CV_TRY_VSX 1
|
| 509 |
+
# define CV_CPU_FORCE_VSX 1
|
| 510 |
+
# define CV_CPU_HAS_SUPPORT_VSX 1
|
| 511 |
+
# define CV_CPU_CALL_VSX(fn, args) return (cpu_baseline::fn args)
|
| 512 |
+
# define CV_CPU_CALL_VSX_(fn, args) return (opt_VSX::fn args)
|
| 513 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_VSX
|
| 514 |
+
# define CV_TRY_VSX 1
|
| 515 |
+
# define CV_CPU_FORCE_VSX 0
|
| 516 |
+
# define CV_CPU_HAS_SUPPORT_VSX (cv::checkHardwareSupport(CV_CPU_VSX))
|
| 517 |
+
# define CV_CPU_CALL_VSX(fn, args) if (CV_CPU_HAS_SUPPORT_VSX) return (opt_VSX::fn args)
|
| 518 |
+
# define CV_CPU_CALL_VSX_(fn, args) if (CV_CPU_HAS_SUPPORT_VSX) return (opt_VSX::fn args)
|
| 519 |
+
#else
|
| 520 |
+
# define CV_TRY_VSX 0
|
| 521 |
+
# define CV_CPU_FORCE_VSX 0
|
| 522 |
+
# define CV_CPU_HAS_SUPPORT_VSX 0
|
| 523 |
+
# define CV_CPU_CALL_VSX(fn, args)
|
| 524 |
+
# define CV_CPU_CALL_VSX_(fn, args)
|
| 525 |
+
#endif
|
| 526 |
+
#define __CV_CPU_DISPATCH_CHAIN_VSX(fn, args, mode, ...) CV_CPU_CALL_VSX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 527 |
+
|
| 528 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_VSX3
|
| 529 |
+
# define CV_TRY_VSX3 1
|
| 530 |
+
# define CV_CPU_FORCE_VSX3 1
|
| 531 |
+
# define CV_CPU_HAS_SUPPORT_VSX3 1
|
| 532 |
+
# define CV_CPU_CALL_VSX3(fn, args) return (cpu_baseline::fn args)
|
| 533 |
+
# define CV_CPU_CALL_VSX3_(fn, args) return (opt_VSX3::fn args)
|
| 534 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_VSX3
|
| 535 |
+
# define CV_TRY_VSX3 1
|
| 536 |
+
# define CV_CPU_FORCE_VSX3 0
|
| 537 |
+
# define CV_CPU_HAS_SUPPORT_VSX3 (cv::checkHardwareSupport(CV_CPU_VSX3))
|
| 538 |
+
# define CV_CPU_CALL_VSX3(fn, args) if (CV_CPU_HAS_SUPPORT_VSX3) return (opt_VSX3::fn args)
|
| 539 |
+
# define CV_CPU_CALL_VSX3_(fn, args) if (CV_CPU_HAS_SUPPORT_VSX3) return (opt_VSX3::fn args)
|
| 540 |
+
#else
|
| 541 |
+
# define CV_TRY_VSX3 0
|
| 542 |
+
# define CV_CPU_FORCE_VSX3 0
|
| 543 |
+
# define CV_CPU_HAS_SUPPORT_VSX3 0
|
| 544 |
+
# define CV_CPU_CALL_VSX3(fn, args)
|
| 545 |
+
# define CV_CPU_CALL_VSX3_(fn, args)
|
| 546 |
+
#endif
|
| 547 |
+
#define __CV_CPU_DISPATCH_CHAIN_VSX3(fn, args, mode, ...) CV_CPU_CALL_VSX3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 548 |
+
|
| 549 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_RVV
|
| 550 |
+
# define CV_TRY_RVV 1
|
| 551 |
+
# define CV_CPU_FORCE_RVV 1
|
| 552 |
+
# define CV_CPU_HAS_SUPPORT_RVV 1
|
| 553 |
+
# define CV_CPU_CALL_RVV(fn, args) return (cpu_baseline::fn args)
|
| 554 |
+
# define CV_CPU_CALL_RVV_(fn, args) return (opt_RVV::fn args)
|
| 555 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_RVV
|
| 556 |
+
# define CV_TRY_RVV 1
|
| 557 |
+
# define CV_CPU_FORCE_RVV 0
|
| 558 |
+
# define CV_CPU_HAS_SUPPORT_RVV (cv::checkHardwareSupport(CV_CPU_RVV))
|
| 559 |
+
# define CV_CPU_CALL_RVV(fn, args) if (CV_CPU_HAS_SUPPORT_RVV) return (opt_RVV::fn args)
|
| 560 |
+
# define CV_CPU_CALL_RVV_(fn, args) if (CV_CPU_HAS_SUPPORT_RVV) return (opt_RVV::fn args)
|
| 561 |
+
#else
|
| 562 |
+
# define CV_TRY_RVV 0
|
| 563 |
+
# define CV_CPU_FORCE_RVV 0
|
| 564 |
+
# define CV_CPU_HAS_SUPPORT_RVV 0
|
| 565 |
+
# define CV_CPU_CALL_RVV(fn, args)
|
| 566 |
+
# define CV_CPU_CALL_RVV_(fn, args)
|
| 567 |
+
#endif
|
| 568 |
+
#define __CV_CPU_DISPATCH_CHAIN_RVV(fn, args, mode, ...) CV_CPU_CALL_RVV(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 569 |
+
|
| 570 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_LSX
|
| 571 |
+
# define CV_TRY_LSX 1
|
| 572 |
+
# define CV_CPU_FORCE_LSX 1
|
| 573 |
+
# define CV_CPU_HAS_SUPPORT_LSX 1
|
| 574 |
+
# define CV_CPU_CALL_LSX(fn, args) return (cpu_baseline::fn args)
|
| 575 |
+
# define CV_CPU_CALL_LSX_(fn, args) return (opt_LSX::fn args)
|
| 576 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_LSX
|
| 577 |
+
# define CV_TRY_LSX 1
|
| 578 |
+
# define CV_CPU_FORCE_LSX 0
|
| 579 |
+
# define CV_CPU_HAS_SUPPORT_LSX (cv::checkHardwareSupport(CV_CPU_LSX))
|
| 580 |
+
# define CV_CPU_CALL_LSX(fn, args) if (CV_CPU_HAS_SUPPORT_LSX) return (opt_LSX::fn args)
|
| 581 |
+
# define CV_CPU_CALL_LSX_(fn, args) if (CV_CPU_HAS_SUPPORT_LSX) return (opt_LSX::fn args)
|
| 582 |
+
#else
|
| 583 |
+
# define CV_TRY_LSX 0
|
| 584 |
+
# define CV_CPU_FORCE_LSX 0
|
| 585 |
+
# define CV_CPU_HAS_SUPPORT_LSX 0
|
| 586 |
+
# define CV_CPU_CALL_LSX(fn, args)
|
| 587 |
+
# define CV_CPU_CALL_LSX_(fn, args)
|
| 588 |
+
#endif
|
| 589 |
+
#define __CV_CPU_DISPATCH_CHAIN_LSX(fn, args, mode, ...) CV_CPU_CALL_LSX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 590 |
+
|
| 591 |
+
#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_LASX
|
| 592 |
+
# define CV_TRY_LASX 1
|
| 593 |
+
# define CV_CPU_FORCE_LASX 1
|
| 594 |
+
# define CV_CPU_HAS_SUPPORT_LASX 1
|
| 595 |
+
# define CV_CPU_CALL_LASX(fn, args) return (cpu_baseline::fn args)
|
| 596 |
+
# define CV_CPU_CALL_LASX_(fn, args) return (opt_LASX::fn args)
|
| 597 |
+
#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_LASX
|
| 598 |
+
# define CV_TRY_LASX 1
|
| 599 |
+
# define CV_CPU_FORCE_LASX 0
|
| 600 |
+
# define CV_CPU_HAS_SUPPORT_LASX (cv::checkHardwareSupport(CV_CPU_LASX))
|
| 601 |
+
# define CV_CPU_CALL_LASX(fn, args) if (CV_CPU_HAS_SUPPORT_LASX) return (opt_LASX::fn args)
|
| 602 |
+
# define CV_CPU_CALL_LASX_(fn, args) if (CV_CPU_HAS_SUPPORT_LASX) return (opt_LASX::fn args)
|
| 603 |
+
#else
|
| 604 |
+
# define CV_TRY_LASX 0
|
| 605 |
+
# define CV_CPU_FORCE_LASX 0
|
| 606 |
+
# define CV_CPU_HAS_SUPPORT_LASX 0
|
| 607 |
+
# define CV_CPU_CALL_LASX(fn, args)
|
| 608 |
+
# define CV_CPU_CALL_LASX_(fn, args)
|
| 609 |
+
#endif
|
| 610 |
+
#define __CV_CPU_DISPATCH_CHAIN_LASX(fn, args, mode, ...) CV_CPU_CALL_LASX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
|
| 611 |
+
|
| 612 |
+
#define CV_CPU_CALL_BASELINE(fn, args) return (cpu_baseline::fn args)
|
| 613 |
+
#define __CV_CPU_DISPATCH_CHAIN_BASELINE(fn, args, mode, ...) CV_CPU_CALL_BASELINE(fn, args) /* last in sequence */
|
3rdparty/opencv/include/opencv2/core/cvdef.h
ADDED
|
@@ -0,0 +1,948 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*M///////////////////////////////////////////////////////////////////////////////////////
|
| 2 |
+
//
|
| 3 |
+
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
| 4 |
+
//
|
| 5 |
+
// By downloading, copying, installing or using the software you agree to this license.
|
| 6 |
+
// If you do not agree to this license, do not download, install,
|
| 7 |
+
// copy or use the software.
|
| 8 |
+
//
|
| 9 |
+
//
|
| 10 |
+
// License Agreement
|
| 11 |
+
// For Open Source Computer Vision Library
|
| 12 |
+
//
|
| 13 |
+
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
| 14 |
+
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
| 15 |
+
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
| 16 |
+
// Copyright (C) 2015, Itseez Inc., all rights reserved.
|
| 17 |
+
// Third party copyrights are property of their respective owners.
|
| 18 |
+
//
|
| 19 |
+
// Redistribution and use in source and binary forms, with or without modification,
|
| 20 |
+
// are permitted provided that the following conditions are met:
|
| 21 |
+
//
|
| 22 |
+
// * Redistribution's of source code must retain the above copyright notice,
|
| 23 |
+
// this list of conditions and the following disclaimer.
|
| 24 |
+
//
|
| 25 |
+
// * Redistribution's in binary form must reproduce the above copyright notice,
|
| 26 |
+
// this list of conditions and the following disclaimer in the documentation
|
| 27 |
+
// and/or other materials provided with the distribution.
|
| 28 |
+
//
|
| 29 |
+
// * The name of the copyright holders may not be used to endorse or promote products
|
| 30 |
+
// derived from this software without specific prior written permission.
|
| 31 |
+
//
|
| 32 |
+
// This software is provided by the copyright holders and contributors "as is" and
|
| 33 |
+
// any express or implied warranties, including, but not limited to, the implied
|
| 34 |
+
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
| 35 |
+
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
| 36 |
+
// indirect, incidental, special, exemplary, or consequential damages
|
| 37 |
+
// (including, but not limited to, procurement of substitute goods or services;
|
| 38 |
+
// loss of use, data, or profits; or business interruption) however caused
|
| 39 |
+
// and on any theory of liability, whether in contract, strict liability,
|
| 40 |
+
// or tort (including negligence or otherwise) arising in any way out of
|
| 41 |
+
// the use of this software, even if advised of the possibility of such damage.
|
| 42 |
+
//
|
| 43 |
+
//M*/
|
| 44 |
+
|
| 45 |
+
#ifndef OPENCV_CORE_CVDEF_H
|
| 46 |
+
#define OPENCV_CORE_CVDEF_H
|
| 47 |
+
|
| 48 |
+
#include "opencv2/core/version.hpp"
|
| 49 |
+
|
| 50 |
+
//! @addtogroup core_utils
|
| 51 |
+
//! @{
|
| 52 |
+
|
| 53 |
+
#ifdef OPENCV_INCLUDE_PORT_FILE // User-provided header file with custom platform configuration
|
| 54 |
+
#include OPENCV_INCLUDE_PORT_FILE
|
| 55 |
+
#endif
|
| 56 |
+
|
| 57 |
+
#if !defined CV_DOXYGEN && !defined CV_IGNORE_DEBUG_BUILD_GUARD
|
| 58 |
+
#if (defined(_MSC_VER) && (defined(DEBUG) || defined(_DEBUG))) || \
|
| 59 |
+
(defined(_GLIBCXX_DEBUG) || defined(_GLIBCXX_DEBUG_PEDANTIC))
|
| 60 |
+
// Guard to prevent using of binary incompatible binaries / runtimes
|
| 61 |
+
// https://github.com/opencv/opencv/pull/9161
|
| 62 |
+
#define CV__DEBUG_NS_BEGIN namespace debug_build_guard {
|
| 63 |
+
#define CV__DEBUG_NS_END }
|
| 64 |
+
namespace cv { namespace debug_build_guard { } using namespace debug_build_guard; }
|
| 65 |
+
#endif
|
| 66 |
+
#endif
|
| 67 |
+
|
| 68 |
+
#ifndef CV__DEBUG_NS_BEGIN
|
| 69 |
+
#define CV__DEBUG_NS_BEGIN
|
| 70 |
+
#define CV__DEBUG_NS_END
|
| 71 |
+
#endif
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
#ifdef __OPENCV_BUILD
|
| 75 |
+
#include "cvconfig.h"
|
| 76 |
+
#endif
|
| 77 |
+
|
| 78 |
+
#ifndef __CV_EXPAND
|
| 79 |
+
#define __CV_EXPAND(x) x
|
| 80 |
+
#endif
|
| 81 |
+
|
| 82 |
+
#ifndef __CV_CAT
|
| 83 |
+
#define __CV_CAT__(x, y) x ## y
|
| 84 |
+
#define __CV_CAT_(x, y) __CV_CAT__(x, y)
|
| 85 |
+
#define __CV_CAT(x, y) __CV_CAT_(x, y)
|
| 86 |
+
#endif
|
| 87 |
+
|
| 88 |
+
#define __CV_VA_NUM_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
|
| 89 |
+
#define __CV_VA_NUM_ARGS(...) __CV_EXPAND(__CV_VA_NUM_ARGS_HELPER(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0))
|
| 90 |
+
|
| 91 |
+
#ifdef CV_Func
|
| 92 |
+
// keep current value (through OpenCV port file)
|
| 93 |
+
#elif defined __GNUC__ || (defined (__cpluscplus) && (__cpluscplus >= 201103))
|
| 94 |
+
#define CV_Func __func__
|
| 95 |
+
#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major__ >= 305)
|
| 96 |
+
#define CV_Func __func__
|
| 97 |
+
#elif defined(__STDC_VERSION__) && (__STDC_VERSION >= 199901)
|
| 98 |
+
#define CV_Func __func__
|
| 99 |
+
#elif defined _MSC_VER
|
| 100 |
+
#define CV_Func __FUNCTION__
|
| 101 |
+
#elif defined(__INTEL_COMPILER) && (_INTEL_COMPILER >= 600)
|
| 102 |
+
#define CV_Func __FUNCTION__
|
| 103 |
+
#elif defined __IBMCPP__ && __IBMCPP__ >=500
|
| 104 |
+
#define CV_Func __FUNCTION__
|
| 105 |
+
#elif defined __BORLAND__ && (__BORLANDC__ >= 0x550)
|
| 106 |
+
#define CV_Func __FUNC__
|
| 107 |
+
#else
|
| 108 |
+
#define CV_Func "<unknown>"
|
| 109 |
+
#endif
|
| 110 |
+
|
| 111 |
+
//! @cond IGNORED
|
| 112 |
+
|
| 113 |
+
//////////////// static assert /////////////////
|
| 114 |
+
#define CVAUX_CONCAT_EXP(a, b) a##b
|
| 115 |
+
#define CVAUX_CONCAT(a, b) CVAUX_CONCAT_EXP(a,b)
|
| 116 |
+
|
| 117 |
+
#if defined(__clang__)
|
| 118 |
+
# ifndef __has_extension
|
| 119 |
+
# define __has_extension __has_feature /* compatibility, for older versions of clang */
|
| 120 |
+
# endif
|
| 121 |
+
# if __has_extension(cxx_static_assert)
|
| 122 |
+
# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition)
|
| 123 |
+
# elif __has_extension(c_static_assert)
|
| 124 |
+
# define CV_StaticAssert(condition, reason) _Static_assert((condition), reason " " #condition)
|
| 125 |
+
# endif
|
| 126 |
+
#elif defined(__GNUC__)
|
| 127 |
+
# if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L)
|
| 128 |
+
# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition)
|
| 129 |
+
# endif
|
| 130 |
+
#elif defined(_MSC_VER)
|
| 131 |
+
# if _MSC_VER >= 1600 /* MSVC 10 */
|
| 132 |
+
# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition)
|
| 133 |
+
# endif
|
| 134 |
+
#endif
|
| 135 |
+
#ifndef CV_StaticAssert
|
| 136 |
+
# if !defined(__clang__) && defined(__GNUC__) && (__GNUC__*100 + __GNUC_MINOR__ > 302)
|
| 137 |
+
# define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error("CV_StaticAssert: " reason " " #condition))) CV_StaticAssert(); ((condition) ? 0 : CV_StaticAssert()); })
|
| 138 |
+
# else
|
| 139 |
+
namespace cv {
|
| 140 |
+
template <bool x> struct CV_StaticAssert_failed;
|
| 141 |
+
template <> struct CV_StaticAssert_failed<true> { enum { val = 1 }; };
|
| 142 |
+
template<int x> struct CV_StaticAssert_test {};
|
| 143 |
+
}
|
| 144 |
+
# define CV_StaticAssert(condition, reason)\
|
| 145 |
+
typedef cv::CV_StaticAssert_test< sizeof(cv::CV_StaticAssert_failed< static_cast<bool>(condition) >) > CVAUX_CONCAT(CV_StaticAssert_failed_at_, __LINE__)
|
| 146 |
+
# endif
|
| 147 |
+
#endif
|
| 148 |
+
|
| 149 |
+
// Suppress warning "-Wdeprecated-declarations" / C4996
|
| 150 |
+
#if defined(_MSC_VER)
|
| 151 |
+
#define CV_DO_PRAGMA(x) __pragma(x)
|
| 152 |
+
#elif defined(__GNUC__)
|
| 153 |
+
#define CV_DO_PRAGMA(x) _Pragma (#x)
|
| 154 |
+
#else
|
| 155 |
+
#define CV_DO_PRAGMA(x)
|
| 156 |
+
#endif
|
| 157 |
+
|
| 158 |
+
#ifdef _MSC_VER
|
| 159 |
+
#define CV_SUPPRESS_DEPRECATED_START \
|
| 160 |
+
CV_DO_PRAGMA(warning(push)) \
|
| 161 |
+
CV_DO_PRAGMA(warning(disable: 4996))
|
| 162 |
+
#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(warning(pop))
|
| 163 |
+
#elif defined (__clang__) || ((__GNUC__) && (__GNUC__*100 + __GNUC_MINOR__ > 405))
|
| 164 |
+
#define CV_SUPPRESS_DEPRECATED_START \
|
| 165 |
+
CV_DO_PRAGMA(GCC diagnostic push) \
|
| 166 |
+
CV_DO_PRAGMA(GCC diagnostic ignored "-Wdeprecated-declarations")
|
| 167 |
+
#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(GCC diagnostic pop)
|
| 168 |
+
#else
|
| 169 |
+
#define CV_SUPPRESS_DEPRECATED_START
|
| 170 |
+
#define CV_SUPPRESS_DEPRECATED_END
|
| 171 |
+
#endif
|
| 172 |
+
|
| 173 |
+
#define CV_UNUSED(name) (void)name
|
| 174 |
+
|
| 175 |
+
//! @endcond
|
| 176 |
+
|
| 177 |
+
// undef problematic defines sometimes defined by system headers (windows.h in particular)
|
| 178 |
+
#undef small
|
| 179 |
+
#undef min
|
| 180 |
+
#undef max
|
| 181 |
+
#undef abs
|
| 182 |
+
#undef Complex
|
| 183 |
+
|
| 184 |
+
#if defined __cplusplus
|
| 185 |
+
#include <limits>
|
| 186 |
+
#else
|
| 187 |
+
#include <limits.h>
|
| 188 |
+
#endif
|
| 189 |
+
|
| 190 |
+
#include "opencv2/core/hal/interface.h"
|
| 191 |
+
|
| 192 |
+
#if defined __ICL
|
| 193 |
+
# define CV_ICC __ICL
|
| 194 |
+
#elif defined __ICC
|
| 195 |
+
# define CV_ICC __ICC
|
| 196 |
+
#elif defined __ECL
|
| 197 |
+
# define CV_ICC __ECL
|
| 198 |
+
#elif defined __ECC
|
| 199 |
+
# define CV_ICC __ECC
|
| 200 |
+
#elif defined __INTEL_COMPILER
|
| 201 |
+
# define CV_ICC __INTEL_COMPILER
|
| 202 |
+
#endif
|
| 203 |
+
|
| 204 |
+
#if defined _WIN32
|
| 205 |
+
# define CV_CDECL __cdecl
|
| 206 |
+
# define CV_STDCALL __stdcall
|
| 207 |
+
#else
|
| 208 |
+
# define CV_CDECL
|
| 209 |
+
# define CV_STDCALL
|
| 210 |
+
#endif
|
| 211 |
+
|
| 212 |
+
#ifndef CV_INLINE
|
| 213 |
+
# if defined __cplusplus
|
| 214 |
+
# define CV_INLINE static inline
|
| 215 |
+
# elif defined _MSC_VER
|
| 216 |
+
# define CV_INLINE __inline
|
| 217 |
+
# else
|
| 218 |
+
# define CV_INLINE static
|
| 219 |
+
# endif
|
| 220 |
+
#endif
|
| 221 |
+
|
| 222 |
+
#ifndef CV_ALWAYS_INLINE
|
| 223 |
+
#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
|
| 224 |
+
#define CV_ALWAYS_INLINE inline __attribute__((always_inline))
|
| 225 |
+
#elif defined(_MSC_VER)
|
| 226 |
+
#define CV_ALWAYS_INLINE __forceinline
|
| 227 |
+
#else
|
| 228 |
+
#define CV_ALWAYS_INLINE inline
|
| 229 |
+
#endif
|
| 230 |
+
#endif
|
| 231 |
+
|
| 232 |
+
#if defined CV_DISABLE_OPTIMIZATION || (defined CV_ICC && !defined CV_ENABLE_UNROLLED)
|
| 233 |
+
# define CV_ENABLE_UNROLLED 0
|
| 234 |
+
#else
|
| 235 |
+
# define CV_ENABLE_UNROLLED 1
|
| 236 |
+
#endif
|
| 237 |
+
|
| 238 |
+
#ifdef __GNUC__
|
| 239 |
+
# define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
|
| 240 |
+
#elif defined _MSC_VER
|
| 241 |
+
# define CV_DECL_ALIGNED(x) __declspec(align(x))
|
| 242 |
+
#else
|
| 243 |
+
# define CV_DECL_ALIGNED(x)
|
| 244 |
+
#endif
|
| 245 |
+
|
| 246 |
+
/* CPU features and intrinsics support */
|
| 247 |
+
#define CV_CPU_NONE 0
|
| 248 |
+
#define CV_CPU_MMX 1
|
| 249 |
+
#define CV_CPU_SSE 2
|
| 250 |
+
#define CV_CPU_SSE2 3
|
| 251 |
+
#define CV_CPU_SSE3 4
|
| 252 |
+
#define CV_CPU_SSSE3 5
|
| 253 |
+
#define CV_CPU_SSE4_1 6
|
| 254 |
+
#define CV_CPU_SSE4_2 7
|
| 255 |
+
#define CV_CPU_POPCNT 8
|
| 256 |
+
#define CV_CPU_FP16 9
|
| 257 |
+
#define CV_CPU_AVX 10
|
| 258 |
+
#define CV_CPU_AVX2 11
|
| 259 |
+
#define CV_CPU_FMA3 12
|
| 260 |
+
|
| 261 |
+
#define CV_CPU_AVX_512F 13
|
| 262 |
+
#define CV_CPU_AVX_512BW 14
|
| 263 |
+
#define CV_CPU_AVX_512CD 15
|
| 264 |
+
#define CV_CPU_AVX_512DQ 16
|
| 265 |
+
#define CV_CPU_AVX_512ER 17
|
| 266 |
+
#define CV_CPU_AVX_512IFMA512 18 // deprecated
|
| 267 |
+
#define CV_CPU_AVX_512IFMA 18
|
| 268 |
+
#define CV_CPU_AVX_512PF 19
|
| 269 |
+
#define CV_CPU_AVX_512VBMI 20
|
| 270 |
+
#define CV_CPU_AVX_512VL 21
|
| 271 |
+
#define CV_CPU_AVX_512VBMI2 22
|
| 272 |
+
#define CV_CPU_AVX_512VNNI 23
|
| 273 |
+
#define CV_CPU_AVX_512BITALG 24
|
| 274 |
+
#define CV_CPU_AVX_512VPOPCNTDQ 25
|
| 275 |
+
#define CV_CPU_AVX_5124VNNIW 26
|
| 276 |
+
#define CV_CPU_AVX_5124FMAPS 27
|
| 277 |
+
|
| 278 |
+
#define CV_CPU_NEON 100
|
| 279 |
+
#define CV_CPU_NEON_DOTPROD 101
|
| 280 |
+
#define CV_CPU_NEON_FP16 102
|
| 281 |
+
#define CV_CPU_NEON_BF16 103
|
| 282 |
+
|
| 283 |
+
#define CV_CPU_MSA 150
|
| 284 |
+
|
| 285 |
+
#define CV_CPU_RISCVV 170
|
| 286 |
+
|
| 287 |
+
#define CV_CPU_VSX 200
|
| 288 |
+
#define CV_CPU_VSX3 201
|
| 289 |
+
|
| 290 |
+
#define CV_CPU_RVV 210
|
| 291 |
+
|
| 292 |
+
#define CV_CPU_LSX 230
|
| 293 |
+
#define CV_CPU_LASX 231
|
| 294 |
+
|
| 295 |
+
// CPU features groups
|
| 296 |
+
#define CV_CPU_AVX512_SKX 256
|
| 297 |
+
#define CV_CPU_AVX512_COMMON 257
|
| 298 |
+
#define CV_CPU_AVX512_KNL 258
|
| 299 |
+
#define CV_CPU_AVX512_KNM 259
|
| 300 |
+
#define CV_CPU_AVX512_CNL 260
|
| 301 |
+
#define CV_CPU_AVX512_CLX 261
|
| 302 |
+
#define CV_CPU_AVX512_ICL 262
|
| 303 |
+
|
| 304 |
+
// when adding to this list remember to update the following enum
|
| 305 |
+
#define CV_HARDWARE_MAX_FEATURE 512
|
| 306 |
+
|
| 307 |
+
/** @brief Available CPU features.
|
| 308 |
+
*/
|
| 309 |
+
enum CpuFeatures {
|
| 310 |
+
CPU_MMX = 1,
|
| 311 |
+
CPU_SSE = 2,
|
| 312 |
+
CPU_SSE2 = 3,
|
| 313 |
+
CPU_SSE3 = 4,
|
| 314 |
+
CPU_SSSE3 = 5,
|
| 315 |
+
CPU_SSE4_1 = 6,
|
| 316 |
+
CPU_SSE4_2 = 7,
|
| 317 |
+
CPU_POPCNT = 8,
|
| 318 |
+
CPU_FP16 = 9,
|
| 319 |
+
CPU_AVX = 10,
|
| 320 |
+
CPU_AVX2 = 11,
|
| 321 |
+
CPU_FMA3 = 12,
|
| 322 |
+
|
| 323 |
+
CPU_AVX_512F = 13,
|
| 324 |
+
CPU_AVX_512BW = 14,
|
| 325 |
+
CPU_AVX_512CD = 15,
|
| 326 |
+
CPU_AVX_512DQ = 16,
|
| 327 |
+
CPU_AVX_512ER = 17,
|
| 328 |
+
CPU_AVX_512IFMA512 = 18, // deprecated
|
| 329 |
+
CPU_AVX_512IFMA = 18,
|
| 330 |
+
CPU_AVX_512PF = 19,
|
| 331 |
+
CPU_AVX_512VBMI = 20,
|
| 332 |
+
CPU_AVX_512VL = 21,
|
| 333 |
+
CPU_AVX_512VBMI2 = 22,
|
| 334 |
+
CPU_AVX_512VNNI = 23,
|
| 335 |
+
CPU_AVX_512BITALG = 24,
|
| 336 |
+
CPU_AVX_512VPOPCNTDQ= 25,
|
| 337 |
+
CPU_AVX_5124VNNIW = 26,
|
| 338 |
+
CPU_AVX_5124FMAPS = 27,
|
| 339 |
+
|
| 340 |
+
CPU_NEON = 100,
|
| 341 |
+
CPU_NEON_DOTPROD = 101,
|
| 342 |
+
CPU_NEON_FP16 = 102,
|
| 343 |
+
CPU_NEON_BF16 = 103,
|
| 344 |
+
|
| 345 |
+
CPU_MSA = 150,
|
| 346 |
+
|
| 347 |
+
CPU_RISCVV = 170,
|
| 348 |
+
|
| 349 |
+
CPU_VSX = 200,
|
| 350 |
+
CPU_VSX3 = 201,
|
| 351 |
+
|
| 352 |
+
CPU_RVV = 210,
|
| 353 |
+
|
| 354 |
+
CPU_LSX = 230,
|
| 355 |
+
CPU_LASX = 231,
|
| 356 |
+
|
| 357 |
+
CPU_AVX512_SKX = 256, //!< Skylake-X with AVX-512F/CD/BW/DQ/VL
|
| 358 |
+
CPU_AVX512_COMMON = 257, //!< Common instructions AVX-512F/CD for all CPUs that support AVX-512
|
| 359 |
+
CPU_AVX512_KNL = 258, //!< Knights Landing with AVX-512F/CD/ER/PF
|
| 360 |
+
CPU_AVX512_KNM = 259, //!< Knights Mill with AVX-512F/CD/ER/PF/4FMAPS/4VNNIW/VPOPCNTDQ
|
| 361 |
+
CPU_AVX512_CNL = 260, //!< Cannon Lake with AVX-512F/CD/BW/DQ/VL/IFMA/VBMI
|
| 362 |
+
CPU_AVX512_CLX = 261, //!< Cascade Lake with AVX-512F/CD/BW/DQ/VL/VNNI
|
| 363 |
+
CPU_AVX512_ICL = 262, //!< Ice Lake with AVX-512F/CD/BW/DQ/VL/IFMA/VBMI/VNNI/VBMI2/BITALG/VPOPCNTDQ
|
| 364 |
+
|
| 365 |
+
CPU_MAX_FEATURE = 512 // see CV_HARDWARE_MAX_FEATURE
|
| 366 |
+
};
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
#include "cv_cpu_dispatch.h"
|
| 370 |
+
|
| 371 |
+
#if !defined(CV_STRONG_ALIGNMENT) && defined(__arm__) && !(defined(__aarch64__) || defined(_M_ARM64))
|
| 372 |
+
// int*, int64* should be propertly aligned pointers on ARMv7
|
| 373 |
+
#define CV_STRONG_ALIGNMENT 1
|
| 374 |
+
#endif
|
| 375 |
+
#if !defined(CV_STRONG_ALIGNMENT)
|
| 376 |
+
#define CV_STRONG_ALIGNMENT 0
|
| 377 |
+
#endif
|
| 378 |
+
|
| 379 |
+
/* fundamental constants */
|
| 380 |
+
#define CV_PI 3.1415926535897932384626433832795
|
| 381 |
+
#define CV_2PI 6.283185307179586476925286766559
|
| 382 |
+
#define CV_LOG2 0.69314718055994530941723212145818
|
| 383 |
+
|
| 384 |
+
#if defined __ARM_FP16_FORMAT_IEEE \
|
| 385 |
+
&& !defined __CUDACC__
|
| 386 |
+
# define CV_FP16_TYPE 1
|
| 387 |
+
#else
|
| 388 |
+
# define CV_FP16_TYPE 0
|
| 389 |
+
#endif
|
| 390 |
+
|
| 391 |
+
typedef union Cv16suf
|
| 392 |
+
{
|
| 393 |
+
short i;
|
| 394 |
+
ushort u;
|
| 395 |
+
#if CV_FP16_TYPE
|
| 396 |
+
__fp16 h;
|
| 397 |
+
#endif
|
| 398 |
+
}
|
| 399 |
+
Cv16suf;
|
| 400 |
+
|
| 401 |
+
typedef union Cv32suf
|
| 402 |
+
{
|
| 403 |
+
int i;
|
| 404 |
+
unsigned u;
|
| 405 |
+
float f;
|
| 406 |
+
}
|
| 407 |
+
Cv32suf;
|
| 408 |
+
|
| 409 |
+
typedef union Cv64suf
|
| 410 |
+
{
|
| 411 |
+
int64 i;
|
| 412 |
+
uint64 u;
|
| 413 |
+
double f;
|
| 414 |
+
}
|
| 415 |
+
Cv64suf;
|
| 416 |
+
|
| 417 |
+
#ifndef OPENCV_ABI_COMPATIBILITY
|
| 418 |
+
#define OPENCV_ABI_COMPATIBILITY 400
|
| 419 |
+
#endif
|
| 420 |
+
|
| 421 |
+
#ifdef __OPENCV_BUILD
|
| 422 |
+
# define DISABLE_OPENCV_3_COMPATIBILITY
|
| 423 |
+
# define OPENCV_DISABLE_DEPRECATED_COMPATIBILITY
|
| 424 |
+
#endif
|
| 425 |
+
|
| 426 |
+
#ifndef CV_EXPORTS
|
| 427 |
+
# if (defined _WIN32 || defined WINCE || defined __CYGWIN__) && defined(CVAPI_EXPORTS)
|
| 428 |
+
# define CV_EXPORTS __declspec(dllexport)
|
| 429 |
+
# elif defined __GNUC__ && __GNUC__ >= 4 && (defined(CVAPI_EXPORTS) || defined(__APPLE__))
|
| 430 |
+
# define CV_EXPORTS __attribute__ ((visibility ("default")))
|
| 431 |
+
# endif
|
| 432 |
+
#endif
|
| 433 |
+
|
| 434 |
+
#ifndef CV_EXPORTS
|
| 435 |
+
# define CV_EXPORTS
|
| 436 |
+
#endif
|
| 437 |
+
|
| 438 |
+
#ifdef _MSC_VER
|
| 439 |
+
# define CV_EXPORTS_TEMPLATE
|
| 440 |
+
#else
|
| 441 |
+
# define CV_EXPORTS_TEMPLATE CV_EXPORTS
|
| 442 |
+
#endif
|
| 443 |
+
|
| 444 |
+
#ifndef CV_DEPRECATED
|
| 445 |
+
# if defined(__GNUC__)
|
| 446 |
+
# define CV_DEPRECATED __attribute__ ((deprecated))
|
| 447 |
+
# elif defined(_MSC_VER)
|
| 448 |
+
# define CV_DEPRECATED __declspec(deprecated)
|
| 449 |
+
# else
|
| 450 |
+
# define CV_DEPRECATED
|
| 451 |
+
# endif
|
| 452 |
+
#endif
|
| 453 |
+
|
| 454 |
+
#ifndef CV_DEPRECATED_EXTERNAL
|
| 455 |
+
# if defined(__OPENCV_BUILD)
|
| 456 |
+
# define CV_DEPRECATED_EXTERNAL /* nothing */
|
| 457 |
+
# else
|
| 458 |
+
# define CV_DEPRECATED_EXTERNAL CV_DEPRECATED
|
| 459 |
+
# endif
|
| 460 |
+
#endif
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
#ifndef CV_EXTERN_C
|
| 464 |
+
# ifdef __cplusplus
|
| 465 |
+
# define CV_EXTERN_C extern "C"
|
| 466 |
+
# else
|
| 467 |
+
# define CV_EXTERN_C
|
| 468 |
+
# endif
|
| 469 |
+
#endif
|
| 470 |
+
|
| 471 |
+
/* special informative macros for wrapper generators */
|
| 472 |
+
#define CV_EXPORTS_W CV_EXPORTS
|
| 473 |
+
#define CV_EXPORTS_W_SIMPLE CV_EXPORTS
|
| 474 |
+
#define CV_EXPORTS_AS(synonym) CV_EXPORTS
|
| 475 |
+
#define CV_EXPORTS_W_MAP CV_EXPORTS
|
| 476 |
+
#define CV_EXPORTS_W_PARAMS CV_EXPORTS
|
| 477 |
+
#define CV_IN_OUT
|
| 478 |
+
#define CV_OUT
|
| 479 |
+
#define CV_PROP
|
| 480 |
+
#define CV_PROP_RW
|
| 481 |
+
#define CV_ND // Indicates that input data should be parsed into Mat without channels
|
| 482 |
+
#define CV_WRAP
|
| 483 |
+
#define CV_WRAP_AS(synonym)
|
| 484 |
+
#define CV_WRAP_MAPPABLE(mappable)
|
| 485 |
+
#define CV_WRAP_PHANTOM(phantom_header)
|
| 486 |
+
#define CV_WRAP_DEFAULT(val)
|
| 487 |
+
/* Indicates that the function parameter has filesystem path semantic */
|
| 488 |
+
#define CV_WRAP_FILE_PATH
|
| 489 |
+
|
| 490 |
+
/****************************************************************************************\
|
| 491 |
+
* Matrix type (Mat) *
|
| 492 |
+
\****************************************************************************************/
|
| 493 |
+
|
| 494 |
+
#define CV_MAX_DIM 32
|
| 495 |
+
#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT)
|
| 496 |
+
#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1)
|
| 497 |
+
#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1)
|
| 498 |
+
#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK)
|
| 499 |
+
#define CV_MAT_CONT_FLAG_SHIFT 14
|
| 500 |
+
#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT)
|
| 501 |
+
#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG)
|
| 502 |
+
#define CV_IS_CONT_MAT CV_IS_MAT_CONT
|
| 503 |
+
#define CV_SUBMAT_FLAG_SHIFT 15
|
| 504 |
+
#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT)
|
| 505 |
+
#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG)
|
| 506 |
+
|
| 507 |
+
/** Size of each channel item,
|
| 508 |
+
0x28442211 = 0010 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */
|
| 509 |
+
#define CV_ELEM_SIZE1(type) ((0x28442211 >> CV_MAT_DEPTH(type)*4) & 15)
|
| 510 |
+
|
| 511 |
+
#define CV_ELEM_SIZE(type) (CV_MAT_CN(type)*CV_ELEM_SIZE1(type))
|
| 512 |
+
|
| 513 |
+
#ifndef MIN
|
| 514 |
+
# define MIN(a,b) ((a) > (b) ? (b) : (a))
|
| 515 |
+
#endif
|
| 516 |
+
|
| 517 |
+
#ifndef MAX
|
| 518 |
+
# define MAX(a,b) ((a) < (b) ? (b) : (a))
|
| 519 |
+
#endif
|
| 520 |
+
|
| 521 |
+
/** min & max without jumps */
|
| 522 |
+
#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1)))
|
| 523 |
+
#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1)))
|
| 524 |
+
#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t))
|
| 525 |
+
#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b)))
|
| 526 |
+
#define CV_SIGN(a) CV_CMP((a),0)
|
| 527 |
+
|
| 528 |
+
///////////////////////////////////////// Enum operators ///////////////////////////////////////
|
| 529 |
+
|
| 530 |
+
/**
|
| 531 |
+
|
| 532 |
+
Provides compatibility operators for both classical and C++11 enum classes,
|
| 533 |
+
as well as exposing the C++11 enum class members for backwards compatibility
|
| 534 |
+
|
| 535 |
+
@code
|
| 536 |
+
// Provides operators required for flag enums
|
| 537 |
+
CV_ENUM_FLAGS(AccessFlag)
|
| 538 |
+
|
| 539 |
+
// Exposes the listed members of the enum class AccessFlag to the current namespace
|
| 540 |
+
CV_ENUM_CLASS_EXPOSE(AccessFlag, ACCESS_READ [, ACCESS_WRITE [, ...] ]);
|
| 541 |
+
@endcode
|
| 542 |
+
*/
|
| 543 |
+
|
| 544 |
+
#define __CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST) \
|
| 545 |
+
static const EnumType MEMBER_CONST = EnumType::MEMBER_CONST; \
|
| 546 |
+
|
| 547 |
+
#define __CV_ENUM_CLASS_EXPOSE_2(EnumType, MEMBER_CONST, ...) \
|
| 548 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 549 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_1(EnumType, __VA_ARGS__)); \
|
| 550 |
+
|
| 551 |
+
#define __CV_ENUM_CLASS_EXPOSE_3(EnumType, MEMBER_CONST, ...) \
|
| 552 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 553 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_2(EnumType, __VA_ARGS__)); \
|
| 554 |
+
|
| 555 |
+
#define __CV_ENUM_CLASS_EXPOSE_4(EnumType, MEMBER_CONST, ...) \
|
| 556 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 557 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_3(EnumType, __VA_ARGS__)); \
|
| 558 |
+
|
| 559 |
+
#define __CV_ENUM_CLASS_EXPOSE_5(EnumType, MEMBER_CONST, ...) \
|
| 560 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 561 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_4(EnumType, __VA_ARGS__)); \
|
| 562 |
+
|
| 563 |
+
#define __CV_ENUM_CLASS_EXPOSE_6(EnumType, MEMBER_CONST, ...) \
|
| 564 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 565 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_5(EnumType, __VA_ARGS__)); \
|
| 566 |
+
|
| 567 |
+
#define __CV_ENUM_CLASS_EXPOSE_7(EnumType, MEMBER_CONST, ...) \
|
| 568 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 569 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_6(EnumType, __VA_ARGS__)); \
|
| 570 |
+
|
| 571 |
+
#define __CV_ENUM_CLASS_EXPOSE_8(EnumType, MEMBER_CONST, ...) \
|
| 572 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 573 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_7(EnumType, __VA_ARGS__)); \
|
| 574 |
+
|
| 575 |
+
#define __CV_ENUM_CLASS_EXPOSE_9(EnumType, MEMBER_CONST, ...) \
|
| 576 |
+
__CV_ENUM_CLASS_EXPOSE_1(EnumType, MEMBER_CONST); \
|
| 577 |
+
__CV_EXPAND(__CV_ENUM_CLASS_EXPOSE_8(EnumType, __VA_ARGS__)); \
|
| 578 |
+
|
| 579 |
+
#define __CV_ENUM_FLAGS_LOGICAL_NOT(EnumType) \
|
| 580 |
+
static inline bool operator!(const EnumType& val) \
|
| 581 |
+
{ \
|
| 582 |
+
typedef std::underlying_type<EnumType>::type UnderlyingType; \
|
| 583 |
+
return !static_cast<UnderlyingType>(val); \
|
| 584 |
+
} \
|
| 585 |
+
|
| 586 |
+
#define __CV_ENUM_FLAGS_LOGICAL_NOT_EQ(Arg1Type, Arg2Type) \
|
| 587 |
+
static inline bool operator!=(const Arg1Type& a, const Arg2Type& b) \
|
| 588 |
+
{ \
|
| 589 |
+
return static_cast<int>(a) != static_cast<int>(b); \
|
| 590 |
+
} \
|
| 591 |
+
|
| 592 |
+
#define __CV_ENUM_FLAGS_LOGICAL_EQ(Arg1Type, Arg2Type) \
|
| 593 |
+
static inline bool operator==(const Arg1Type& a, const Arg2Type& b) \
|
| 594 |
+
{ \
|
| 595 |
+
return static_cast<int>(a) == static_cast<int>(b); \
|
| 596 |
+
} \
|
| 597 |
+
|
| 598 |
+
#define __CV_ENUM_FLAGS_BITWISE_NOT(EnumType) \
|
| 599 |
+
static inline EnumType operator~(const EnumType& val) \
|
| 600 |
+
{ \
|
| 601 |
+
typedef std::underlying_type<EnumType>::type UnderlyingType; \
|
| 602 |
+
return static_cast<EnumType>(~static_cast<UnderlyingType>(val)); \
|
| 603 |
+
} \
|
| 604 |
+
|
| 605 |
+
#define __CV_ENUM_FLAGS_BITWISE_OR(EnumType, Arg1Type, Arg2Type) \
|
| 606 |
+
static inline EnumType operator|(const Arg1Type& a, const Arg2Type& b) \
|
| 607 |
+
{ \
|
| 608 |
+
typedef std::underlying_type<EnumType>::type UnderlyingType; \
|
| 609 |
+
return static_cast<EnumType>(static_cast<UnderlyingType>(a) | static_cast<UnderlyingType>(b)); \
|
| 610 |
+
} \
|
| 611 |
+
|
| 612 |
+
#define __CV_ENUM_FLAGS_BITWISE_AND(EnumType, Arg1Type, Arg2Type) \
|
| 613 |
+
static inline EnumType operator&(const Arg1Type& a, const Arg2Type& b) \
|
| 614 |
+
{ \
|
| 615 |
+
typedef std::underlying_type<EnumType>::type UnderlyingType; \
|
| 616 |
+
return static_cast<EnumType>(static_cast<UnderlyingType>(a) & static_cast<UnderlyingType>(b)); \
|
| 617 |
+
} \
|
| 618 |
+
|
| 619 |
+
#define __CV_ENUM_FLAGS_BITWISE_XOR(EnumType, Arg1Type, Arg2Type) \
|
| 620 |
+
static inline EnumType operator^(const Arg1Type& a, const Arg2Type& b) \
|
| 621 |
+
{ \
|
| 622 |
+
typedef std::underlying_type<EnumType>::type UnderlyingType; \
|
| 623 |
+
return static_cast<EnumType>(static_cast<UnderlyingType>(a) ^ static_cast<UnderlyingType>(b)); \
|
| 624 |
+
} \
|
| 625 |
+
|
| 626 |
+
#define __CV_ENUM_FLAGS_BITWISE_OR_EQ(EnumType, Arg1Type) \
|
| 627 |
+
static inline EnumType& operator|=(EnumType& _this, const Arg1Type& val) \
|
| 628 |
+
{ \
|
| 629 |
+
_this = static_cast<EnumType>(static_cast<int>(_this) | static_cast<int>(val)); \
|
| 630 |
+
return _this; \
|
| 631 |
+
} \
|
| 632 |
+
|
| 633 |
+
#define __CV_ENUM_FLAGS_BITWISE_AND_EQ(EnumType, Arg1Type) \
|
| 634 |
+
static inline EnumType& operator&=(EnumType& _this, const Arg1Type& val) \
|
| 635 |
+
{ \
|
| 636 |
+
_this = static_cast<EnumType>(static_cast<int>(_this) & static_cast<int>(val)); \
|
| 637 |
+
return _this; \
|
| 638 |
+
} \
|
| 639 |
+
|
| 640 |
+
#define __CV_ENUM_FLAGS_BITWISE_XOR_EQ(EnumType, Arg1Type) \
|
| 641 |
+
static inline EnumType& operator^=(EnumType& _this, const Arg1Type& val) \
|
| 642 |
+
{ \
|
| 643 |
+
_this = static_cast<EnumType>(static_cast<int>(_this) ^ static_cast<int>(val)); \
|
| 644 |
+
return _this; \
|
| 645 |
+
} \
|
| 646 |
+
|
| 647 |
+
#define CV_ENUM_CLASS_EXPOSE(EnumType, ...) \
|
| 648 |
+
__CV_EXPAND(__CV_CAT(__CV_ENUM_CLASS_EXPOSE_, __CV_VA_NUM_ARGS(__VA_ARGS__))(EnumType, __VA_ARGS__)); \
|
| 649 |
+
|
| 650 |
+
#define CV_ENUM_FLAGS(EnumType) \
|
| 651 |
+
__CV_ENUM_FLAGS_LOGICAL_NOT (EnumType) \
|
| 652 |
+
__CV_ENUM_FLAGS_LOGICAL_EQ (EnumType, int) \
|
| 653 |
+
__CV_ENUM_FLAGS_LOGICAL_NOT_EQ (EnumType, int) \
|
| 654 |
+
\
|
| 655 |
+
__CV_ENUM_FLAGS_BITWISE_NOT (EnumType) \
|
| 656 |
+
__CV_ENUM_FLAGS_BITWISE_OR (EnumType, EnumType, EnumType) \
|
| 657 |
+
__CV_ENUM_FLAGS_BITWISE_AND (EnumType, EnumType, EnumType) \
|
| 658 |
+
__CV_ENUM_FLAGS_BITWISE_XOR (EnumType, EnumType, EnumType) \
|
| 659 |
+
\
|
| 660 |
+
__CV_ENUM_FLAGS_BITWISE_OR_EQ (EnumType, EnumType) \
|
| 661 |
+
__CV_ENUM_FLAGS_BITWISE_AND_EQ (EnumType, EnumType) \
|
| 662 |
+
__CV_ENUM_FLAGS_BITWISE_XOR_EQ (EnumType, EnumType) \
|
| 663 |
+
|
| 664 |
+
/****************************************************************************************\
|
| 665 |
+
* static analysys *
|
| 666 |
+
\****************************************************************************************/
|
| 667 |
+
|
| 668 |
+
// In practice, some macro are not processed correctly (noreturn is not detected).
|
| 669 |
+
// We need to use simplified definition for them.
|
| 670 |
+
#ifndef CV_STATIC_ANALYSIS
|
| 671 |
+
# if defined(__KLOCWORK__) || defined(__clang_analyzer__) || defined(__COVERITY__)
|
| 672 |
+
# define CV_STATIC_ANALYSIS 1
|
| 673 |
+
# endif
|
| 674 |
+
#else
|
| 675 |
+
# if defined(CV_STATIC_ANALYSIS) && !(__CV_CAT(1, CV_STATIC_ANALYSIS) == 1) // defined and not empty
|
| 676 |
+
# if 0 == CV_STATIC_ANALYSIS
|
| 677 |
+
# undef CV_STATIC_ANALYSIS
|
| 678 |
+
# endif
|
| 679 |
+
# endif
|
| 680 |
+
#endif
|
| 681 |
+
|
| 682 |
+
/****************************************************************************************\
|
| 683 |
+
* Thread sanitizer *
|
| 684 |
+
\****************************************************************************************/
|
| 685 |
+
#ifndef CV_THREAD_SANITIZER
|
| 686 |
+
# if defined(__has_feature)
|
| 687 |
+
# if __has_feature(thread_sanitizer)
|
| 688 |
+
# define CV_THREAD_SANITIZER
|
| 689 |
+
# endif
|
| 690 |
+
# endif
|
| 691 |
+
#endif
|
| 692 |
+
|
| 693 |
+
/****************************************************************************************\
|
| 694 |
+
* exchange-add operation for atomic operations on reference counters *
|
| 695 |
+
\****************************************************************************************/
|
| 696 |
+
|
| 697 |
+
#ifdef CV_XADD
|
| 698 |
+
// allow to use user-defined macro
|
| 699 |
+
#elif defined __GNUC__ || defined __clang__
|
| 700 |
+
# if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__) && !defined __INTEL_COMPILER
|
| 701 |
+
# ifdef __ATOMIC_ACQ_REL
|
| 702 |
+
# define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)
|
| 703 |
+
# else
|
| 704 |
+
# define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4)
|
| 705 |
+
# endif
|
| 706 |
+
# else
|
| 707 |
+
# if defined __ATOMIC_ACQ_REL && !defined __clang__
|
| 708 |
+
// version for gcc >= 4.7
|
| 709 |
+
# define CV_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL)
|
| 710 |
+
# else
|
| 711 |
+
# define CV_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta))
|
| 712 |
+
# endif
|
| 713 |
+
# endif
|
| 714 |
+
#elif defined _MSC_VER && !defined RC_INVOKED
|
| 715 |
+
# include <intrin.h>
|
| 716 |
+
# define CV_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta)
|
| 717 |
+
#else
|
| 718 |
+
#ifdef OPENCV_FORCE_UNSAFE_XADD
|
| 719 |
+
CV_INLINE int CV_XADD(int* addr, int delta) { int tmp = *addr; *addr += delta; return tmp; }
|
| 720 |
+
#else
|
| 721 |
+
#error "OpenCV: can't define safe CV_XADD macro for current platform (unsupported). Define CV_XADD macro through custom port header (see OPENCV_INCLUDE_PORT_FILE)"
|
| 722 |
+
#endif
|
| 723 |
+
#endif
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
/****************************************************************************************\
|
| 727 |
+
* CV_NORETURN attribute *
|
| 728 |
+
\****************************************************************************************/
|
| 729 |
+
|
| 730 |
+
#ifndef CV_NORETURN
|
| 731 |
+
# if defined(__GNUC__)
|
| 732 |
+
# define CV_NORETURN __attribute__((__noreturn__))
|
| 733 |
+
# elif defined(_MSC_VER) && (_MSC_VER >= 1300)
|
| 734 |
+
# define CV_NORETURN __declspec(noreturn)
|
| 735 |
+
# else
|
| 736 |
+
# define CV_NORETURN /* nothing by default */
|
| 737 |
+
# endif
|
| 738 |
+
#endif
|
| 739 |
+
|
| 740 |
+
/****************************************************************************************\
|
| 741 |
+
* CV_NODISCARD_STD attribute (C++17) *
|
| 742 |
+
* encourages the compiler to issue a warning if the return value is discarded *
|
| 743 |
+
\****************************************************************************************/
|
| 744 |
+
#ifndef CV_NODISCARD_STD
|
| 745 |
+
# ifndef __has_cpp_attribute
|
| 746 |
+
// workaround preprocessor non-compliance https://reviews.llvm.org/D57851
|
| 747 |
+
# define __has_cpp_attribute(__x) 0
|
| 748 |
+
# endif
|
| 749 |
+
# if __has_cpp_attribute(nodiscard)
|
| 750 |
+
# if defined(__NVCC__) && __CUDACC_VER_MAJOR__ < 12
|
| 751 |
+
# define CV_NODISCARD_STD
|
| 752 |
+
# else
|
| 753 |
+
# define CV_NODISCARD_STD [[nodiscard]]
|
| 754 |
+
# endif
|
| 755 |
+
# elif __cplusplus >= 201703L
|
| 756 |
+
// available when compiler is C++17 compliant
|
| 757 |
+
# define CV_NODISCARD_STD [[nodiscard]]
|
| 758 |
+
# elif defined(__INTEL_COMPILER)
|
| 759 |
+
// see above, available when C++17 is enabled
|
| 760 |
+
# elif defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L
|
| 761 |
+
// available with VS2017 v15.3+ with /std:c++17 or higher; works on functions and classes
|
| 762 |
+
# define CV_NODISCARD_STD [[nodiscard]]
|
| 763 |
+
# elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 700) && (__cplusplus >= 201103L)
|
| 764 |
+
// available with GCC 7.0+; works on functions, works or silently fails on classes
|
| 765 |
+
# define CV_NODISCARD_STD [[nodiscard]]
|
| 766 |
+
# elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 408) && (__cplusplus >= 201103L)
|
| 767 |
+
// available with GCC 4.8+ but it usually does nothing and can fail noisily -- therefore not used
|
| 768 |
+
// define CV_NODISCARD_STD [[gnu::warn_unused_result]]
|
| 769 |
+
# endif
|
| 770 |
+
#endif
|
| 771 |
+
#ifndef CV_NODISCARD_STD
|
| 772 |
+
# define CV_NODISCARD_STD /* nothing by default */
|
| 773 |
+
#endif
|
| 774 |
+
|
| 775 |
+
|
/****************************************************************************************\
*                                       C++ 11                                          *
\****************************************************************************************/
#ifdef __cplusplus
// MSVC was stuck at __cplusplus == 199711L for a long time, even where it supports C++11,
// so check _MSC_VER instead. See:
// <https://devblogs.microsoft.com/cppblog/msvc-now-correctly-reports-__cplusplus>
#  if defined(_MSC_VER)
#    if _MSC_VER < 1800
#      error "OpenCV 4.x+ requires enabled C++11 support"
#    endif
#  elif __cplusplus < 201103L
#    error "OpenCV 4.x+ requires enabled C++11 support"
#  endif
#endif

#ifndef CV_CXX11
#  define CV_CXX11 1
#endif

#ifndef CV_OVERRIDE
#  define CV_OVERRIDE override
#endif

#ifndef CV_FINAL
#  define CV_FINAL final
#endif

#ifndef CV_NOEXCEPT
#  define CV_NOEXCEPT noexcept
#endif

#ifndef CV_CONSTEXPR
#  define CV_CONSTEXPR constexpr
#endif

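These four macros simply spell the C++11 keywords override, final, noexcept and constexpr; they are guarded with #ifndef so a custom port header can redefine them, and they can be placed wherever the keywords would go. A small illustrative sketch with hypothetical types (OpenCV include path assumed):

#include <opencv2/core/cvdef.h>

struct Layer
{
    virtual ~Layer() {}
    virtual int outputs() const = 0;
};

struct Conv CV_FINAL : public Layer                 // no further derivation allowed
{
    static CV_CONSTEXPR int defaultOutputs() { return 64; }

    int outputs() const CV_OVERRIDE { return n; }   // compiler-checked override of the base method
    void swap(Conv& other) CV_NOEXCEPT              // declared non-throwing
    {
        int t = n; n = other.n; other.n = t;
    }

    int n = defaultOutputs();
};
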
// Integer types portability
#ifdef __cplusplus
#include <cstdint>
namespace cv {
using std::int8_t;
using std::uint8_t;
using std::int16_t;
using std::uint16_t;
using std::int32_t;
using std::uint32_t;
using std::int64_t;
using std::uint64_t;
}
#else // pure C
#include <stdint.h>
#endif

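After this block, the standard fixed-width aliases are reachable through the cv namespace in C++ translation units (and through <stdint.h> in plain C). A trivial sanity-check sketch, assuming the OpenCV include path:

#include <opencv2/core/cvdef.h>

static_assert(sizeof(cv::uint16_t) == 2, "cv::uint16_t is 16 bits");
static_assert(sizeof(cv::int64_t)  == 8, "cv::int64_t is 64 bits");
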
#ifdef __cplusplus
namespace cv
{

class hfloat
{
public:
#if CV_FP16_TYPE

    hfloat() : h(0) {}
    explicit hfloat(float x) { h = (__fp16)x; }
    operator float() const { return (float)h; }
protected:
    __fp16 h;

#else
    hfloat() : w(0) {}
    explicit hfloat(float x)
    {
#if CV_FP16 && CV_AVX2
        __m128 v = _mm_load_ss(&x);
        w = (ushort)_mm_cvtsi128_si32(_mm_cvtps_ph(v, 0));
#else
        Cv32suf in;
        in.f = x;
        unsigned sign = in.u & 0x80000000;
        in.u ^= sign;

        if( in.u >= 0x47800000 )
            w = (ushort)(in.u > 0x7f800000 ? 0x7e00 : 0x7c00);
        else
        {
            if (in.u < 0x38800000)
            {
                in.f += 0.5f;
                w = (ushort)(in.u - 0x3f000000);
            }
            else
            {
                unsigned t = in.u + 0xc8000fff;
                w = (ushort)((t + ((in.u >> 13) & 1)) >> 13);
            }
        }

        w = (ushort)(w | (sign >> 16));
#endif
    }

    operator float() const
    {
#if CV_FP16 && CV_AVX2
        float f;
        _mm_store_ss(&f, _mm_cvtph_ps(_mm_cvtsi32_si128(w)));
        return f;
#else
        Cv32suf out;

        unsigned t = ((w & 0x7fff) << 13) + 0x38000000;
        unsigned sign = (w & 0x8000) << 16;
        unsigned e = w & 0x7c00;

        out.u = t + (1 << 23);
        out.u = (e >= 0x7c00 ? t + 0x38000000 :
            e == 0 ? (static_cast<void>(out.f -= 6.103515625e-05f), out.u) : t) | sign;
        return out.f;
#endif
    }

protected:
    ushort w;

#endif
};

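On compilers without a native __fp16 type, the scalar branch above converts binary32 to binary16 with integer arithmetic: `in.u + 0xc8000fff` subtracts the exponent re-bias 0x38000000 and adds a 0xfff rounding bias, the extra `(in.u >> 13) & 1` term makes the rounding nearest-even, inputs below 0x38800000 go through the `+ 0.5f` trick to produce correctly rounded subnormals, and inputs at or above 0x47800000 (65536.0f) saturate to infinity (0x7c00) or a quiet-NaN payload (0x7e00). Worked example: 1.0f (bits 0x3f800000) maps to the half pattern 0x3c00. A short round-trip sketch using the class, assuming the OpenCV headers are available (not part of the header itself):

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::hfloat one(1.0f);
    std::printf("%g\n", (float)one);    // 1 -- representable exactly, round-trips unchanged

    cv::hfloat big(1.0e6f);             // above the binary16 maximum of 65504
    std::printf("%g\n", (float)big);    // inf -- saturated on conversion

    cv::hfloat tiny(1.0e-8f);           // well below the smallest binary16 subnormal (2^-24, about 6e-8)
    std::printf("%g\n", (float)tiny);   // 0 -- rounded to zero
    return 0;
}
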
inline hfloat hfloatFromBits(ushort w) {
#if CV_FP16_TYPE
    Cv16suf u;
    u.u = w;
    hfloat res(float(u.h));
    return res;
#else
    Cv32suf out;

    unsigned t = ((w & 0x7fff) << 13) + 0x38000000;
    unsigned sign = (w & 0x8000) << 16;
    unsigned e = w & 0x7c00;

    out.u = t + (1 << 23);
    out.u = (e >= 0x7c00 ? t + 0x38000000 :
        e == 0 ? (static_cast<void>(out.f -= 6.103515625e-05f), out.u) : t) | sign;
    hfloat res(out.f);
    return res;
#endif
}

#if !defined(__OPENCV_BUILD) && !(defined __STDCPP_FLOAT16_T__) && !(defined __ARM_NEON)
typedef hfloat float16_t;
#endif

}
#endif

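hfloatFromBits reinterprets a raw 16-bit pattern as a half-precision value without going through the float constructor's rounding path. Illustrative checks against well-known binary16 encodings (a usage sketch, not part of the header; OpenCV headers assumed available):

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    std::printf("%g\n", (float)cv::hfloatFromBits(0x3c00));  // 1   (sign 0, exponent 01111, mantissa 0)
    std::printf("%g\n", (float)cv::hfloatFromBits(0xc000));  // -2  (sign 1, exponent 10000, mantissa 0)
    std::printf("%g\n", (float)cv::hfloatFromBits(0x7c00));  // inf (all-ones exponent, zero mantissa)
    return 0;
}
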
/** @brief Constructs the 'fourcc' code, used in video codecs and many other places.
    Simply call it with 4 chars like `CV_FOURCC('I', 'Y', 'U', 'V')`
*/
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{
    return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24);
}

//! Macro to construct the fourcc code of the codec. Same as CV_FOURCC()
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))

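Both forms pack the four characters one byte each, first character in the lowest byte; the macro form is additionally usable where a compile-time constant is required. Worked example ('M' = 0x4d, 'J' = 0x4a, 'P' = 0x50, 'G' = 0x47, so the packed code is 0x47504a4d), assuming the OpenCV include path:

#include <opencv2/core/cvdef.h>
#include <cstdio>

int main()
{
    int fourcc = CV_FOURCC('M', 'J', 'P', 'G');
    std::printf("0x%08X\n", (unsigned)fourcc);                               // 0x47504A4D
    std::printf("0x%08X\n", (unsigned)CV_FOURCC_MACRO('M', 'J', 'P', 'G'));  // same value from the macro form
    return 0;
}
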
//! @}

#ifndef __cplusplus
#include "opencv2/core/fast_math.hpp" // define cvRound(double)
#endif

#endif // OPENCV_CORE_CVDEF_H