|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#include "precomp.hpp"
|
|
|
#include "opencv2/core/hal/intrin.hpp"
|
|
|
|
|
|
#include <limits>
|
|
|
|
|
|
#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))
|
|
|
|
|
|
namespace cv
|
|
|
{
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// No-op fallback interpolator used when no SIMD implementation is available
// for the element type T.  Every method reports that zero pixels were
// vectorized, which makes the scalar loops in the invokers process the
// whole row instead.
template<typename T>
class SIMDBayerStubInterpolator_
{
public:
    // Bayer -> grayscale stub: claims 0 pixels handled.
    int bayer2Gray(const T*, int, T*, int, int, int, int) const { return 0; }

    // Bayer -> 3-channel stub: claims 0 pixels handled.
    int bayer2RGB(const T*, int, T*, int, int) const { return 0; }

    // Bayer -> 4-channel (with alpha) stub: claims 0 pixels handled.
    int bayer2RGBA(const T*, int, T*, int, int, const T) const { return 0; }

    // Edge-aware Bayer -> 3-channel stub: claims 0 pixels handled.
    int bayer2RGB_EA(const T*, int, T*, int, int) const { return 0; }
};
|
|
|
|
|
|
#if CV_SIMD128
/*
 * Vectorized Bayer interpolation kernels for 8-bit data.
 *
 * Conventions shared by all methods:
 *  - `bayer` points into the top row of a 3-row window; the pixel being
 *    reconstructed sits on the middle row at `bayer + bayer_step + 1`.
 *  - Each iteration loads 16 consecutive bytes of each of the three rows and
 *    reinterprets them as 8 x u16 lanes, so every lane packs a pair of
 *    columns: low byte = even column, high byte = odd column.
 *  - 14 output pixels are produced per iteration; the `bayer_end - 18` loop
 *    bound keeps the 16-byte loads (and the +1-column neighbour accesses
 *    obtained by lane rotation) inside the row.
 *  - The return value `(int)(bayer - (bayer_end - width))` is the number of
 *    input pixels consumed, so the scalar code in the invokers can finish
 *    the remaining tail of the row.
 */
class SIMDBayerInterpolator_8u
{
public:
    // Convert one Bayer row window to grayscale.
    // bcoeff/gcoeff/rcoeff are 14-bit fixed-point luma weights as passed by
    // Bayer2Gray_Invoker (which swaps bcoeff/rcoeff per row parity — the
    // local b*/g*/r* names therefore denote lane position, not a fixed
    // colour; confirm against the invoker when modifying).
    int bayer2Gray(const uchar* bayer, int bayer_step, uchar* dst,
                   int width, int bcoeff, int gcoeff, int rcoeff) const
    {
#if CV_NEON
        uint16x8_t masklo = vdupq_n_u16(255);
        const uchar* bayer_end = bayer + width;

        for( ; bayer <= bayer_end - 18; bayer += 14, dst += 14 )
        {
            uint16x8_t r0 = vld1q_u16((const ushort*)bayer);
            uint16x8_t r1 = vld1q_u16((const ushort*)(bayer + bayer_step));
            uint16x8_t r2 = vld1q_u16((const ushort*)(bayer + bayer_step*2));

            // Even columns of top+bottom rows; b0 = 4-neighbour sum (even
            // output pixels), b1 = 2-neighbour sum shifted to odd pixels.
            uint16x8_t b1_ = vaddq_u16(vandq_u16(r0, masklo), vandq_u16(r2, masklo));
            uint16x8_t b1 = vextq_u16(b1_, b1_, 1);
            uint16x8_t b0 = vaddq_u16(b1_, b1);

            // Greens: odd columns of top/bottom rows plus even columns of
            // the middle row (4-sum for even pixels); g1 = centre green of
            // odd pixels, pre-scaled x4 for the doubling multiply below.
            uint16x8_t g0 = vaddq_u16(vshrq_n_u16(r0, 8), vshrq_n_u16(r2, 8));
            uint16x8_t g1 = vandq_u16(r1, masklo);
            g0 = vaddq_u16(g0, vaddq_u16(g1, vextq_u16(g1, g1, 1)));
            uint16x8_t rot = vextq_u16(g1, g1, 1);
            g1 = vshlq_n_u16(rot, 2);

            // Middle-row odd columns: r0 = centre value x4 (even pixels),
            // r1 = horizontal 2-sum (odd pixels).
            r0 = vshrq_n_u16(r1, 8);
            r1 = vaddq_u16(r0, vextq_u16(r0, r0, 1));
            r0 = vshlq_n_u16(r0, 2);

            // vqdmulhq_n_s16(x, c) == (2*x*c) >> 16.  The x2/x4 coefficient
            // scaling together with the final >>2 reproduces the scalar
            // CV_DESCALE(..., SHIFT+1 / SHIFT+2) rounding of the invoker.
            b0 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(b0), (short)(rcoeff*2)));
            b1 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(b1), (short)(rcoeff*4)));

            g0 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(g0), (short)(gcoeff*2)));
            g1 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(g1), (short)(gcoeff*2)));

            r0 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(r0), (short)(bcoeff*2)));
            r1 = vreinterpretq_u16_s16(vqdmulhq_n_s16(vreinterpretq_s16_u16(r1), (short)(bcoeff*4)));

            g0 = vaddq_u16(vaddq_u16(g0, b0), r0);
            g1 = vaddq_u16(vaddq_u16(g1, b1), r1);

            // Interleave even (g0) and odd (g1) results back into pixel order.
            uint8x8x2_t p = vzip_u8(vrshrn_n_u16(g0, 2), vrshrn_n_u16(g1, 2));
            vst1_u8(dst, p.val[0]);
            vst1_u8(dst + 8, p.val[1]);
        }
#else
        // Universal-intrinsics path.  v_mul_hi(x, c) == (x*c) >> 16, so the
        // operands carry an extra pre-scale (<<1 .. <<3) compared to the
        // NEON doubling multiply above; the final >>2 completes the descale.
        v_uint16x8 _b2y = v_setall_u16((ushort)(rcoeff*2));
        v_uint16x8 _g2y = v_setall_u16((ushort)(gcoeff*2));
        v_uint16x8 _r2y = v_setall_u16((ushort)(bcoeff*2));
        const uchar* bayer_end = bayer + width;

        for( ; bayer <= bayer_end - 18; bayer += 14, dst += 14 )
        {
            v_uint16x8 r0 = v_reinterpret_as_u16(v_load(bayer));
            v_uint16x8 r1 = v_reinterpret_as_u16(v_load(bayer+bayer_step));
            v_uint16x8 r2 = v_reinterpret_as_u16(v_load(bayer+bayer_step*2));

            // (x<<8)>>7 extracts the even column scaled x2.
            v_uint16x8 b1 = v_add(v_shr<7>(v_shl<8>(r0)), v_shr<7>(v_shl<8>(r2)));
            v_uint16x8 b0 = v_add(v_rotate_right<1>(b1), b1);
            b1 = v_shl<1>(v_rotate_right<1>(b1));

            // x>>7 extracts the odd column scaled x2 (plus bit 7 of the even
            // column, which the subsequent descale keeps negligible in
            // practice — matches the reference implementation).
            v_uint16x8 g0 = v_add(v_shr<7>(r0), v_shr<7>(r2));
            v_uint16x8 g1 = v_shr<7>(v_shl<8>(r1));
            g0 = v_add(g0, v_add(v_rotate_right<1>(g1), g1));
            g1 = v_shl<2>(v_rotate_right<1>(g1));

            r0 = v_shr<8>(r1);
            r1 = v_shl<2>(v_add(v_rotate_right<1>(r0), r0));
            r0 = v_shl<3>(r0);

            g0 = v_shr<2>(v_add(v_add(v_mul_hi(b0, _b2y), v_mul_hi(g0, _g2y)), v_mul_hi(r0, _r2y)));
            g1 = v_shr<2>(v_add(v_add(v_mul_hi(b1, _b2y), v_mul_hi(g1, _g2y)), v_mul_hi(r1, _r2y)));

            // Interleave even/odd gray results; only the low 16 bytes (14
            // valid pixels + 2 overlap) are stored.
            v_uint8x16 pack_lo, pack_hi;
            v_zip(v_pack_u(v_reinterpret_as_s16(g0), v_reinterpret_as_s16(g0)),
                  v_pack_u(v_reinterpret_as_s16(g1), v_reinterpret_as_s16(g1)),
                  pack_lo, pack_hi);
            v_store(dst, pack_lo);
        }
#endif

        // Number of pixels consumed by the vector loop.
        return (int)(bayer - (bayer_end - width));
    }

    // Convert one Bayer row window to interleaved 3-channel output.
    // `blue` is +1/-1 and selects which side of the green sample the blue
    // channel is written to; stores are at dst-1 so the B/G/R triple of the
    // first pixel lands at dst[-1..1], matching the scalar invoker.
    int bayer2RGB(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const
    {
#if CV_NEON
        uint16x8_t masklo = vdupq_n_u16(255);
        uint8x16x3_t pix;
        const uchar* bayer_end = bayer + width;

        for( ; bayer <= bayer_end - 18; bayer += 14, dst += 42 )
        {
            uint16x8_t r0 = vld1q_u16((const ushort*)bayer);
            uint16x8_t r1 = vld1q_u16((const ushort*)(bayer + bayer_step));
            uint16x8_t r2 = vld1q_u16((const ushort*)(bayer + bayer_step*2));

            // Blue plane: 4-neighbour average for even pixels, 2-neighbour
            // average for odd pixels (vrshrn adds the rounding bias).
            uint16x8_t b1 = vaddq_u16(vandq_u16(r0, masklo), vandq_u16(r2, masklo));
            uint16x8_t nextb1 = vextq_u16(b1, b1, 1);
            uint16x8_t b0 = vaddq_u16(b1, nextb1);

            uint8x8x2_t bb = vzip_u8(vrshrn_n_u16(b0, 2), vrshrn_n_u16(nextb1, 1));
            pix.val[1-blue] = vcombine_u8(bb.val[0], bb.val[1]);

            // Green plane: 4-neighbour average for even pixels, the centre
            // sample itself for odd pixels.
            uint16x8_t g0 = vaddq_u16(vshrq_n_u16(r0, 8), vshrq_n_u16(r2, 8));
            uint16x8_t g1 = vandq_u16(r1, masklo);
            g0 = vaddq_u16(g0, vaddq_u16(g1, vextq_u16(g1, g1, 1)));
            g1 = vextq_u16(g1, g1, 1);

            uint8x8x2_t gg = vzip_u8(vrshrn_n_u16(g0, 2), vmovn_u16(g1));
            pix.val[1] = vcombine_u8(gg.val[0], gg.val[1]);

            // Red plane: centre sample for even pixels, horizontal average
            // for odd pixels.
            r0 = vshrq_n_u16(r1, 8);
            r1 = vaddq_u16(r0, vextq_u16(r0, r0, 1));

            uint8x8x2_t rr = vzip_u8(vmovn_u16(r0), vrshrn_n_u16(r1, 1));
            pix.val[1+blue] = vcombine_u8(rr.val[0], rr.val[1]);

            // 3-channel interleaved store, anchored one byte left of `dst`.
            vst3q_u8(dst-1, pix);
        }
#else
        v_uint16x8 delta1 = v_setall_u16(1), delta2 = v_setall_u16(2);
        // Selects whether the B and R planes must be exchanged (blue < 0).
        v_uint16x8 mask = v_setall_u16(blue < 0 ? (ushort)(-1) : 0);
        v_uint16x8 masklo = v_setall_u16(0x00ff);
        v_uint8x16 z = v_setzero_u8();
        const uchar* bayer_end = bayer + width;

        for( ; bayer <= bayer_end - 18; bayer += 14, dst += 42 )
        {
            v_uint16x8 r0 = v_reinterpret_as_u16(v_load(bayer));
            v_uint16x8 r1 = v_reinterpret_as_u16(v_load(bayer+bayer_step));
            v_uint16x8 r2 = v_reinterpret_as_u16(v_load(bayer+bayer_step*2));

            // Blue: rounded 4-average (even pixels) / 2-average (odd pixels),
            // then packed to bytes: low half = even, high half = odd.
            v_uint16x8 b1 = v_add(v_and(r0, masklo), v_and(r2, masklo));
            v_uint16x8 nextb1 = v_rotate_right<1>(b1);
            v_uint16x8 b0 = v_add(b1, nextb1);
            b1 = v_shr<1>(v_add(nextb1, delta1));
            b0 = v_shr<2>(v_add(b0, delta2));

            b0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(b0), v_reinterpret_as_s16(b1)));

            // Green: rounded 4-average (even) / centre sample (odd).
            v_uint16x8 g0 = v_add(v_shr<8>(r0), v_shr<8>(r2));
            v_uint16x8 g1 = v_and(r1, masklo);
            g0 = v_add(g0, v_add(v_rotate_right<1>(g1), g1));
            g1 = v_rotate_right<1>(g1);
            g0 = v_shr<2>(v_add(g0, delta2));

            g0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(g0), v_reinterpret_as_s16(g1)));

            // Red: centre sample (even) / rounded 2-average (odd).
            r0 = v_shr<8>(r1);
            r1 = v_add(v_rotate_right<1>(r0), r0);
            r1 = v_shr<1>(v_add(r1, delta1));

            r0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(r0), v_reinterpret_as_s16(r1)));

            // Conditional B<->R swap via the classic xor trick.
            b1 = v_and(v_xor(b0, r0), mask);
            b0 = v_xor(b0, b1);
            r0 = v_xor(r0, b1);

            // Interleave planes into BGBG.../ R0R0... byte pairs, then pair
            // 16-bit units and finally 32-bit units to form 3-byte pixels
            // (the zero lane `z` and the rotates squeeze out the pad byte).
            v_uint8x16 pack_lo, pack_hi;
            v_zip(v_reinterpret_as_u8(b0), v_reinterpret_as_u8(g0), pack_lo, pack_hi);
            b1 = v_reinterpret_as_u16(pack_hi);
            b0 = v_reinterpret_as_u16(pack_lo);

            v_zip(v_reinterpret_as_u8(r0), z, pack_lo, pack_hi);
            r1 = v_reinterpret_as_u16(pack_hi);
            r0 = v_reinterpret_as_u16(pack_lo);

            v_zip(b0, r0, g0, g1);
            g0 = v_reinterpret_as_u16(v_rotate_left<1>(v_reinterpret_as_u8(g0)));
            g1 = v_reinterpret_as_u16(v_rotate_left<1>(v_reinterpret_as_u8(g1)));

            v_zip(b1, r1, r0, r1);

            // Each store below writes 6 payload bytes = 2 output pixels;
            // 7 stores cover the 14 pixels of this iteration.
            v_uint32x4 pack32_lo, pack32_hi;
            v_zip(v_reinterpret_as_u32(g0), v_reinterpret_as_u32(r0), pack32_lo, pack32_hi);
            b0 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_lo)));
            b1 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_hi)));

            v_store_low(dst-1+0, v_reinterpret_as_u8(b0));
            v_store_high(dst-1+6*1, v_reinterpret_as_u8(b0));
            v_store_low(dst-1+6*2, v_reinterpret_as_u8(b1));
            v_store_high(dst-1+6*3, v_reinterpret_as_u8(b1));

            v_zip(v_reinterpret_as_u32(g1), v_reinterpret_as_u32(r1), pack32_lo, pack32_hi);
            g0 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_lo)));
            g1 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_hi)));

            v_store_low(dst-1+6*4, v_reinterpret_as_u8(g0));
            v_store_high(dst-1+6*5, v_reinterpret_as_u8(g0));

            v_store_low(dst-1+6*6, v_reinterpret_as_u8(g1));
        }
#endif

        // Number of pixels consumed by the vector loop.
        return (int)(bayer - (bayer_end - width));
    }

    // Same interpolation as bayer2RGB, but emits 4-channel pixels with a
    // constant `alpha` in the 4th channel (8 bytes / 2 pixels per store,
    // dst advances by 14*4 = 56 bytes per iteration).
    int bayer2RGBA(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue, const uchar alpha) const
    {
#if CV_NEON
        uint16x8_t masklo = vdupq_n_u16(255);
        uint8x16x4_t pix;
        const uchar* bayer_end = bayer + width;
        pix.val[3] = vdupq_n_u8(alpha);   // constant alpha plane

        for( ; bayer <= bayer_end - 18; bayer += 14, dst += 56 )
        {
            uint16x8_t r0 = vld1q_u16((const ushort*)bayer);
            uint16x8_t r1 = vld1q_u16((const ushort*)(bayer + bayer_step));
            uint16x8_t r2 = vld1q_u16((const ushort*)(bayer + bayer_step*2));

            // Blue plane (see bayer2RGB for the per-parity averaging scheme).
            uint16x8_t b1 = vaddq_u16(vandq_u16(r0, masklo), vandq_u16(r2, masklo));
            uint16x8_t nextb1 = vextq_u16(b1, b1, 1);
            uint16x8_t b0 = vaddq_u16(b1, nextb1);

            uint8x8x2_t bb = vzip_u8(vrshrn_n_u16(b0, 2), vrshrn_n_u16(nextb1, 1));
            pix.val[1-blue] = vcombine_u8(bb.val[0], bb.val[1]);

            // Green plane.
            uint16x8_t g0 = vaddq_u16(vshrq_n_u16(r0, 8), vshrq_n_u16(r2, 8));
            uint16x8_t g1 = vandq_u16(r1, masklo);
            g0 = vaddq_u16(g0, vaddq_u16(g1, vextq_u16(g1, g1, 1)));
            g1 = vextq_u16(g1, g1, 1);

            uint8x8x2_t gg = vzip_u8(vrshrn_n_u16(g0, 2), vmovn_u16(g1));
            pix.val[1] = vcombine_u8(gg.val[0], gg.val[1]);

            // Red plane.
            r0 = vshrq_n_u16(r1, 8);
            r1 = vaddq_u16(r0, vextq_u16(r0, r0, 1));

            uint8x8x2_t rr = vzip_u8(vmovn_u16(r0), vrshrn_n_u16(r1, 1));
            pix.val[1+blue] = vcombine_u8(rr.val[0], rr.val[1]);

            // 4-channel interleaved store, anchored one byte left of `dst`.
            vst4q_u8(dst-1, pix);
        }
#else
        v_uint16x8 delta1 = v_setall_u16(1), delta2 = v_setall_u16(2);
        v_uint16x8 mask = v_setall_u16(blue < 0 ? (ushort)(-1) : 0);
        v_uint16x8 masklo = v_setall_u16(0x00ff);
        v_uint8x16 a = v_setall_u8(alpha);   // alpha fills the 4th byte slot
        const uchar* bayer_end = bayer + width;

        for( ; bayer <= bayer_end - 18; bayer += 14, dst += 56 )
        {
            v_uint16x8 r0 = v_reinterpret_as_u16(v_load(bayer));
            v_uint16x8 r1 = v_reinterpret_as_u16(v_load(bayer+bayer_step));
            v_uint16x8 r2 = v_reinterpret_as_u16(v_load(bayer+bayer_step*2));

            // Blue: rounded 4-average (even) / 2-average (odd), packed.
            v_uint16x8 b1 = v_add(v_and(r0, masklo), v_and(r2, masklo));
            v_uint16x8 nextb1 = v_rotate_right<1>(b1);
            v_uint16x8 b0 = v_add(b1, nextb1);
            b1 = v_shr<1>(v_add(nextb1, delta1));
            b0 = v_shr<2>(v_add(b0, delta2));

            b0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(b0), v_reinterpret_as_s16(b1)));

            // Green: rounded 4-average (even) / centre sample (odd).
            v_uint16x8 g0 = v_add(v_shr<8>(r0), v_shr<8>(r2));
            v_uint16x8 g1 = v_and(r1, masklo);
            g0 = v_add(g0, v_add(v_rotate_right<1>(g1), g1));
            g1 = v_rotate_right<1>(g1);
            g0 = v_shr<2>(v_add(g0, delta2));

            g0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(g0), v_reinterpret_as_s16(g1)));

            // Red: centre sample (even) / rounded 2-average (odd).
            r0 = v_shr<8>(r1);
            r1 = v_add(v_rotate_right<1>(r0), r0);
            r1 = v_shr<1>(v_add(r1, delta1));

            r0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(r0), v_reinterpret_as_s16(r1)));

            // Conditional B<->R swap via xor trick.
            b1 = v_and(v_xor(b0, r0), mask);
            b0 = v_xor(b0, b1);
            r0 = v_xor(r0, b1);

            // Build BGRA quads: zip B with G, zip R with alpha, then pair
            // the 16-bit and 32-bit units.  No byte rotation is needed here
            // because a 4-byte pixel keeps everything naturally aligned.
            v_uint8x16 pack_lo, pack_hi;
            v_zip(v_reinterpret_as_u8(b0), v_reinterpret_as_u8(g0), pack_lo, pack_hi);
            b1 = v_reinterpret_as_u16(pack_hi);
            b0 = v_reinterpret_as_u16(pack_lo);

            v_zip(v_reinterpret_as_u8(r0), a, pack_lo, pack_hi);
            r1 = v_reinterpret_as_u16(pack_hi);
            r0 = v_reinterpret_as_u16(pack_lo);

            v_zip(b0, r0, g0, g1);

            v_zip(b1, r1, r0, r1);

            // Each store writes 8 bytes = 2 output pixels; 7 stores cover
            // the 14 pixels of this iteration.
            v_uint32x4 pack32_lo, pack32_hi;
            v_zip(v_reinterpret_as_u32(g0), v_reinterpret_as_u32(r0), pack32_lo, pack32_hi);
            b0 = v_reinterpret_as_u16(pack32_lo);
            b1 = v_reinterpret_as_u16(pack32_hi);

            v_store_low(dst-1+0, v_reinterpret_as_u8(b0));
            v_store_high(dst-1+8*1, v_reinterpret_as_u8(b0));
            v_store_low(dst-1+8*2, v_reinterpret_as_u8(b1));
            v_store_high(dst-1+8*3, v_reinterpret_as_u8(b1));

            v_zip(v_reinterpret_as_u32(g1), v_reinterpret_as_u32(r1), pack32_lo, pack32_hi);
            g0 = v_reinterpret_as_u16(pack32_lo);
            g1 = v_reinterpret_as_u16(pack32_hi);

            v_store_low(dst-1+8*4, v_reinterpret_as_u8(g0));
            v_store_high(dst-1+8*5, v_reinterpret_as_u8(g0));

            v_store_low(dst-1+8*6, v_reinterpret_as_u8(g1));
        }
#endif

        // Number of pixels consumed by the vector loop.
        return (int)(bayer - (bayer_end - width));
    }

    // Edge-aware variant of bayer2RGB: at blue/red sites the green value is
    // taken from the direction (vertical vs horizontal) with the smaller
    // gradient instead of a plain 4-average.  Universal intrinsics only.
    // NOTE(review): stores are anchored at `dst` (not dst-1 as above) and
    // the swap mask uses `blue > 0` — this presumably matches a different
    // dst/blue convention of the (not shown) EA invoker; confirm there.
    int bayer2RGB_EA(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const
    {
        const uchar* bayer_end = bayer + width;
        v_uint16x8 masklow = v_setall_u16(0x00ff);
        v_uint16x8 delta1 = v_setall_u16(1), delta2 = v_setall_u16(2);
        v_uint16x8 full = v_setall_u16((ushort)(-1));
        v_uint8x16 z = v_setzero_u8();
        v_uint16x8 mask = v_setall_u16(blue > 0 ? (ushort)(-1) : 0);

        for ( ; bayer <= bayer_end - 18; bayer += 14, dst += 42)
        {
            v_uint16x8 r0 = v_reinterpret_as_u16(v_load(bayer));
            v_uint16x8 r1 = v_reinterpret_as_u16(v_load(bayer+bayer_step));
            v_uint16x8 r2 = v_reinterpret_as_u16(v_load(bayer+bayer_step*2));

            // Blue: rounded 4-average (even pixels) / 2-average (odd).
            v_uint16x8 b1 = v_add(v_and(r0, masklow), v_and(r2, masklow));
            v_uint16x8 nextb1 = v_rotate_right<1>(b1);
            v_uint16x8 b0 = v_add(b1, nextb1);
            b1 = v_shr<1>(v_add(nextb1, delta1));
            b0 = v_shr<2>(v_add(b0, delta2));

            b0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(b0), v_reinterpret_as_s16(b1)));

            // Candidate greens: vertical average (top/bottom green samples)
            // and horizontal average (left/right green samples).
            v_uint16x8 r0g = v_shr<8>(r0);
            v_uint16x8 r2g = v_shr<8>(r2);
            v_uint16x8 sumv = v_shr<1>(v_add(v_add(r0g, r2g), delta1));

            v_uint16x8 g1 = v_and(r1, masklow);
            v_uint16x8 nextg1 = v_rotate_right<1>(g1);
            v_uint16x8 sumg = v_shr<1>(v_add(v_add(g1, nextg1), delta1));

            // |a-b| via saturating subtraction: (a-b) + (b-a) with u16
            // saturation leaves exactly the absolute difference.
            v_uint16x8 gradv = v_add(v_sub(r0g, r2g), v_sub(r2g, r0g));
            v_uint16x8 gradg = v_add(v_sub(nextg1, g1), v_sub(g1, nextg1));
            // Pick the average along the smaller gradient per lane.
            v_uint16x8 gmask = v_gt(gradg, gradv);
            v_uint16x8 g0 = v_add(v_and(gmask, sumv), v_and(sumg, v_xor(gmask, full)));

            // Odd pixels sit on a green sample: use it directly (nextg1).
            g0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(g0), v_reinterpret_as_s16(nextg1)));

            // Red: centre sample (even) / rounded 2-average (odd).
            r0 = v_shr<8>(r1);
            r1 = v_add(v_rotate_right<1>(r0), r0);
            r1 = v_shr<1>(v_add(r1, delta1));

            r0 = v_reinterpret_as_u16(v_pack_u(v_reinterpret_as_s16(r0), v_reinterpret_as_s16(r1)));

            // Conditional B<->R swap via xor trick.
            b1 = v_and(v_xor(b0, r0), mask);
            b0 = v_xor(b0, b1);
            r0 = v_xor(r0, b1);

            // Interleave into 3-byte pixels (same shuffle dance as
            // bayer2RGB above: byte zip, 16-bit zip, 32-bit zip + rotates).
            v_uint8x16 pack_lo, pack_hi;
            v_zip(v_reinterpret_as_u8(b0), v_reinterpret_as_u8(g0), pack_lo, pack_hi);
            b1 = v_reinterpret_as_u16(pack_hi);
            b0 = v_reinterpret_as_u16(pack_lo);

            v_zip(v_reinterpret_as_u8(r0), z, pack_lo, pack_hi);
            r1 = v_reinterpret_as_u16(pack_hi);
            r0 = v_reinterpret_as_u16(pack_lo);

            v_zip(b0, r0, g0, g1);
            g0 = v_reinterpret_as_u16(v_rotate_left<1>(v_reinterpret_as_u8(g0)));
            g1 = v_reinterpret_as_u16(v_rotate_left<1>(v_reinterpret_as_u8(g1)));

            v_zip(b1, r1, r0, r1);

            // 7 stores x 6 payload bytes = 14 pixels per iteration.
            v_uint32x4 pack32_lo, pack32_hi;
            v_zip(v_reinterpret_as_u32(g0), v_reinterpret_as_u32(r0), pack32_lo, pack32_hi);
            b0 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_lo)));
            b1 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_hi)));

            v_store_low(dst+0, v_reinterpret_as_u8(b0));
            v_store_high(dst+6*1, v_reinterpret_as_u8(b0));
            v_store_low(dst+6*2, v_reinterpret_as_u8(b1));
            v_store_high(dst+6*3, v_reinterpret_as_u8(b1));

            v_zip(v_reinterpret_as_u32(g1), v_reinterpret_as_u32(r1), pack32_lo, pack32_hi);
            g0 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_lo)));
            g1 = v_reinterpret_as_u16(v_rotate_right<1>(v_reinterpret_as_u8(pack32_hi)));

            v_store_low(dst+6*4, v_reinterpret_as_u8(g0));
            v_store_high(dst+6*5, v_reinterpret_as_u8(g0));

            v_store_low(dst+6*6, v_reinterpret_as_u8(g1));
        }

        // Number of pixels consumed by the vector loop.
        return int(bayer - (bayer_end - width));
    }
};
#else
// No 128-bit SIMD available: fall back to the do-nothing stub so the scalar
// code in the invokers handles entire rows.
typedef SIMDBayerStubInterpolator_<uchar> SIMDBayerInterpolator_8u;
#endif
|
|
|
|
|
|
|
|
|
/*
 * Row-parallel body for Bayer -> grayscale conversion.
 *
 * Each row `i` of `range` demosaics source rows i..i+2 into destination row
 * i+1 of the interior (the driver Bayer2Gray_ fills the 1-pixel border).
 * `Start_with_green` tells whether column 1 of row 0 is a green sample;
 * `Bcoeff`/`Rcoeff` are 14-bit fixed-point blue/red luma weights, already
 * arranged by the driver for row 0's colour phase.  `Brow` tracks whether
 * the current row carries blue samples; it is flipped per row together with
 * the coefficient swap (it is not otherwise read in this body).
 */
template<typename T, class SIMDInterpolator>
class Bayer2Gray_Invoker :
    public ParallelLoopBody
{
public:
    Bayer2Gray_Invoker(const Mat& _srcmat, Mat& _dstmat, int _start_with_green, bool _brow,
        const Size& _size, int _bcoeff, int _rcoeff) :
        ParallelLoopBody(), srcmat(_srcmat), dstmat(_dstmat), Start_with_green(_start_with_green),
        Brow(_brow), size(_size), Bcoeff(_bcoeff), Rcoeff(_rcoeff)
    {
    }

    virtual void operator ()(const Range& range) const CV_OVERRIDE
    {
        SIMDInterpolator vecOp;
        // Green luma weight in 14-bit fixed point (0.587 * 2^14 ~ 9617).
        const unsigned G2Y = 9617;
        const int SHIFT = 14;

        const T* bayer0 = srcmat.ptr<T>();
        int bayer_step = (int)(srcmat.step/sizeof(T));
        T* dst0 = (T*)dstmat.data;
        int dst_step = (int)(dstmat.step/sizeof(T));
        int bcoeff = Bcoeff, rcoeff = Rcoeff;
        int start_with_green = Start_with_green;
        bool brow = Brow;

        // Skip the 1-pixel border: output row i goes to dst row i+1, col 1.
        dst0 += dst_step + 1;

        // The colour phase alternates every row; realign for an odd start.
        if (range.start % 2)
        {
            brow = !brow;
            std::swap(bcoeff, rcoeff);
            start_with_green = !start_with_green;
        }

        bayer0 += range.start * bayer_step;
        dst0 += range.start * dst_step;

        for(int i = range.start ; i < range.end; ++i, bayer0 += bayer_step, dst0 += dst_step )
        {
            unsigned t0, t1, t2;
            const T* bayer = bayer0;
            T* dst = dst0;
            const T* bayer_end = bayer + size.width;

            // Degenerate interior: only zero the left/right border pixels.
            if( size.width <= 0 )
            {
                dst[-1] = dst[size.width] = 0;
                continue;
            }

            // Leading green pixel: 2 reds above/below, 2 blues left/right,
            // centre green; descaled by SHIFT+1 since sums are of 2 samples.
            // (bcoeff/rcoeff name the per-phase weights for this row.)
            if( start_with_green )
            {
                t0 = (bayer[1] + bayer[bayer_step*2+1])*rcoeff;
                t1 = (bayer[bayer_step] + bayer[bayer_step+2])*bcoeff;
                t2 = bayer[bayer_step+1]*(2*G2Y);

                dst[0] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+1);
                bayer++;
                dst++;
            }

            // Let the SIMD kernel chew the bulk of the row; it returns how
            // many pixels it handled (0 for the stub interpolator).
            int delta = vecOp.bayer2Gray(bayer, bayer_step, dst, size.width, bcoeff, G2Y, rcoeff);
            bayer += delta;
            dst += delta;

            // Scalar tail: pixels are processed in pairs —
            // a non-green site (4-sample sums, descale SHIFT+2) followed by
            // a green site (2-sample sums, descale SHIFT+1).
            for( ; bayer <= bayer_end - 2; bayer += 2, dst += 2 )
            {
                t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2])*rcoeff;
                t1 = (bayer[1] + bayer[bayer_step] + bayer[bayer_step+2] + bayer[bayer_step*2+1])*G2Y;
                t2 = bayer[bayer_step+1]*(4*bcoeff);
                dst[0] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+2);

                t0 = (bayer[2] + bayer[bayer_step*2+2])*rcoeff;
                t1 = (bayer[bayer_step+1] + bayer[bayer_step+3])*bcoeff;
                t2 = bayer[bayer_step+2]*(2*G2Y);
                dst[1] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+1);
            }

            // Odd trailing pixel (non-green site).
            if( bayer < bayer_end )
            {
                t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2])*rcoeff;
                t1 = (bayer[1] + bayer[bayer_step] + bayer[bayer_step+2] + bayer[bayer_step*2+1])*G2Y;
                t2 = bayer[bayer_step+1]*(4*bcoeff);
                dst[0] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+2);
                bayer++;
                dst++;
            }

            // Replicate left/right border pixels of this output row.
            dst0[-1] = dst0[0];
            dst0[size.width] = dst0[size.width-1];

            // Advance the colour phase for the next row.
            brow = !brow;
            std::swap(bcoeff, rcoeff);
            start_with_green = !start_with_green;
        }
    }

private:
    Mat srcmat;              // Bayer source (header copy; shares pixel data)
    Mat dstmat;              // grayscale destination
    int Start_with_green;    // 1 if row 0, col 1 is a green sample
    bool Brow;               // row 0 carries blue samples (phase tracking)
    Size size;               // interior size (src size minus 2 in each dim)
    int Bcoeff, Rcoeff;      // fixed-point blue/red weights for row 0 phase
};
|
|
|
|
|
|
template<typename T, typename SIMDInterpolator>
|
|
|
static void Bayer2Gray_( const Mat& srcmat, Mat& dstmat, int code )
|
|
|
{
|
|
|
const int R2Y = 4899;
|
|
|
const int B2Y = 1868;
|
|
|
|
|
|
Size size = srcmat.size();
|
|
|
int bcoeff = B2Y, rcoeff = R2Y;
|
|
|
int start_with_green = code == COLOR_BayerGB2GRAY || code == COLOR_BayerGR2GRAY;
|
|
|
bool brow = true;
|
|
|
|
|
|
if( code != COLOR_BayerBG2GRAY && code != COLOR_BayerGB2GRAY )
|
|
|
{
|
|
|
brow = false;
|
|
|
std::swap(bcoeff, rcoeff);
|
|
|
}
|
|
|
size.height -= 2;
|
|
|
size.width -= 2;
|
|
|
|
|
|
if (size.height > 0)
|
|
|
{
|
|
|
Range range(0, size.height);
|
|
|
Bayer2Gray_Invoker<T, SIMDInterpolator> invoker(srcmat, dstmat,
|
|
|
start_with_green, brow, size, bcoeff, rcoeff);
|
|
|
parallel_for_(range, invoker, dstmat.total()/static_cast<double>(1<<16));
|
|
|
}
|
|
|
|
|
|
size = dstmat.size();
|
|
|
T* dst0 = dstmat.ptr<T>();
|
|
|
int dst_step = (int)(dstmat.step/sizeof(T));
|
|
|
if( size.height > 2 )
|
|
|
for( int i = 0; i < size.width; i++ )
|
|
|
{
|
|
|
dst0[i] = dst0[i + dst_step];
|
|
|
dst0[i + (size.height-1)*dst_step] = dst0[i + (size.height-2)*dst_step];
|
|
|
}
|
|
|
else
|
|
|
for( int i = 0; i < size.width; i++ )
|
|
|
dst0[i] = dst0[i + (size.height-1)*dst_step] = 0;
|
|
|
}
|
|
|
|
|
|
// Fully-opaque alpha constant for a given channel type: the maximum
// representable value for integer types...
template <typename T>
struct Alpha
{
    static T value()
    {
        using limits = std::numeric_limits<T>;
        return limits::max();
    }
};

// ...and 1.0 for floating-point images, where channels live in [0, 1].
template <>
struct Alpha<float>
{
    static float value()
    {
        return 1.0f;
    }
};
|
|
|
|
|
|
/*
 * Row-parallel body for Bayer -> BGR/BGRA demosaicing (bilinear).
 *
 * Each row `i` of `range` reads source rows i..i+2 and writes destination
 * row i+1, starting at column 1 (the driver Bayer2RGB_ fills the top and
 * bottom border rows; this body replicates the left/right border pixels).
 * `Blue` is +1/-1 and selects on which side of the green channel the blue
 * value is written; `Start_with_green` tells whether column 1 of row 0 is
 * a green sample.  Both flip sign/toggle per row.
 */
template <typename T, typename SIMDInterpolator>
class Bayer2RGB_Invoker :
    public ParallelLoopBody
{
public:
    Bayer2RGB_Invoker(const Mat& _srcmat, Mat& _dstmat, int _start_with_green, int _blue, const Size& _size) :
        ParallelLoopBody(),
        srcmat(_srcmat), dstmat(_dstmat), Start_with_green(_start_with_green), Blue(_blue), size(_size)
    {
    }

    virtual void operator() (const Range& range) const CV_OVERRIDE
    {
        SIMDInterpolator vecOp;
        T alpha = Alpha<T>::value();
        int dcn = dstmat.channels();   // 3 (BGR) or 4 (BGRA)
        int dcn2 = dcn << 1;           // bytes-per-2-pixels stride of the pair loop

        int bayer_step = (int)(srcmat.step/sizeof(T));
        const T* bayer0 = srcmat.ptr<T>() + bayer_step * range.start;

        int dst_step = (int)(dstmat.step/sizeof(T));
        // Destination anchor: row range.start+1, column 1, green channel.
        T* dst0 = reinterpret_cast<T*>(dstmat.data) + (range.start + 1) * dst_step + dcn + 1;

        // The colour phase alternates every row; realign for an odd start.
        int blue = Blue, start_with_green = Start_with_green;
        if (range.start % 2)
        {
            blue = -blue;
            start_with_green = !start_with_green;
        }

        for (int i = range.start; i < range.end; bayer0 += bayer_step, dst0 += dst_step, ++i )
        {
            int t0, t1;
            const T* bayer = bayer0;
            T* dst = dst0;
            const T* bayer_end = bayer + size.width;

            // Degenerate interior: zero the left/right border pixels only
            // (and set their alpha for 4-channel output).
            if( size.width <= 0 )
            {
                if (dcn == 3)
                {
                    dst[-4] = dst[-3] = dst[-2] = dst[size.width*dcn-1] =
                        dst[size.width*dcn] = dst[size.width*dcn+1] = 0;
                }
                else
                {
                    dst[-5] = dst[-4] = dst[-3] = dst[size.width*dcn-1] =
                        dst[size.width*dcn] = dst[size.width*dcn+1] = 0;
                    dst[-2] = dst[size.width*dcn+2] = alpha;
                }
                continue;
            }

            // Leading green pixel: red/blue come from rounded 2-averages of
            // the vertical / horizontal neighbours, green is the sample.
            if( start_with_green )
            {
                t0 = (bayer[1] + bayer[bayer_step*2+1] + 1) >> 1;
                t1 = (bayer[bayer_step] + bayer[bayer_step+2] + 1) >> 1;

                dst[-blue] = (T)t0;
                dst[0] = bayer[bayer_step+1];
                dst[blue] = (T)t1;
                if (dcn == 4)
                    dst[2] = alpha;

                bayer++;
                dst += dcn;
            }

            // Let the SIMD kernel handle the bulk of the row; it returns
            // how many pixels it consumed (0 for the stub interpolator).
            int delta = dcn == 4 ?
                vecOp.bayer2RGBA(bayer, bayer_step, dst, size.width, blue, alpha) :
                vecOp.bayer2RGB(bayer, bayer_step, dst, size.width, blue);
            bayer += delta;
            dst += delta*dcn;

            // Scalar tail, specialized per channel count and blue side.
            // Pixels go in pairs: a non-green site (rounded 4-averages for
            // the two interpolated channels) then a green site (rounded
            // 2-averages).  The four branches differ only in which output
            // offsets receive which channel (and alpha for dcn == 4).
            if (dcn == 3)
            {
                if( blue > 0 )
                {
                    for( ; bayer <= bayer_end - 2; bayer += 2, dst += dcn2 )
                    {
                        t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] +
                              bayer[bayer_step*2+2] + 2) >> 2;
                        t1 = (bayer[1] + bayer[bayer_step] +
                              bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2;
                        dst[-1] = (T)t0;
                        dst[0] = (T)t1;
                        dst[1] = bayer[bayer_step+1];

                        t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1;
                        t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1;
                        dst[2] = (T)t0;
                        dst[3] = bayer[bayer_step+2];
                        dst[4] = (T)t1;
                    }
                }
                else
                {
                    for( ; bayer <= bayer_end - 2; bayer += 2, dst += dcn2 )
                    {
                        t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] +
                              bayer[bayer_step*2+2] + 2) >> 2;
                        t1 = (bayer[1] + bayer[bayer_step] +
                              bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2;
                        dst[1] = (T)t0;
                        dst[0] = (T)t1;
                        dst[-1] = bayer[bayer_step+1];

                        t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1;
                        t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1;
                        dst[4] = (T)t0;
                        dst[3] = bayer[bayer_step+2];
                        dst[2] = (T)t1;
                    }
                }
            }
            else
            {
                if( blue > 0 )
                {
                    for( ; bayer <= bayer_end - 2; bayer += 2, dst += dcn2 )
                    {
                        t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] +
                              bayer[bayer_step*2+2] + 2) >> 2;
                        t1 = (bayer[1] + bayer[bayer_step] +
                              bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2;
                        dst[-1] = (T)t0;
                        dst[0] = (T)t1;
                        dst[1] = bayer[bayer_step+1];
                        dst[2] = alpha;

                        t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1;
                        t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1;
                        dst[3] = (T)t0;
                        dst[4] = bayer[bayer_step+2];
                        dst[5] = (T)t1;
                        dst[6] = alpha;
                    }
                }
                else
                {
                    for( ; bayer <= bayer_end - 2; bayer += 2, dst += dcn2 )
                    {
                        t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] +
                              bayer[bayer_step*2+2] + 2) >> 2;
                        t1 = (bayer[1] + bayer[bayer_step] +
                              bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2;
                        dst[-1] = bayer[bayer_step+1];
                        dst[0] = (T)t1;
                        dst[1] = (T)t0;
                        dst[2] = alpha;

                        t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1;
                        t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1;
                        dst[3] = (T)t1;
                        dst[4] = bayer[bayer_step+2];
                        dst[5] = (T)t0;
                        dst[6] = alpha;
                    }
                }
            }

            // Odd trailing pixel (non-green site).
            if( bayer < bayer_end )
            {
                t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] +
                      bayer[bayer_step*2+2] + 2) >> 2;
                t1 = (bayer[1] + bayer[bayer_step] +
                      bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2;
                dst[-blue] = (T)t0;
                dst[0] = (T)t1;
                dst[blue] = bayer[bayer_step+1];
                if (dcn == 4)
                    dst[2] = alpha;
                bayer++;
                dst += dcn;
            }

            // Replicate the left/right border pixels of this output row.
            if (dcn == 3)
            {
                dst0[-4] = dst0[-1];
                dst0[-3] = dst0[0];
                dst0[-2] = dst0[1];
                dst0[size.width*dcn-1] = dst0[size.width*dcn-4];
                dst0[size.width*dcn] = dst0[size.width*dcn-3];
                dst0[size.width*dcn+1] = dst0[size.width*dcn-2];
            }
            else
            {
                dst0[-5] = dst0[-1];
                dst0[-4] = dst0[0];
                dst0[-3] = dst0[1];
                dst0[-2] = dst0[2];
                dst0[size.width*dcn-1] = dst0[size.width*dcn-5];
                dst0[size.width*dcn] = dst0[size.width*dcn-4];
                dst0[size.width*dcn+1] = dst0[size.width*dcn-3];
                dst0[size.width*dcn+2] = dst0[size.width*dcn-2];
            }

            // Advance the colour phase for the next row.
            blue = -blue;
            start_with_green = !start_with_green;
        }
    }

private:
    Mat srcmat;                   // Bayer source (header copy; shares data)
    Mat dstmat;                   // BGR/BGRA destination
    int Start_with_green, Blue;   // row-0 colour phase (see class comment)
    Size size;                    // interior size (src size minus 2 per dim)
};
|
|
|
|
|
|
template<typename T, class SIMDInterpolator>
|
|
|
static void Bayer2RGB_( const Mat& srcmat, Mat& dstmat, int code )
|
|
|
{
|
|
|
int dst_step = (int)(dstmat.step/sizeof(T));
|
|
|
Size size = srcmat.size();
|
|
|
int blue = (code == COLOR_BayerBG2BGR || code == COLOR_BayerGB2BGR ||
|
|
|
code == COLOR_BayerBG2BGRA || code == COLOR_BayerGB2BGRA ) ? -1 : 1;
|
|
|
int start_with_green = (code == COLOR_BayerGB2BGR || code == COLOR_BayerGR2BGR ||
|
|
|
code == COLOR_BayerGB2BGRA || code == COLOR_BayerGR2BGRA);
|
|
|
|
|
|
int dcn = dstmat.channels();
|
|
|
size.height -= 2;
|
|
|
size.width -= 2;
|
|
|
|
|
|
if (size.height > 0)
|
|
|
{
|
|
|
Range range(0, size.height);
|
|
|
Bayer2RGB_Invoker<T, SIMDInterpolator> invoker(srcmat, dstmat, start_with_green, blue, size);
|
|
|
parallel_for_(range, invoker, dstmat.total()/static_cast<double>(1<<16));
|
|
|
}
|
|
|
|
|
|
|
|
|
size = dstmat.size();
|
|
|
T* dst0 = dstmat.ptr<T>();
|
|
|
if( size.height > 2 )
|
|
|
for( int i = 0; i < size.width*dcn; i++ )
|
|
|
{
|
|
|
dst0[i] = dst0[i + dst_step];
|
|
|
dst0[i + (size.height-1)*dst_step] = dst0[i + (size.height-2)*dst_step];
|
|
|
}
|
|
|
else
|
|
|
for( int i = 0; i < size.width*dcn; i++ )
|
|
|
dst0[i] = dst0[i + (size.height-1)*dst_step] = 0;
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code )
|
|
|
{
|
|
|
const uchar* bayer = srcmat.ptr();
|
|
|
int bstep = (int)srcmat.step;
|
|
|
uchar* dst = dstmat.ptr();
|
|
|
int dststep = (int)dstmat.step;
|
|
|
Size size = srcmat.size();
|
|
|
|
|
|
int blueIdx = code == COLOR_BayerBG2BGR_VNG || code == COLOR_BayerGB2BGR_VNG ? 0 : 2;
|
|
|
bool greenCell0 = code != COLOR_BayerBG2BGR_VNG && code != COLOR_BayerRG2BGR_VNG;
|
|
|
|
|
|
|
|
|
if( MIN(size.width, size.height) < 8 )
|
|
|
{
|
|
|
Bayer2RGB_<uchar, SIMDBayerInterpolator_8u>( srcmat, dstmat, code );
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
const int brows = 3, bcn = 7;
|
|
|
int N = size.width, N2 = N*2, N3 = N*3, N4 = N*4, N5 = N*5, N6 = N*6, N7 = N*7;
|
|
|
int i, bufstep = N7*bcn;
|
|
|
cv::AutoBuffer<ushort> _buf(bufstep*brows);
|
|
|
ushort* buf = _buf.data();
|
|
|
|
|
|
bayer += bstep*2;
|
|
|
|
|
|
for( int y = 2; y < size.height - 4; y++ )
|
|
|
{
|
|
|
uchar* dstrow = dst + dststep*y + 6;
|
|
|
const uchar* srow;
|
|
|
|
|
|
for( int dy = (y == 2 ? -1 : 1); dy <= 1; dy++ )
|
|
|
{
|
|
|
ushort* brow = buf + ((y + dy - 1)%brows)*bufstep + 1;
|
|
|
srow = bayer + (y+dy)*bstep + 1;
|
|
|
|
|
|
for( i = 0; i < bcn; i++ )
|
|
|
brow[N*i-1] = brow[(N-2) + N*i] = 0;
|
|
|
|
|
|
i = 1;
|
|
|
|
|
|
#if CV_SIMD128
|
|
|
for( ; i <= N-9; i += 8, srow += 8, brow += 8 )
|
|
|
{
|
|
|
v_uint16x8 s1, s2, s3, s4, s6, s7, s8, s9;
|
|
|
|
|
|
s1 = v_load_expand(srow-1-bstep);
|
|
|
s2 = v_load_expand(srow-bstep);
|
|
|
s3 = v_load_expand(srow+1-bstep);
|
|
|
|
|
|
s4 = v_load_expand(srow-1);
|
|
|
s6 = v_load_expand(srow+1);
|
|
|
|
|
|
s7 = v_load_expand(srow-1+bstep);
|
|
|
s8 = v_load_expand(srow+bstep);
|
|
|
s9 = v_load_expand(srow+1+bstep);
|
|
|
|
|
|
v_uint16x8 b0, b1, b2, b3, b4, b5, b6;
|
|
|
|
|
|
b0 = v_add(v_add(v_shl<1>(v_absdiff(s2, s8)), v_absdiff(s1, s7)), v_absdiff(s3, s9));
|
|
|
b1 = v_add(v_add(v_shl<1>(v_absdiff(s4, s6)), v_absdiff(s1, s3)), v_absdiff(s7, s9));
|
|
|
b2 = v_shl<1>(v_absdiff(s3, s7));
|
|
|
b3 = v_shl<1>(v_absdiff(s1, s9));
|
|
|
|
|
|
v_store(brow, b0);
|
|
|
v_store(brow + N, b1);
|
|
|
v_store(brow + N2, b2);
|
|
|
v_store(brow + N3, b3);
|
|
|
|
|
|
b4 = v_add(v_add(b2, v_absdiff(s2, s4)), v_absdiff(s6, s8));
|
|
|
b5 = v_add(v_add(b3, v_absdiff(s2, s6)), v_absdiff(s4, s8));
|
|
|
b6 = v_shr<1>(v_add(v_add(v_add(s2, s4), s6), s8));
|
|
|
|
|
|
v_store(brow + N4, b4);
|
|
|
v_store(brow + N5, b5);
|
|
|
v_store(brow + N6, b6);
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
for( ; i < N-1; i++, srow++, brow++ )
|
|
|
{
|
|
|
brow[0] = (ushort)(std::abs(srow[-1-bstep] - srow[-1+bstep]) +
|
|
|
std::abs(srow[-bstep] - srow[+bstep])*2 +
|
|
|
std::abs(srow[1-bstep] - srow[1+bstep]));
|
|
|
brow[N] = (ushort)(std::abs(srow[-1-bstep] - srow[1-bstep]) +
|
|
|
std::abs(srow[-1] - srow[1])*2 +
|
|
|
std::abs(srow[-1+bstep] - srow[1+bstep]));
|
|
|
brow[N2] = (ushort)(std::abs(srow[+1-bstep] - srow[-1+bstep])*2);
|
|
|
brow[N3] = (ushort)(std::abs(srow[-1-bstep] - srow[1+bstep])*2);
|
|
|
brow[N4] = (ushort)(brow[N2] + std::abs(srow[-bstep] - srow[-1]) +
|
|
|
std::abs(srow[+bstep] - srow[1]));
|
|
|
brow[N5] = (ushort)(brow[N3] + std::abs(srow[-bstep] - srow[1]) +
|
|
|
std::abs(srow[+bstep] - srow[-1]));
|
|
|
brow[N6] = (ushort)((srow[-bstep] + srow[-1] + srow[1] + srow[+bstep])>>1);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
const ushort* brow0 = buf + ((y - 2) % brows)*bufstep + 2;
|
|
|
const ushort* brow1 = buf + ((y - 1) % brows)*bufstep + 2;
|
|
|
const ushort* brow2 = buf + (y % brows)*bufstep + 2;
|
|
|
static const float scale[] = { 0.f, 0.5f, 0.25f, 0.1666666666667f, 0.125f, 0.1f, 0.08333333333f, 0.0714286f, 0.0625f };
|
|
|
srow = bayer + y*bstep + 2;
|
|
|
bool greenCell = greenCell0;
|
|
|
|
|
|
i = 2;
|
|
|
#if CV_SIMD128
|
|
|
int limit = greenCell ? std::min(3, N-2) : 2;
|
|
|
#else
|
|
|
int limit = N - 2;
|
|
|
#endif
|
|
|
|
|
|
do
|
|
|
{
|
|
|
for( ; i < limit; i++, srow++, brow0++, brow1++, brow2++, dstrow += 3 )
|
|
|
{
|
|
|
int gradN = brow0[0] + brow1[0];
|
|
|
int gradS = brow1[0] + brow2[0];
|
|
|
int gradW = brow1[N-1] + brow1[N];
|
|
|
int gradE = brow1[N] + brow1[N+1];
|
|
|
int minGrad = std::min(std::min(std::min(gradN, gradS), gradW), gradE);
|
|
|
int maxGrad = std::max(std::max(std::max(gradN, gradS), gradW), gradE);
|
|
|
int R, G, B;
|
|
|
|
|
|
if( !greenCell )
|
|
|
{
|
|
|
int gradNE = brow0[N4+1] + brow1[N4];
|
|
|
int gradSW = brow1[N4] + brow2[N4-1];
|
|
|
int gradNW = brow0[N5-1] + brow1[N5];
|
|
|
int gradSE = brow1[N5] + brow2[N5+1];
|
|
|
|
|
|
minGrad = std::min(std::min(std::min(std::min(minGrad, gradNE), gradSW), gradNW), gradSE);
|
|
|
maxGrad = std::max(std::max(std::max(std::max(maxGrad, gradNE), gradSW), gradNW), gradSE);
|
|
|
int T = minGrad + MAX(maxGrad/2, 1);
|
|
|
|
|
|
int Rs = 0, Gs = 0, Bs = 0, ng = 0;
|
|
|
if( gradN < T )
|
|
|
{
|
|
|
Rs += srow[-bstep*2] + srow[0];
|
|
|
Gs += srow[-bstep]*2;
|
|
|
Bs += srow[-bstep-1] + srow[-bstep+1];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradS < T )
|
|
|
{
|
|
|
Rs += srow[bstep*2] + srow[0];
|
|
|
Gs += srow[bstep]*2;
|
|
|
Bs += srow[bstep-1] + srow[bstep+1];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradW < T )
|
|
|
{
|
|
|
Rs += srow[-2] + srow[0];
|
|
|
Gs += srow[-1]*2;
|
|
|
Bs += srow[-bstep-1] + srow[bstep-1];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradE < T )
|
|
|
{
|
|
|
Rs += srow[2] + srow[0];
|
|
|
Gs += srow[1]*2;
|
|
|
Bs += srow[-bstep+1] + srow[bstep+1];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradNE < T )
|
|
|
{
|
|
|
Rs += srow[-bstep*2+2] + srow[0];
|
|
|
Gs += brow0[N6+1];
|
|
|
Bs += srow[-bstep+1]*2;
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradSW < T )
|
|
|
{
|
|
|
Rs += srow[bstep*2-2] + srow[0];
|
|
|
Gs += brow2[N6-1];
|
|
|
Bs += srow[bstep-1]*2;
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradNW < T )
|
|
|
{
|
|
|
Rs += srow[-bstep*2-2] + srow[0];
|
|
|
Gs += brow0[N6-1];
|
|
|
Bs += srow[-bstep-1]*2;
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradSE < T )
|
|
|
{
|
|
|
Rs += srow[bstep*2+2] + srow[0];
|
|
|
Gs += brow2[N6+1];
|
|
|
Bs += srow[bstep+1]*2;
|
|
|
ng++;
|
|
|
}
|
|
|
R = srow[0];
|
|
|
G = R + cvRound((Gs - Rs)*scale[ng]);
|
|
|
B = R + cvRound((Bs - Rs)*scale[ng]);
|
|
|
}
|
|
|
else
|
|
|
{
|
|
|
int gradNE = brow0[N2] + brow0[N2+1] + brow1[N2] + brow1[N2+1];
|
|
|
int gradSW = brow1[N2] + brow1[N2-1] + brow2[N2] + brow2[N2-1];
|
|
|
int gradNW = brow0[N3] + brow0[N3-1] + brow1[N3] + brow1[N3-1];
|
|
|
int gradSE = brow1[N3] + brow1[N3+1] + brow2[N3] + brow2[N3+1];
|
|
|
|
|
|
minGrad = std::min(std::min(std::min(std::min(minGrad, gradNE), gradSW), gradNW), gradSE);
|
|
|
maxGrad = std::max(std::max(std::max(std::max(maxGrad, gradNE), gradSW), gradNW), gradSE);
|
|
|
int T = minGrad + MAX(maxGrad/2, 1);
|
|
|
|
|
|
int Rs = 0, Gs = 0, Bs = 0, ng = 0;
|
|
|
if( gradN < T )
|
|
|
{
|
|
|
Rs += srow[-bstep*2-1] + srow[-bstep*2+1];
|
|
|
Gs += srow[-bstep*2] + srow[0];
|
|
|
Bs += srow[-bstep]*2;
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradS < T )
|
|
|
{
|
|
|
Rs += srow[bstep*2-1] + srow[bstep*2+1];
|
|
|
Gs += srow[bstep*2] + srow[0];
|
|
|
Bs += srow[bstep]*2;
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradW < T )
|
|
|
{
|
|
|
Rs += srow[-1]*2;
|
|
|
Gs += srow[-2] + srow[0];
|
|
|
Bs += srow[-bstep-2]+srow[bstep-2];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradE < T )
|
|
|
{
|
|
|
Rs += srow[1]*2;
|
|
|
Gs += srow[2] + srow[0];
|
|
|
Bs += srow[-bstep+2]+srow[bstep+2];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradNE < T )
|
|
|
{
|
|
|
Rs += srow[-bstep*2+1] + srow[1];
|
|
|
Gs += srow[-bstep+1]*2;
|
|
|
Bs += srow[-bstep] + srow[-bstep+2];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradSW < T )
|
|
|
{
|
|
|
Rs += srow[bstep*2-1] + srow[-1];
|
|
|
Gs += srow[bstep-1]*2;
|
|
|
Bs += srow[bstep] + srow[bstep-2];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradNW < T )
|
|
|
{
|
|
|
Rs += srow[-bstep*2-1] + srow[-1];
|
|
|
Gs += srow[-bstep-1]*2;
|
|
|
Bs += srow[-bstep-2]+srow[-bstep];
|
|
|
ng++;
|
|
|
}
|
|
|
if( gradSE < T )
|
|
|
{
|
|
|
Rs += srow[bstep*2+1] + srow[1];
|
|
|
Gs += srow[bstep+1]*2;
|
|
|
Bs += srow[bstep+2]+srow[bstep];
|
|
|
ng++;
|
|
|
}
|
|
|
G = srow[0];
|
|
|
R = G + cvRound((Rs - Gs)*scale[ng]);
|
|
|
B = G + cvRound((Bs - Gs)*scale[ng]);
|
|
|
}
|
|
|
dstrow[blueIdx] = cv::saturate_cast<uchar>(B);
|
|
|
dstrow[1] = cv::saturate_cast<uchar>(G);
|
|
|
dstrow[blueIdx^2] = cv::saturate_cast<uchar>(R);
|
|
|
greenCell = !greenCell;
|
|
|
}
|
|
|
|
|
|
#if CV_SIMD128
|
|
|
v_uint32x4 emask = v_setall_u32(0x0000ffff), omask = v_setall_u32(0xffff0000);
|
|
|
v_uint16x8 one = v_setall_u16(1), z = v_setzero_u16();
|
|
|
v_float32x4 _0_5 = v_setall_f32(0.5f);
|
|
|
|
|
|
#define v_merge_u16(a, b) (v_or((v_and((a), v_reinterpret_as_u16(emask))), (v_and((b), v_reinterpret_as_u16(omask)))))
|
|
|
#define v_cvt_s16f32_lo(a) v_cvt_f32(v_expand_low(v_reinterpret_as_s16(a)))
|
|
|
#define v_cvt_s16f32_hi(a) v_cvt_f32(v_expand_high(v_reinterpret_as_s16(a)))
|
|
|
|
|
|
|
|
|
for( ; i <= N - 10; i += 8, srow += 8, brow0 += 8, brow1 += 8, brow2 += 8 )
|
|
|
{
|
|
|
|
|
|
v_uint16x8 gradN = v_add(v_load(brow0), v_load(brow1));
|
|
|
|
|
|
|
|
|
v_uint16x8 gradS = v_add(v_load(brow1), v_load(brow2));
|
|
|
|
|
|
|
|
|
v_uint16x8 gradW = v_add(v_load(brow1 + N - 1), v_load(brow1 + N));
|
|
|
|
|
|
|
|
|
v_uint16x8 gradE = v_add(v_load(brow1 + N + 1), v_load(brow1 + N));
|
|
|
|
|
|
|
|
|
|
|
|
v_uint16x8 minGrad = v_min(v_min(gradN, gradS), v_min(gradW, gradE));
|
|
|
v_uint16x8 maxGrad = v_max(v_max(gradN, gradS), v_max(gradW, gradE));
|
|
|
|
|
|
v_uint16x8 grad0, grad1;
|
|
|
|
|
|
|
|
|
|
|
|
grad0 = v_add(v_load(brow0 + N4 + 1), v_load(brow1 + N4));
|
|
|
grad1 = v_add(v_add(v_add(v_load(brow0 + N2), v_load(brow0 + N2 + 1)), v_load(brow1 + N2)), v_load(brow1 + N2 + 1));
|
|
|
v_uint16x8 gradNE = v_merge_u16(grad0, grad1);
|
|
|
|
|
|
|
|
|
|
|
|
grad0 = v_add(v_load(brow2 + N4 - 1), v_load(brow1 + N4));
|
|
|
grad1 = v_add(v_add(v_add(v_load(brow2 + N2), v_load(brow2 + N2 - 1)), v_load(brow1 + N2)), v_load(brow1 + N2 - 1));
|
|
|
v_uint16x8 gradSW = v_merge_u16(grad0, grad1);
|
|
|
|
|
|
minGrad = v_min(v_min(minGrad, gradNE), gradSW);
|
|
|
maxGrad = v_max(v_max(maxGrad, gradNE), gradSW);
|
|
|
|
|
|
|
|
|
|
|
|
grad0 = v_add(v_load(brow0 + N5 - 1), v_load(brow1 + N5));
|
|
|
grad1 = v_add(v_add(v_add(v_load(brow0 + N3), v_load(brow0 + N3 - 1)), v_load(brow1 + N3)), v_load(brow1 + N3 - 1));
|
|
|
v_uint16x8 gradNW = v_merge_u16(grad0, grad1);
|
|
|
|
|
|
|
|
|
|
|
|
grad0 = v_add(v_load(brow2 + N5 + 1), v_load(brow1 + N5));
|
|
|
grad1 = v_add(v_add(v_add(v_load(brow2 + N3), v_load(brow2 + N3 + 1)), v_load(brow1 + N3)), v_load(brow1 + N3 + 1));
|
|
|
v_uint16x8 gradSE = v_merge_u16(grad0, grad1);
|
|
|
|
|
|
minGrad = v_min(v_min(minGrad, gradNW), gradSE);
|
|
|
maxGrad = v_max(v_max(maxGrad, gradNW), gradSE);
|
|
|
|
|
|
|
|
|
v_uint16x8 T = v_add(v_max((v_shr<1>(maxGrad)), one), minGrad);
|
|
|
|
|
|
v_uint16x8 RGs = z, GRs = z, Bs = z, ng = z;
|
|
|
|
|
|
v_uint16x8 x0 = v_load_expand(srow +0);
|
|
|
v_uint16x8 x1 = v_load_expand(srow -1 - bstep);
|
|
|
v_uint16x8 x2 = v_load_expand(srow -1 - bstep*2);
|
|
|
v_uint16x8 x3 = v_load_expand(srow - bstep);
|
|
|
v_uint16x8 x4 = v_load_expand(srow +1 - bstep*2);
|
|
|
v_uint16x8 x5 = v_load_expand(srow +1 - bstep);
|
|
|
v_uint16x8 x6 = v_load_expand(srow +2 - bstep);
|
|
|
v_uint16x8 x7 = v_load_expand(srow +1);
|
|
|
v_uint16x8 x8 = v_load_expand(srow +2 + bstep);
|
|
|
v_uint16x8 x9 = v_load_expand(srow +1 + bstep);
|
|
|
v_uint16x8 x10 = v_load_expand(srow +1 + bstep*2);
|
|
|
v_uint16x8 x11 = v_load_expand(srow + bstep);
|
|
|
v_uint16x8 x12 = v_load_expand(srow -1 + bstep*2);
|
|
|
v_uint16x8 x13 = v_load_expand(srow -1 + bstep);
|
|
|
v_uint16x8 x14 = v_load_expand(srow -2 + bstep);
|
|
|
v_uint16x8 x15 = v_load_expand(srow -1);
|
|
|
v_uint16x8 x16 = v_load_expand(srow -2 - bstep);
|
|
|
|
|
|
v_uint16x8 t0, t1, mask;
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradN));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x3));
|
|
|
t1 = v_add(v_load_expand(srow - bstep * 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(t1, mask));
|
|
|
|
|
|
GRs = v_add(GRs, (v_and(v_merge_u16(t0, v_add(x2, x4)), mask)));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16(v_add(x1, x5), t0), mask));
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradNE));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x5));
|
|
|
t1 = v_add(v_load_expand(srow - bstep * 2 + 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(v_merge_u16(t1, t0), mask));
|
|
|
|
|
|
GRs = v_add(GRs, v_and(v_merge_u16(v_load(brow0+N6+1), v_add(x4, x7)), mask));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16(t0, v_add(x3, x6)), mask));
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradE));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x7));
|
|
|
t1 = v_add(v_load_expand(srow + 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(t1, mask));
|
|
|
|
|
|
GRs = v_add(GRs, v_and(t0, mask));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16(v_add(x5, x9), v_add(x6, x8)), mask));
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradSE));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x9));
|
|
|
t1 = v_add(v_load_expand(srow + bstep * 2 + 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(v_merge_u16(t1, t0), mask));
|
|
|
|
|
|
GRs = v_add(GRs, v_and(v_merge_u16(v_load(brow2+N6+1), v_add(x7, x10)), mask));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16((v_shl<1>(x9)), v_add(x8, x11)), mask));
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradS));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x11));
|
|
|
t1 = v_add(v_load_expand(srow + bstep * 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(t1, mask));
|
|
|
|
|
|
GRs = v_add(GRs, v_and(v_merge_u16(t0, v_add(x10, x12)), mask));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16(v_add(x9, x13), t0), mask));
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradSW));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x13));
|
|
|
t1 = v_add(v_load_expand(srow + bstep * 2 - 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(v_merge_u16(t1, t0), mask));
|
|
|
|
|
|
GRs = v_add(GRs, v_and(v_merge_u16(v_load(brow2+N6-1), v_add(x12, x15)), mask));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16(t0, v_add(x11, x14)), mask));
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradW));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x15));
|
|
|
t1 = v_add(v_load_expand(srow - 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(t1, mask));
|
|
|
|
|
|
GRs = v_add(GRs, v_and(t0, mask));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16(v_add(x1, x13), v_add(x14, x16)), mask));
|
|
|
|
|
|
|
|
|
mask = (v_gt(T, gradNW));
|
|
|
ng = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(ng), v_reinterpret_as_s16(mask)));
|
|
|
|
|
|
t0 = (v_shl<1>(x1));
|
|
|
t1 = v_add(v_load_expand(srow - bstep * 2 - 2), x0);
|
|
|
|
|
|
|
|
|
RGs = v_add(RGs, v_and(v_merge_u16(t1, t0), mask));
|
|
|
|
|
|
GRs = v_add(GRs, v_and(v_merge_u16(v_load(brow0+N6-1), v_add(x2, x15)), mask));
|
|
|
|
|
|
Bs = v_add(Bs, v_and(v_merge_u16(v_shl<1>(x1), v_add(x3, x16)), mask));
|
|
|
|
|
|
v_float32x4 ngf0 = v_div(_0_5, v_cvt_s16f32_lo(ng));
|
|
|
v_float32x4 ngf1 = v_div(_0_5, v_cvt_s16f32_hi(ng));
|
|
|
|
|
|
|
|
|
t0 = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(GRs), v_reinterpret_as_s16(RGs)));
|
|
|
t1 = v_reinterpret_as_u16(v_sub(v_reinterpret_as_s16(Bs), v_reinterpret_as_s16(RGs)));
|
|
|
|
|
|
t0 = v_reinterpret_as_u16(
|
|
|
v_add(v_reinterpret_as_s16(x0),
|
|
|
v_pack(
|
|
|
v_round(v_mul(v_cvt_s16f32_lo(t0), ngf0)),
|
|
|
v_round(v_mul(v_cvt_s16f32_hi(t0), ngf1)))));
|
|
|
|
|
|
t1 = v_reinterpret_as_u16(
|
|
|
v_add(v_reinterpret_as_s16(x0),
|
|
|
v_pack(
|
|
|
v_round(v_mul(v_cvt_s16f32_lo(t1), ngf0)),
|
|
|
v_round(v_mul(v_cvt_s16f32_hi(t1), ngf1)))));
|
|
|
|
|
|
x1 = v_merge_u16(x0, t0);
|
|
|
x2 = v_merge_u16(t0, x0);
|
|
|
|
|
|
uchar R[8], G[8], B[8];
|
|
|
|
|
|
v_store_low(blueIdx ? B : R, v_pack_u(v_reinterpret_as_s16(x1), v_reinterpret_as_s16(z)));
|
|
|
v_store_low(G, v_pack_u(v_reinterpret_as_s16(x2), v_reinterpret_as_s16(z)));
|
|
|
v_store_low(blueIdx ? R : B, v_pack_u(v_reinterpret_as_s16(t1), v_reinterpret_as_s16(z)));
|
|
|
|
|
|
for( int j = 0; j < 8; j++, dstrow += 3 )
|
|
|
{
|
|
|
dstrow[0] = B[j]; dstrow[1] = G[j]; dstrow[2] = R[j];
|
|
|
}
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
limit = N - 2;
|
|
|
}
|
|
|
while( i < N - 2 );
|
|
|
|
|
|
for( i = 0; i < 6; i++ )
|
|
|
{
|
|
|
dst[dststep*y + 5 - i] = dst[dststep*y + 8 - i];
|
|
|
dst[dststep*y + (N - 2)*3 + i] = dst[dststep*y + (N - 3)*3 + i];
|
|
|
}
|
|
|
|
|
|
greenCell0 = !greenCell0;
|
|
|
blueIdx ^= 2;
|
|
|
}
|
|
|
|
|
|
for( i = 0; i < size.width*3; i++ )
|
|
|
{
|
|
|
dst[i] = dst[i + dststep] = dst[i + dststep*2];
|
|
|
dst[i + dststep*(size.height-4)] =
|
|
|
dst[i + dststep*(size.height-3)] =
|
|
|
dst[i + dststep*(size.height-2)] =
|
|
|
dst[i + dststep*(size.height-1)] = dst[i + dststep*(size.height-5)];
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Parallel loop body for simple edge-aware Bayer -> BGR demosaicing.
// Each invocation converts a horizontal strip of interior rows; the one-pixel
// image border is handled by the caller (Bayer2RGB_EdgeAware_T).
// SIMDInterpolator::bayer2RGB_EA may vectorize the bulk of each row and
// reports how many pixels it consumed; the scalar loops finish the remainder.
template <typename T, typename SIMDInterpolator>
class Bayer2RGB_EdgeAware_T_Invoker :
    public cv::ParallelLoopBody
{
public:
    // _size is the interior size (source size minus the 2-pixel border);
    // _blue / _start_with_green encode the Bayer pattern phase of row 0.
    Bayer2RGB_EdgeAware_T_Invoker(const Mat& _src, Mat& _dst, const Size& _size,
        int _blue, int _start_with_green) :
        ParallelLoopBody(),
        src(_src), dst(_dst), size(_size), Blue(_blue), Start_with_green(_start_with_green)
    {
    }

    // Converts interior rows [range.start, range.end); strip row y reads
    // source row y+1 (offset by the excluded one-pixel border).
    virtual void operator()(const Range& range) const CV_OVERRIDE
    {
        int dcn = dst.channels();
        int dcn2 = dcn<<1;                 // stride of two output pixels
        int start_with_green = Start_with_green, blue = Blue;
        // steps in elements (not bytes), so S/D pointer arithmetic is in T units
        int sstep = int(src.step / src.elemSize1()), dstep = int(dst.step / dst.elemSize1());
        SIMDInterpolator vecOp;

        // S/D start at the first interior pixel of the strip's first row.
        const T* S = src.ptr<T>(range.start + 1) + 1;
        T* D = reinterpret_cast<T*>(dst.data + (range.start + 1) * dst.step) + dcn;

        // The Bayer pattern alternates phase every row, so an odd strip start
        // flips both flags relative to row 0.
        if (range.start % 2)
        {
            start_with_green ^= 1;
            blue ^= 1;
        }

        for (int y = range.start; y < range.end; ++y)
        {
            int x = 1;
            // Leading green pixel: red/blue come from vertical/horizontal
            // neighbor averages (rounded), green is taken directly.
            if (start_with_green)
            {
                D[blue<<1] = (S[-sstep] + S[sstep] + 1) >> 1;
                D[1] = S[0];
                D[2-(blue<<1)] = (S[-1] + S[1] + 1) >> 1;
                D += dcn;
                ++S;
                ++x;
            }

            // Let the SIMD kernel process as many pixels as it can; `delta`
            // is the number of pixels it produced (0 for the stub).
            int delta = vecOp.bayer2RGB_EA(S - sstep - 1, sstep, D, size.width, blue);
            x += delta;
            S += delta;
            D += dcn * delta;

            // Scalar tail: pairs of (non-green, green) pixels. The missing
            // green at a non-green site is interpolated along the direction
            // of the smaller gradient (edge-aware); the opposite color is the
            // rounded average of the four diagonal neighbors.
            if (blue)
                for (; x < size.width; x += 2, S += 2, D += dcn2)
                {
                    D[0] = S[0];
                    D[1] = (std::abs(S[-1] - S[1]) > std::abs(S[sstep] - S[-sstep]) ? (S[sstep] + S[-sstep] + 1) : (S[-1] + S[1] + 1)) >> 1;
                    D[2] = (S[-sstep-1] + S[-sstep+1] + S[sstep-1] + S[sstep+1] + 2) >> 2;

                    D[3] = (S[0] + S[2] + 1) >> 1;
                    D[4] = S[1];
                    D[5] = (S[-sstep+1] + S[sstep+1] + 1) >> 1;
                }
            else
                for (; x < size.width; x += 2, S += 2, D += dcn2)
                {
                    D[0] = (S[-sstep-1] + S[-sstep+1] + S[sstep-1] + S[sstep+1] + 2) >> 2;
                    D[1] = (std::abs(S[-1] - S[1]) > std::abs(S[sstep] - S[-sstep]) ? (S[sstep] + S[-sstep] + 1) : (S[-1] + S[1] + 1)) >> 1;
                    D[2] = S[0];

                    D[3] = (S[-sstep+1] + S[sstep+1] + 1) >> 1;
                    D[4] = S[1];
                    D[5] = (S[0] + S[2] + 1) >> 1;
                }

            // Trailing odd pixel (row width left one unpaired non-green site).
            if (x <= size.width)
            {
                D[blue<<1] = (S[-sstep-1] + S[-sstep+1] + S[sstep-1] + S[sstep+1] + 2) >> 2;
                D[1] = (std::abs(S[-1] - S[1]) > std::abs(S[sstep] - S[-sstep]) ? (S[sstep] + S[-sstep] + 1) : (S[-1] + S[1] + 1)) >> 1;
                D[2-(blue<<1)] = S[0];
                D += dcn;
                ++S;
            }

            // Patch border columns by replicating adjacent interior pixels:
            // the pixel right of the last one written, and (pointer-relative)
            // a border pixel of the row above — NOTE(review): exact columns
            // follow from the pointer layout above; confirm against the
            // caller's left/right border convention.
            for (int i = 0; i < dcn; ++i)
            {
                D[i] = D[-dcn + i];
                D[-dstep+dcn+i] = D[-dstep+(dcn<<1)+i];
            }

            // Advance to the next row and flip the Bayer phase.
            start_with_green ^= 1;
            blue ^= 1;
            S += 2;
            D += dcn2;
        }
    }

private:
    // Shallow Mat copies: headers are copied, pixel data is shared with the caller.
    Mat src;
    Mat dst;
    Size size;          // interior size (caller's size minus 2 in each dimension)
    int Blue, Start_with_green;  // Bayer phase of the first interior row
};
|
|
|
|
|
|
template <typename T, typename SIMDInterpolator>
|
|
|
static void Bayer2RGB_EdgeAware_T(const Mat& src, Mat& dst, int code)
|
|
|
{
|
|
|
Size size = src.size();
|
|
|
|
|
|
|
|
|
if (size.width <= 2 || size.height <= 2)
|
|
|
{
|
|
|
dst = Scalar::all(0);
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
size.width -= 2;
|
|
|
size.height -= 2;
|
|
|
|
|
|
int start_with_green = code == COLOR_BayerGB2BGR_EA || code == COLOR_BayerGR2BGR_EA ? 1 : 0;
|
|
|
int blue = code == COLOR_BayerGB2BGR_EA || code == COLOR_BayerBG2BGR_EA ? 1 : 0;
|
|
|
|
|
|
if (size.height > 0)
|
|
|
{
|
|
|
Bayer2RGB_EdgeAware_T_Invoker<T, SIMDInterpolator> invoker(src, dst, size, blue, start_with_green);
|
|
|
Range range(0, size.height);
|
|
|
parallel_for_(range, invoker, dst.total()/static_cast<double>(1<<16));
|
|
|
}
|
|
|
size = dst.size();
|
|
|
size.width *= dst.channels();
|
|
|
size_t dstep = dst.step / dst.elemSize1();
|
|
|
T* firstRow = dst.ptr<T>();
|
|
|
T* lastRow = dst.ptr<T>() + (size.height-1) * dstep;
|
|
|
|
|
|
if (size.height > 2)
|
|
|
{
|
|
|
for (int x = 0; x < size.width; ++x)
|
|
|
{
|
|
|
firstRow[x] = (firstRow+dstep)[x];
|
|
|
lastRow[x] = (lastRow-dstep)[x];
|
|
|
}
|
|
|
}
|
|
|
else
|
|
|
for (int x = 0; x < size.width; ++x)
|
|
|
firstRow[x] = lastRow[x] = 0;
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Public demosaicing entry point: converts a single-channel Bayer mosaic
// (8u or 16u) to grayscale or BGR(A), dispatching on the conversion `code`
// to the bilinear, VNG, or edge-aware implementations above.
// `dcn` <= 0 selects the default output channel count for the chosen code.
void cv::demosaicing(InputArray _src, OutputArray _dst, int code, int dcn)
{
    CV_INSTRUMENT_REGION();

    Mat src = _src.getMat(), dst;
    Size sz = src.size();
    int scn = src.channels(), depth = src.depth();

    CV_Assert(depth == CV_8U || depth == CV_16U);
    CV_Assert(!src.empty());

    switch (code)
    {
    // Bayer -> single-channel grayscale
    case COLOR_BayerBG2GRAY: case COLOR_BayerGB2GRAY: case COLOR_BayerRG2GRAY: case COLOR_BayerGR2GRAY:
        if (dcn <= 0)
            dcn = 1;
        CV_Assert( scn == 1 && dcn == 1 );

        _dst.create(sz, CV_MAKETYPE(depth, dcn));
        dst = _dst.getMat();

        if( depth == CV_8U )
            Bayer2Gray_<uchar, SIMDBayerInterpolator_8u>(src, dst, code);
        else if( depth == CV_16U )
            // 16u path uses the scalar stub interpolator (no SIMD kernel)
            Bayer2Gray_<ushort, SIMDBayerStubInterpolator_<ushort> >(src, dst, code);
        else
            CV_Error(cv::Error::StsUnsupportedFormat, "Bayer->Gray demosaicing only supports 8u and 16u types");
        break;

    // Bayer -> BGRA: set the 4-channel default, then reuse the BGR path below.
    case COLOR_BayerBG2BGRA: case COLOR_BayerGB2BGRA: case COLOR_BayerRG2BGRA: case COLOR_BayerGR2BGRA:
        if (dcn <= 0)
            dcn = 4;
        /* fallthrough */

    // Bayer -> BGR (bilinear) and Bayer -> BGR (VNG)
    case COLOR_BayerBG2BGR: case COLOR_BayerGB2BGR: case COLOR_BayerRG2BGR: case COLOR_BayerGR2BGR:
    case COLOR_BayerBG2BGR_VNG: case COLOR_BayerGB2BGR_VNG: case COLOR_BayerRG2BGR_VNG: case COLOR_BayerGR2BGR_VNG:
        {
            if (dcn <= 0)
                dcn = 3;
            CV_Assert( scn == 1 && (dcn == 3 || dcn == 4) );

            _dst.create(sz, CV_MAKE_TYPE(depth, dcn));
            Mat dst_ = _dst.getMat();

            // Plain (non-VNG) codes go through the bilinear interpolator.
            if( code == COLOR_BayerBG2BGR || code == COLOR_BayerBG2BGRA ||
                code == COLOR_BayerGB2BGR || code == COLOR_BayerGB2BGRA ||
                code == COLOR_BayerRG2BGR || code == COLOR_BayerRG2BGRA ||
                code == COLOR_BayerGR2BGR || code == COLOR_BayerGR2BGRA )
            {
                if( depth == CV_8U )
                    Bayer2RGB_<uchar, SIMDBayerInterpolator_8u>(src, dst_, code);
                else if( depth == CV_16U )
                    Bayer2RGB_<ushort, SIMDBayerStubInterpolator_<ushort> >(src, dst_, code);
                else
                    CV_Error(cv::Error::StsUnsupportedFormat, "Bayer->RGB demosaicing only supports 8u and 16u types");
            }
            else
            {
                // VNG (Variable Number of Gradients) is implemented for 8u only.
                CV_Assert( depth == CV_8U );
                Bayer2RGB_VNG_8u(src, dst_, code);
            }
        }
        break;

    // Bayer -> BGR, edge-aware interpolation
    case COLOR_BayerBG2BGR_EA: case COLOR_BayerGB2BGR_EA: case COLOR_BayerRG2BGR_EA: case COLOR_BayerGR2BGR_EA:
        if (dcn <= 0)
            dcn = 3;

        CV_Assert(scn == 1 && dcn == 3);
        _dst.create(sz, CV_MAKETYPE(depth, dcn));
        dst = _dst.getMat();

        if (depth == CV_8U)
            Bayer2RGB_EdgeAware_T<uchar, SIMDBayerInterpolator_8u>(src, dst, code);
        else if (depth == CV_16U)
            Bayer2RGB_EdgeAware_T<ushort, SIMDBayerStubInterpolator_<ushort> >(src, dst, code);
        else
            CV_Error(cv::Error::StsUnsupportedFormat, "Bayer->RGB Edge-Aware demosaicing only currently supports 8u and 16u types");

        break;

    default:
        CV_Error( cv::Error::StsBadFlag, "Unknown / unsupported color conversion code" );
    }
}
|
|
|
|