/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_MIPS_MSA_NATIVE)
v16i8 msa_i8;
v8i16 msa_i16;
v4i32 msa_i32;
v2i64 msa_i64;
v16u8 msa_u8;
v8u16 msa_u16;
v4u32 msa_u32;
v2u64 msa_u64;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
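/* The helpers below convert between the public simde__m128 type and the
 * private union through simde_memcpy, the portable way to type-pun
 * without violating strict aliasing; compilers optimize the fixed-size
 * copy away. */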
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128)
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
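/* These values are the rounding-control field of the x86 MXCSR register
 * (bits 13 and 14), which is why the fallback definitions use the same
 * constants: 0x2000 is round-down, 0x4000 round-up, and 0x6000
 * round-toward-zero. */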
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
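/* Composite macros matching the SSE4.1 header: each combines a rounding
 * direction with either raising (SIMDE_MM_FROUND_RAISE_EXC) or
 * suppressing (SIMDE_MM_FROUND_NO_EXC) the precision exception. */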
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
#define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
#define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
#define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
#define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
#define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
#define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
#define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
#define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
#define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
#define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
#define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
#define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
#define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
#define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif
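/* Flush-to-zero is bit 15 of MXCSR, hence 0x8000. On non-x86 targets
 * these values are only reserved: SIMDE_MM_SET_FLUSH_ZERO_MODE below is
 * a no-op there. */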
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
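/* Usage sketch: save, change, and restore the rounding mode around a
 * rounding-sensitive computation:
 *
 *   unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
 *   SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
 *   ... truncating computation ...
 *   SIMDE_MM_SET_ROUNDING_MODE(saved);
 */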
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
#else
return SIMDE_MM_FLUSH_ZERO_OFF;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_FLUSH_ZERO_MODE() SIMDE_MM_GET_FLUSH_ZERO_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_FLUSH_ZERO_MODE(a);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8_NATIVE)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
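/* Example (a sketch; v is any simde__m128): round each lane to the
 * nearest integer without raising the precision exception, as with the
 * SSE4.1 intrinsic:
 *
 *   simde__m128 r = simde_mm_round_ps(v,
 *     SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_NO_EXC);
 */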
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
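/* Note the argument order matches the x86 intrinsic: e0 is the lowest
 * lane, so simde_mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f) yields
 * f32[0] == 0.0f and f32[3] == 3.0f. */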
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) m = { ~0U, 0U, 0U, 0U };
r_.altivec_f32 = vec_sel(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_broadcastlow_ps(simde__m128 a) {
/* This function broadcasts the first element in the input vector to
* all lanes. It is used to avoid generating spurious exceptions in
* *_ss functions since there may be garbage in the upper lanes. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_shuffle_ps(a, a, 0);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdupq_laneq_f32(a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_splat(a_.altivec_f32, 0);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[0];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
/* the upper values in the result must be the remnants of <a>. */
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
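/* The pattern above (broadcast the low lane of both inputs, run the
 * packed operation, then move_ss the result's low lane into a) is
 * reused by most *_ss fallbacks below; it preserves lane-0 semantics
 * while keeping garbage in the upper lanes from raising spurious
 * floating-point exceptions. */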
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
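/* The portable fallback below uses the branchless select identity
 * r = a ^ ((a ^ b) & mask): where mask bits are set the a bits cancel
 * and b is selected; where they are clear the result is a. */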
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
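/* The scalar form (a + b + 1) >> 1 is the rounding average computed by
 * the PAVGW instruction; e.g. avg(1, 2) = (1 + 2 + 1) >> 1 = 2. The
 * widened-vector path does the same arithmetic in uint32_t so the
 * 16-bit sum cannot overflow. */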
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
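/* simde_x_mm_abs_ps clears the IEEE-754 sign bit of each lane. The
 * native path builds the 0x7FFFFFFF mask with simde_memcpy rather than
 * a float literal so only a bitwise AND is performed and no
 * floating-point exception can be raised. */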
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
simde_float32 mask_;
uint32_t u32_ = UINT32_C(0x7FFFFFFF);
simde_memcpy(&mask_, &u32_, sizeof(u32_));
return _mm_and_ps(_mm_set1_ps(mask_), a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
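/* The cmpn*_ps/_ss fallbacks below forward to the complementary
 * predicate. This matches the x86 result only for ordered inputs:
 * e.g. CMPNGEPS reports true when an operand is NaN, while CMPLTPS
 * reports false. */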
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON has no ordered-compare builtin, so we compare a == a and
b == b to detect NaNs, then AND the two results together. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
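/* In the simde_mm_comi*_ss NEON fallbacks below, unordered inputs
 * (either operand NaN) follow the x86 COMISS flag behavior: comieq,
 * comile and comilt OR in the NaN mask and so return 1, while comige,
 * comigt and comineq AND with the not-NaN mask and so return 0. */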
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if defined(SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
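/* Bit trick: (dest ^ src) & signmask isolates the lanes where the sign
 * bits differ; XORing that back into dest flips exactly those signs
 * while leaving dest's magnitude untouched. */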
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
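/* Flips the sign of each lane of dest wherever src is negative, i.e.
 * dest * (src < 0 ? -1 : 1), computed with an AND + XOR instead of a
 * multiply. */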
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) && !defined(SIMDE_BUG_GCC_100761)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
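/* Out-of-range inputs (including NaN) fail both comparisons and yield
 * INT32_MIN, the x86 "integer indefinite" value produced by CVTSS2SI. */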
return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
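/* ARMv7 NEON has no float division: start from the vrecpeq_f32
 * reciprocal estimate, refine it with one Newton-Raphson step
 * (vrecpsq_f32 computes 2 - x*estimate), then multiply by a. */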
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if defined(SIMDE_BUG_CLANG_44589)
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if defined(SIMDE_BUG_CLANG_44589)
# define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
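/* vrev64q_f32 reverses within each 64-bit half (giving 1,0,3,2);
 * vextq_f32 by 2 then swaps the halves, completing the full 3,2,1,0
 * reversal. */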
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
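/* Store a byte only where the corresponding mask byte has its high bit
 * set (i.e. is negative as int8_t); all other bytes at mem_addr are
 * left untouched. */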
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vminq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_pmin(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
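/* Branch-free blend: m is all-ones in lanes where a < b, so
 * (a & m) | (b & ~m) selects a in those lanes and b elsewhere. */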
uint32_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32 < b_.f32);
r_.f32 =
HEDLEY_REINTERPRET_CAST(
__typeof__(r_.f32),
( (HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32) & m) |
(HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f32) & ~m)
)
);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
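/* Isolate each sign bit with the 0x80 mask, then shift byte i right by
 * 7-i (vshl_u8 with a negative count shifts right) so it lands at bit
 * position i; the horizontal add then assembles the 8-bit mask. */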
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Shift out everything but the sign bits with a 32-bit unsigned shift right. */
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
/* Merge the two pairs together with a 64-bit unsigned shift right + add. */
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
/* Extract the result. */
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
static const uint32_t md[4] = {
1 << 0, 1 << 1, 1 << 2, 1 << 3
};
uint32x4_t extended = vreinterpretq_u32_s32(vshrq_n_s32(a_.neon_i32, 31));
uint32x4_t masked = vandq_u32(vld1q_u32(md), extended);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(int32_t, vaddvq_u32(masked));
#else
uint64x2_t t64 = vpaddlq_u32(masked);
return
HEDLEY_STATIC_CAST(int, vgetq_lane_u64(t64, 0)) +
HEDLEY_STATIC_CAST(int, vgetq_lane_u64(t64, 1));
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx));
return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx);
return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
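/* Widen to full 16x16 -> 32-bit products, shift right by 16 to keep
 * the high halves, then narrow back down to 16 bits. */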
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ENTA
#define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET2
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
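/* Subtracting the bit pattern from a magic constant approximately
 * negates the exponent, giving a rough 1/x; the multiply that follows
 * is one Newton-Raphson step, r = r * (2 - r*x), to refine it. */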
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
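/* Fast inverse square root: shifting the bit pattern right by one
 * roughly halves the exponent, and subtracting it from a magic
 * constant approximates 1/sqrt(x). SIMDE_ACCURACY_PREFERENCE selects
 * the constant and how many Newton-Raphson refinement steps
 * (x *= 1.5008909 - xhalf*x*x) are applied. */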
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
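/* vabd_u8 gives per-byte absolute differences; the chain of pairwise
 * widening adds (u8 -> u16 -> u32 -> u64) reduces them to a single
 * sum, which is stored in lane 0 with the upper lanes zeroed. */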
uint64x1_t t = vpaddl_u32(vpaddl_u16(vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8))));
r_.neon_u16 = vset_lane_u16(HEDLEY_STATIC_CAST(uint16_t, vget_lane_u64(t, 0)), vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, simde_math_abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
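/* Note (descriptive): lane-wise compare-equal of a value with itself yields
 * all-1 bits per lane; starting from a zeroed vector avoids the NaN != NaN case. */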
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
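/* Note (descriptive): x86 SFENCE only orders stores, while the portable
 * fallbacks below emit full sequentially-consistent fences -- stronger than
 * required, but still a correct (if conservative) substitute. */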
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
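/* Illustrative sketch: SIMDE_MM_SHUFFLE packs four 2-bit lane selectors into
 * one immediate, e.g. SIMDE_MM_SHUFFLE(3, 2, 1, 0) == 0xE4 (the identity
 * selection). Reversing the lanes of a vector v would be:
 *
 * simde__m128 rev = simde_mm_shuffle_ps(v, v, SIMDE_MM_SHUFFLE(0, 1, 2, 3));
 */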
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_shuffle_ps(a, b, imm8) \
(__extension__({ \
float32x4_t simde_mm_shuffle_ps_a_ = simde__m128_to_neon_f32(a); \
float32x4_t simde_mm_shuffle_ps_b_ = simde__m128_to_neon_f32(b); \
float32x4_t simde_mm_shuffle_ps_r_; \
\
simde_mm_shuffle_ps_r_ = vmovq_n_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, (imm8) & (0x3))); \
simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, ((imm8) >> 2) & 0x3), simde_mm_shuffle_ps_r_, 1); \
simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 4) & 0x3), simde_mm_shuffle_ps_r_, 2); \
vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 6) & 0x3), simde_mm_shuffle_ps_r_, 3); \
}))
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(simde_x_mm_broadcastlow_ps(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
simde_mm_store_ps(mem_addr_, simde__m128_from_private(tmp_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp0; \
SIMDE_MM_TRANSPOSE4_PS_tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
SIMDE_MM_TRANSPOSE4_PS_tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
SIMDE_MM_TRANSPOSE4_PS_tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
SIMDE_MM_TRANSPOSE4_PS_tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp0, SIMDE_MM_TRANSPOSE4_PS_tmp2); \
row1 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp0); \
row2 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp3); \
row3 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
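/* Illustrative sketch: given the four rows of a 4x4 float matrix, the macro
 * transposes them in place, so element [r][c] ends up at [c][r]:
 *
 * simde__m128 r0, r1, r2, r3; // load the four rows first
 * SIMDE_MM_TRANSPOSE4_PS(r0, r1, r2, r3);
 * // r0 now holds the former column 0, r1 column 1, and so on.
 */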
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
MatrixMultiplication.c | #include<omp.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
typedef double ttype;
#define SEED 1111
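/* Usage sketch (assumes a POSIX toolchain with OpenMP; names are illustrative):
 * gcc -fopenmp -O2 MatrixMultiplication.c -o matmul
 * ./matmul <num_threads> <rowsA> <colsA> <rowsB> <colsB>
 * e.g. ./matmul 4 512 256 256 512 multiplies a 512x256 by a 256x512 matrix. */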
ttype tdiff(struct timespec a, struct timespec b)
{
ttype dt = (( b.tv_sec - a.tv_sec ) + ( b.tv_nsec - a.tv_nsec ) / 1E9); //Finding the time difference (copied from sample file on e-learning)
return dt;
}
struct timespec now()
{
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
return t;
}
int main(int argc, char *argv[])
{
int i,j,k;
srand(SEED);
struct timespec begin, end;
double time_spent;
int a1,a2,a3,a4;
int num_threads = atoi(argv[1]);
a1 = atoi(argv[2]);
a2 = atoi(argv[3]);
a3 = atoi(argv[4]);
a4 = atoi(argv[5]);
int **A =(int **)malloc(a1 * sizeof(int*)); //dynamically allocating space for matrix A
for(i=0;i<a1;i++)
{
A[i] = (int *)malloc(a2 * sizeof(int));
}
for(i=0;i<a1;i++)
{
for(j=0;j<a2;j++)
{
A[i][j]= rand(); // Initializing matrix A with random values
}
}
int **B = (int **)malloc(a3 * sizeof(int*)); //dynamically allocating space for matrix B
for(i=0;i<a3;i++)
{
B[i]=(int *)malloc(a4*sizeof(int));
}
for(i=0;i<a3;i++)
{
for(j=0;j<a4;j++)
{
B[i][j]=rand(); // Initializing matrix B with random values
}
}
int **C=(int **)malloc(a1*sizeof(int*)); //dynamically allocating space for matrix C
for(i=0;i<a1;i++)
{
C[i]=(int *)malloc(a4*sizeof(int));
}
for(i=0;i<a1;i++)
{
for(j=0;j<a4;j++)
{
C[i][j] = 0;
}
}
omp_set_num_threads(num_threads);
begin = now(); //start calculating the time
if(a2 != a3)
{
printf("Cannot Multipy matrices \n"); //checking for order of matrices to tbe multiplied.
}
else
{
for(i=0;i<a1;i++)
{
//Parallel execution starts
#pragma omp parallel for shared(a1,a4,a2) private(j,k) firstprivate(A,B)
for(k=0;k<a4;k++)
{
for(j=0;j<a2;j++)
{
C[i][k]+= A[i][j]*B[j][k];
}
}
}
}
end = now();
time_spent = tdiff(begin, end);
printf("Total time in execution is %.8f sec\n", time_spent);
return 0;
}
|
GB_unaryop__abs_bool_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_int64
// op(A') function: GB_tran__abs_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_bool_int64
(
bool *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_bool_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
jacobi7_5_save.c | #define max(a,b) (((a) < (b))? (b) : (a))
#define min(a,b) (((a) < (b))? (a) : (b))
#define _NB_2 1
#define _TH_1 2
#include <omp.h>
#define _NB_1 1
#define Index3D(_nx,_ny,_i,_j,_k) ((_i)+_nx*((_j)+_ny*(_k)))
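/* Note (descriptive): Index3D linearizes (i, j, k) with i varying fastest,
 * index = i + nx*(j + ny*k); the loop nest below applies a 7-point Jacobi
 * stencil (center plus six face neighbors) to every interior grid point. */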
void jacobi7(const int nx,const int ny,int nz,const double alpha,double* A0,const int timesteps,const double* B,const int ldb,double* Anext,const int ldc) {
double fac;
double* temp_ptr;
int i;int j;int k;int t;
fac = 6.0/(A0[0]*A0[0]);
int k_bk_1;
int k_bk_2;
/*@;BEGIN(Nest1=Nest)@*/for (t=0; t<timesteps; t+=1)
{
omp_set_num_threads(_TH_1);
#pragma omp parallel
{
/*@;BEGIN(Nest2_group3=Nest)@*/#pragma omp for private(k,j,i,k_bk_1,k_bk_2)
for (k_bk_1=1; k_bk_1<nz-1; k_bk_1+=_NB_1)
{
/*@;BEGIN(Nest2=Nest)@*/for (k_bk_2=0; k_bk_2<min(_NB_1,-k_bk_1+(-1+nz)); k_bk_2+=_NB_2)
{
for (k=0; k<min(_NB_2,min(_NB_1-k_bk_2,-k_bk_2+(-k_bk_1+(-1+nz)))); k+=1)
{
/*@;BEGIN(Nest3=Nest)@*/for (j=1; j<-1+ny; j+=1)
{
/*@;BEGIN(Nest4=Nest)@*/for (i=1; i<-1+nx; i+=1)
{
Anext[Index3D(nx,ny,i,j,k_bk_1+(k_bk_2+k))] = -(A0[Index3D(nx,ny,i,j,k_bk_1+(k_bk_2+k))]*fac)+(A0[Index3D(nx,ny,-1+i,j,k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,1+i,j,k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,i,-1+j,k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,i,1+j,k_bk_1+(k_bk_2+k))]+(A0[Index3D(nx,ny,i,j,1+(k_bk_1+(k_bk_2+k)))]+A0[Index3D(nx,ny,i,j,-1+(k_bk_1+(k_bk_2+k)))])))));
}
}
}
}
}
}
temp_ptr = A0;
A0 = Anext;
Anext = temp_ptr;
}
}
|
rawKeccak_256_fmt_plug.c | /* Keccak-256 cracker patch for JtR. Hacked together during May of 2013
* by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* Usage: john --format:raw-keccak-256 <hash file>
*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2012 by Solar Designer
*
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawKeccak_256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawKeccak_256);
#else
#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "KeccakHash.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_TAG "$keccak256$"
#define TAG_LENGTH 11
#define FORMAT_LABEL "Raw-Keccak-256"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45", "abc"},
{"$keccak256$4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45", "abc"},
{"$keccak256$3b673b24a64aebb286f193e5c985c8e528db8590f997d9130889ca7f5f4cfe6e", "passWOrd"},
{"$keccak256$2a359feeb8e488a1af2c03b908b3ed7990400555db73e1421181d97cac004d48", "123456789"},
{"$keccak256$c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", ""},
{NULL}
};
static int (*saved_len);
// the Keccak code may read up to the next even 8-byte offset;
// making the buffer larger avoids reading past the end of the buffer
static char (*saved_key)[(((PLAINTEXT_LENGTH+1)+7)/8)*8];
static ARCH_WORD_32 (*crypt_out)
[(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
q = p;
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
return !*q && q - p == CIPHERTEXT_LENGTH;
}
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
memcpy(out, FORMAT_TAG, TAG_LENGTH);
memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
strlwr(out + TAG_LENGTH);
return out;
}
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *p;
int i;
if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
p = ciphertext + TAG_LENGTH;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index)
{
return crypt_out[index][0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return crypt_out[index][0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return crypt_out[index][0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return crypt_out[index][0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return crypt_out[index][0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return crypt_out[index][0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return crypt_out[index][0] & PH_MASK_6;
}
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
Keccak_HashInstance hash;
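/* Note (descriptive): rate 1088 + capacity 512 = the full 1600-bit Keccak
   state, with a 256-bit digest; the 0x01 delimited suffix selects original
   Keccak padding (SHA-3 proper would use 0x06). */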
Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01);
Keccak_HashUpdate(&hash, (unsigned char*)saved_key[index], saved_len[index] * 8);
Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index]);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_rawKeccak_256 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"Keccak 256 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
tree.h | #ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/meta.h>
#include <LightGBM/dataset.h>
#include <string>
#include <vector>
#include <memory>
#include <map>
#include <unordered_map>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
*/
explicit Tree(int max_leaves);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len Number of characters of str that were consumed
*/
Tree(const char* str, size_t* used_len);
~Tree();
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = output;
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
/*! \brief Get the number of data points that fall at or below this node*/
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a. learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] *= rate;
}
shrinkage_ *= rate;
}
inline double shrinkage() const {
return shrinkage_;
}
inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] = val + leaf_value_[i];
}
// force to 1.0
shrinkage_ = 1.0f;
}
inline void AsConstantTree(double val) {
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool is_predict_leaf_index) const;
inline static bool IsZero(double fval) {
if (fval > -kZeroThreshold && fval <= kZeroThreshold) {
return true;
} else {
return false;
}
}
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
inline static int8_t GetMissingType(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
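// Example (illustrative): decision_type_ packs several flags into one byte --
// bit 0 = categorical split, bit 1 = default-left, bits 2-3 = missing type.
// A numerical split that sends missing values (NaN, missing type 2) left:
// int8_t dt = 0;
// SetDecisionType(&dt, true, kDefaultLeftMask); // dt == 0b00000010
// SetMissingType(&dt, 2); // dt == 0b00001010
// GetMissingType(dt); // == 2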
void RecomputeMaxDepth();
private:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
inline int NumericalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if (std::isnan(fval)) {
if (missing_type != 2) {
fval = 0.0f;
}
}
if ((missing_type == 1 && IsZero(fval))
|| (missing_type == 2 && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == 1 && fval == default_bin)
|| (missing_type == 2 && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
inline int CategoricalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
int int_fval = static_cast<int>(fval);
if (int_fval < 0) {
return right_child_[node];
} else if (std::isnan(fval)) {
// NaN is always in the right
if (missing_type == 2) {
return right_child_[node];
}
int_fval = 0;
}
int cat_idx = int(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = int(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
inline int Decision(double fval, int node) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecision(fval, node);
} else {
return NumericalDecision(fval, node);
}
}
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecisionInner(fval, node);
} else {
return NumericalDecisionInner(fval, node, default_bin, max_bin);
}
}
inline void Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool is_predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool is_predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used to fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;
double zero_fraction;
double one_fraction;
// note that pweight is included for convenience and is not tied to the other attributes;
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (https://arxiv.org/abs/1706.06060) */
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Stores the information for categorical feature handling and missing value handling. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
double shrinkage_;
int max_depth_;
};
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = Common::AvoidInf(gain);
// add two new leaves
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
}
inline double Tree::Predict(const double* feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
inline int Tree::PredictLeafIndex(const double* feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return leaf;
} else {
return 0;
}
}
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return leaf;
} else {
return 0;
}
}
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK(max_depth_ >= 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
inline void Tree::RecomputeLeafDepths(int node, int depth) {
if (node == 0) leaf_depth_.resize(num_leaves());
if (node < 0) {
leaf_depth_[~node] = depth;
} else {
RecomputeLeafDepths(left_child_[node], depth + 1);
RecomputeLeafDepths(right_child_[node], depth + 1);
}
}
inline int Tree::GetLeaf(const double* feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values[split_feature_[node]], node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values[split_feature_[node]], node);
}
}
return ~node;
}
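// Note (descriptive): internal nodes use non-negative indices, while a leaf L
// is stored as its bitwise complement ~L (i.e. -L-1), so the traversal above
// stops as soon as node < 0 and ~node recovers the leaf index.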
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node);
}
}
return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
GB_unop__asin_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__asin_fc64_fc64
// op(A') function: GB_unop_tran__asin_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = casin (aij)
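// (casin is the C99 double-complex arc sine declared in <complex.h>,
// which this translation unit presumably receives via GB.h)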
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casin (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = casin (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__asin_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = casin (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = casin (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__asin_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
androidfde_fmt_plug.c | /* androidfde.c
*
* hashkill - a hash cracking tool
* Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu>
*
* Modified for JtR and made stuff more generic
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_fde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_fde);
#else
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "os.h"
#include "stdint.h"
#include <stdlib.h>
#include <sys/types.h>
#include "aes.h"
#include <string.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memory.h"
#include "pbkdf2_hmac_sha1.h"
// NOTE, this format FAILS for generic sha2. It could be due to interaction between openssl/aes and generic sha2 code.
#include "sha2.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_TAG "$fde$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define FORMAT_LABEL "fde"
#define FORMAT_NAME "Android FDE"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " SHA256/AES"
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 SHA256/AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define PLAINTEXT_LENGTH 64
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
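/*
 * Input line layout, as parsed by valid() and get_salt() below (illustrative
 * summary; fields are separated by '$'):
 *   $fde$<saltlen (<=16)>$<salt hex, saltlen*2 chars>$<keysize (<=64)>
 *   $<encrypted master key hex, keysize*2 chars>
 *   $<first 3 sectors of the volume, 1536 bytes hex>
 */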
static struct fmt_tests fde_tests[] = {
{"$fde$16$04b36d4290b56e0fcca9778b74719ab8$16$b45f0f051f13f84872d1ef1abe0ada59$0f61d28f7466c0435040cc845a67e6734500de15df3ba6f48d2534ca2a7b8f910d7547357e8f1ec7364bab41383f5df9b5fb43fcd4a1e06189ce3c6ba77ec908b066e73a508e201c941fb409e9abdc051c3c052a735b01e56be61efa635e82cbceab18db1ba645b93f7befb83155852f0004a7c7d6800e9fa5f0d3c133dd2496f92110c3cdcfb16dcf57df8de830969e18514a34d4917de14597da19f9f7dc81eca2d7d461c91e0a8aeac06bafe89866d24f2b4991b4295b6277d0ff4ad97f1fa58e20f8a24e2062f84c318eb36cfbb4671117bc3522afcf7737353589cae0dce0d7c3341f457af654543758f3f005bd4d68fa2b35777cb2ea5f8f69c4debcfb1d8b2a601320e4f8621dc6e99434007388bdc0ceebc722f9ed44cbce3914bf144db332276e719f6b48108cde55916d861d19dc8c03ac76a2dad322457073111e441488228f13649073aa3aadfab51dadf89a0827acba284154a9e18d926facef43852a0733660a1fbcca8e81d2f41efd9f645a61f9395b75fc7ad446885d304808d511f2ba2e7c6138588c4292aee4ef6f2537bb00c7b015cee4a91d2defa87b67abc1315e71f0489e271673b36412377219e93aba6af3cfd504bf3f6bc24f2b6148536339d91ddd2f013314544650c1c11e7317028a7014909d0c850f78692e476c4f57da586fe26786504130aba22ba5261b989aeb47483d8cb9d5052120a4e5690b5b0cd009aadaadc351db7b6a230ebc1fa771651cb64d78daa56b7a6c6808db3b688afee9b7edaa617d8cb16ac7290465987bd443ea41ce38aa14e0c88874fb2707394b83679de82134efe351b4d021c63b2992a8314b2e93908906400628a7f753c9a4d85e917a207561b7840ce121800fab4026508d1b00fe8e7e756573743e11380f76f6bb7c0e528cb98875e6ad88bff51236601e6942964e37ffe0316b1a1f7bc0d84334fa024bf03c261bd06a07c01f099ad23fb9a1d8c98447463b8988cb33f3e1fb7d7a7c547f9a6d51cf7b75649d3c8cb5bf93be79eba1a961659b5fe928a1c7e80aca857825c6bc11493cb230e66126ef7b7284abe0823b5735bb1dfe844029f175c63442ca774784b775ecf02e48d029ac0f236813be91aca66905640666b89bd08118e3c18c75764bc49d00d1fe53ee92ccaa487852c613cba91f637b6de06dcaa1953a7cfb5333df573273a67f0157b63fbbf48c48f16c423caefaf29cdb5d34b19ac0f57b972b9e5ff1bc5cf25bdcdf8d29fb75865c4501458f19bfd64c844fd52a27feec97dc31ba922aea75706404d853071707d0c6001c59664676be6426ca5c7efbfc09ffa9acac91441f9175fd3148fb046c31a49d7c7ad10bf3c4b413dd148666b72b5a533f600cb02d7623270e5d1ad33355dd318d06aa8b3d7517cb7d5be40d222a026380cfbf5b79014e7631d677b07bcd805d9ea7103cf1d057bf883b29fb99b064c4e3cb4271596a74895c1c3f7c7c49d2be54b1435af4440ecd019dde11cee14a320712c9275bef339a15d3a18d9f38918d7af0a50a35199980429d74d4cc2a16dea619619a7c19827f4f78d3ebaf13340abf6717cec6bff8399b067fb17f11cdb1f9909c51253f7466ee769546d1d96319bcc1b04a6b1f8d8068f96b959d507c9004d75717792733fadb7a94a2d5db514a61cbd90eef89d1ace5a3138120168d62f1ebef5efbbd4e7f7e987834db81fe8c4877f3edcc71c61e96b20ca26c5a91e28fa11e484c1dcbfd5a0461065fe52f042ee9a09687d800c90a0a792f3dbe257965247f8eecd122b9b234b734454fa1477212a0295a347ae44463de4de405bf4fd91cde400b63d7fced6d7ccd20d79a4899139a79085f8742c3dfe7fbadca56c4e8aa95ce7841ad9675659349f6671d047efa0951feb9c61381f5f9e39182c1ec0a3ebd2ef5e036312c6ed6a0e59777813229ffdac771788e609c7d9f96848f63b428789c55e85c509068df8d5a0a7fc066be8c76205860d86d6c5bb7c2bc85a922a2ad86e6a791fe238420eedd1cf7ac770dd8316ca30c9577441a34873cdf0c5dc2103457a93fa0dd42da5eb2d6f82e9ff47b4bb6cd1d3fcba5645caace577a89c7bd70ff432f8dae113a7877a41a41043dac4c0d21860ad8198a1b9640d979322a20d4b90caa77a5d2b31c5bd06e", "strongpassword"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static int max_cracked;
static struct custom_salt {
int loaded;
unsigned char *cipherbuf;
int keysize;
int iterations; // NOTE, not used. Hard coded to 2000 for FDE from droid <= 4.3 (PBKDF2-sha1)
int saltlen;
unsigned char data[512 * 3];
unsigned char salt[16];
unsigned char mkey[64];
unsigned char iv[16];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
max_cracked = self->params.max_keys_per_crypt;
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
cracked = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cracked));
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr;
int saltlen, keysize, extra;
char *p;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL)
goto err;
if (!isdec(p))
goto err;
saltlen = atoi(p);
if (saltlen > 16) /* saltlen */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra) != saltlen * 2 || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* keysize */
goto err;
if (!isdec(p))
goto err;
keysize = atoi(p);
if (keysize > 64)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* key */
goto err;
if (hexlenl(p, &extra) != keysize * 2 || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* data */
goto err;
if (hexlenl(p, &extra) != 512 * 3 * 2 || extra)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
// int res;
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += TAG_LENGTH;
p = strtokm(ctcopy, "$");
cs.saltlen = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.saltlen; i++) {
cs.salt[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
p = strtokm(NULL, "$");
cs.keysize = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < cs.keysize; i++) {
cs.mkey[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
p = strtokm(NULL, "$");
for (i = 0; i < 512 * 3; i++) {
cs.data[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return (void *)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
// Not reference implementation - this is modified for use by androidfde!
static void AES_cbc_essiv(unsigned char *src, unsigned char *dst, unsigned char *key, int startsector,int size)
{
AES_KEY aeskey;
unsigned char essiv[16];
unsigned char essivhash[32];
SHA256_CTX ctx;
unsigned char sectorbuf[16];
unsigned char zeroiv[16];
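/* ESSIV (encrypted salt-sector IV), as used by dm-crypt: the per-sector IV is
 * AES-256_enc(SHA-256(key), sector_number), where the sector number fills the
 * first 4 bytes of a zeroed 16-byte block (this assumes a little-endian host,
 * since the raw int bytes are memcpy'd below). */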
SHA256_Init(&ctx);
SHA256_Update(&ctx, key, cur_salt->keysize);
SHA256_Final(essivhash, &ctx);
memset(sectorbuf,0,16);
memset(zeroiv,0,16);
memset(essiv,0,16);
memcpy(sectorbuf,&startsector,4);
AES_set_encrypt_key(essivhash, 256, &aeskey);
AES_cbc_encrypt(sectorbuf, essiv, 16, &aeskey, zeroiv, AES_ENCRYPT);
AES_set_decrypt_key(key, cur_salt->keysize*8, &aeskey);
AES_cbc_encrypt(src, dst, size, &aeskey, essiv, AES_DECRYPT);
}
// cracked[index] = hash_plugin_check_hash(saved_key[index]);
void hash_plugin_check_hash(int index)
{
unsigned char keycandidate2[255];
unsigned char decrypted1[512]; // FAT
unsigned char decrypted2[512]; // ext3/4
AES_KEY aeskey;
uint16_t v2,v3,v4;
uint32_t v1,v5;
int j = 0;
#ifdef SIMD_COEF_32
unsigned char *keycandidate, Keycandidate[SSE_GROUP_SZ_SHA1][255];
int lens[SSE_GROUP_SZ_SHA1], i;
unsigned char *pin[SSE_GROUP_SZ_SHA1];
union {
ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = (ARCH_WORD_32*)(Keycandidate[i]);
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, 16,
2000, &(x.poutc), cur_salt->keysize + 16, 0);
#else
unsigned char keycandidate[255];
char *password = saved_key[index];
pbkdf2_sha1((const uint8_t*)password, strlen(password), (const uint8_t*)(cur_salt->salt),
16, 2000, keycandidate, cur_salt->keysize + 16, 0);
#endif
j = 0;
#ifdef SIMD_COEF_32
for (; j < SSE_GROUP_SZ_SHA1; ++j) {
keycandidate = Keycandidate[j];
#endif
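/* PBKDF2 output layout: keysize bytes of key-encryption key followed by a
 * 16-byte IV; using keycandidate+16 as the IV assumes keysize == 16, which
 * holds for Android FDE <= 4.3. The decrypted master key (keycandidate2)
 * then drives the ESSIV sector decryption below. */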
AES_set_decrypt_key(keycandidate, cur_salt->keysize*8, &aeskey);
AES_cbc_encrypt(cur_salt->mkey, keycandidate2, 16, &aeskey, keycandidate+16, AES_DECRYPT);
AES_cbc_essiv(cur_salt->data, decrypted1, keycandidate2,0,32);
AES_cbc_essiv(cur_salt->data + 1024, decrypted2, keycandidate2,2,128);
// Check for FAT
if ((memcmp(decrypted1+3,"MSDOS5.0",8)==0))
cracked[index+j] = 1;
else {
// Check for extfs
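// Plausibility check on what should be an ext2/3/4 superblock (the superblock
// starts at byte 1024 of the volume, i.e. at the start of decrypted2). The
// offsets appear to correspond to s_creator_os (0x48), s_state (0x3a),
// s_errors (0x3c) and s_rev_level (0x4c), which all take only small legal
// values after a correct decryption.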
memcpy(&v1,decrypted2+72,4);
memcpy(&v2,decrypted2+0x3a,2);
memcpy(&v3,decrypted2+0x3c,2);
memcpy(&v4,decrypted2+0x4c,2);
memcpy(&v5,decrypted2+0x48,4);
#if !ARCH_LITTLE_ENDIAN
v1 = JOHNSWAP(v1);
v2 = JOHNSWAP(v2);
v3 = JOHNSWAP(v3);
v4 = JOHNSWAP(v4);
v5 = JOHNSWAP(v5);
#endif
if ((v1<5)&&(v2<4)&&(v3<5)&&(v4<2)&&(v5<5))
cracked[index+j] = 1;
}
#ifdef SIMD_COEF_32
}
#endif
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
memset(cracked, 0, sizeof(cracked[0])*max_cracked);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
hash_plugin_check_hash(index);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void fde_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_fde = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
fde_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
},
fmt_default_salt_hash,
NULL,
set_salt,
fde_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
Physics.c | /*
* Physics.c
*
* Created on: Feb 24, 2016
* Author: abauville
*/
#include "stokes.h"
void Physics_Memory_allocate(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
Numerics* Numerics = &(Model->Numerics);
BC* BCStokes = &(Model->BCStokes);
Physics->dt = Numerics->dtIni; //i.e. 0.1*Char.time/Char.time
Physics->dtAdv = Numerics->dtIni;
Physics->dtT = Numerics->dtIni; //i.e. 0.1*Char.time/Char.time
Physics->dtDarcy = Numerics->dtIni;
//Physics->dtAdv = 1.0;
Numerics->dtPrevTimeStep = Numerics->dtIni; //i.e. 0.1*Char.time/Char.time
Physics->epsRef = fabs(BCStokes->backStrainRate);
if (Physics->epsRef == 0)
Physics->epsRef = 1E0;
Physics->maxVx = (Grid->xmax-Grid->xmin)/Physics->epsRef;
Physics->maxVy = (Grid->ymax-Grid->ymin)/Physics->epsRef;
int i;
Physics->phaseListHead = (SinglePhase**) malloc( Grid->nECTot * sizeof( SinglePhase* ) ); // array of per-cell phase-list heads
for (i=0;i<Grid->nECTot;i++) {
Physics->phaseListHead[i] = (SinglePhase*) malloc( 1 * sizeof( SinglePhase ) );
Physics->phaseListHead[i]->phase = -1;
Physics->phaseListHead[i]->weight = 0;
Physics->phaseListHead[i]->next = NULL;
}
Physics->sumOfWeightsCells = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->sumOfWeightsNodes = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->Vx = (compute*) malloc( Grid->nVxTot * sizeof(compute) );
Physics->Vy = (compute*) malloc( Grid->nVyTot * sizeof(compute) );
#if (INERTIA)
Physics->Vx0 = (compute*) malloc( Grid->nVxTot * sizeof(compute) );
Physics->Vy0 = (compute*) malloc( Grid->nVyTot * sizeof(compute) );
#endif
Physics->P = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->Z = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->eta = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->khi = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->Lambda = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->rho = (compute*) malloc( Grid->nECTot * sizeof(compute) );
#if (STORE_PLASTIC_STRAIN)
Physics->strain = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->Dstrain = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->Dvorticity_cum = (compute*) malloc( Grid->nSTot * sizeof(compute) );
#endif
#if (EXTRA_PART_FIELD)
Physics->extraField = (compute*) malloc( Grid->nECTot * sizeof(compute) );
#endif
#if (HEAT)
Physics->k = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->T = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->T0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->DT = (compute*) malloc( Grid->nECTot * sizeof(compute) );
#endif
#if (DARCY)
Physics->Pc = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->divV0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->DeltaP0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->DDeltaP = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->Pf = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->phi = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // fluid phase fraction
Physics->phi0 = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // fluid phase fraction
Physics->Dphi = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // fluid phase fraction
Physics->perm0_eta_f = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // permeability
Physics->perm_eta_f = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // permeability
Physics->eta_b = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // bulk viscosity
Physics->khi_b = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // bulk plasticity
Physics->Zb = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // bulk effective viscosity
#endif
Physics->G = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->sigma_xx_0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->sigma_xy_0 = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->Dsigma_xx_0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
Physics->Dsigma_xy_0 = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->khiShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->GShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->etaShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->ZShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->LambdaShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
Physics->EII_eff = (compute*) malloc(Grid->nECTot*sizeof(compute));
Physics->EII_effShear = (compute*) malloc(Grid->nSTot*sizeof(compute));
Physics->Tau_y = (compute*) malloc(Grid->nECTot*sizeof(compute));
Physics->Tau_yShear = (compute*) malloc(Grid->nSTot*sizeof(compute));
Physics->phase = (int*) malloc( Grid->nECTot * sizeof(int) );
Physics->volumeChange = (compute*) malloc( Grid->nECTot * sizeof(compute) );
// Initialize stuff
//int i;
#pragma omp parallel for private(i) OMP_SCHEDULE
for (i = 0; i < Grid->nVxTot; ++i) {
Physics->Vx[i] = 0.0;
#if (INERTIA)
Physics->Vx0[i] = 0.0;
#endif
}
#pragma omp parallel for private(i) OMP_SCHEDULE
for (i = 0; i < Grid->nVyTot; ++i) {
Physics->Vy[i] = 0.0;
#if (INERTIA)
Physics->Vy0[i] = 0.0;
#endif
}
#pragma omp parallel for private(i) OMP_SCHEDULE
for (i = 0; i < Grid->nECTot; ++i) {
Physics->khi[i] = 1e30;
//Physics->Eps_pxx[i] = 0.0;
#if (STORE_PLASTIC_STRAIN)
Physics->strain[i] = 0.0;
Physics->Dstrain[i] = 0.0;
Physics->Dvorticity_cum[i] = 0.0;
#endif
Physics->volumeChange[i]=0.0;
#if (HEAT)
Physics->T[i] = 1.0;
Physics->DT[i] = 0.0;
#endif
Physics->P[i] = 0.0;
#if (DARCY)
Physics->divV0[i] = 0.0;
Physics->Pf [i] = 0.0;
Physics->Pc [i] = 0.0;
Physics->DeltaP0 [i] = 0.0;
Physics->DDeltaP [i] = 0.0;
Physics->phi [i] = 0.0;
Physics->phi0[i] = 0.0;
#endif
Physics->sigma_xx_0[i] = 0.0;
Physics->Dsigma_xx_0[i] = 0.0;
Physics->Lambda[i] = 1.0;
}
#pragma omp parallel for private(i) OMP_SCHEDULE
for (i = 0; i < Grid->nSTot; ++i) {
Physics->sigma_xy_0[i] = 0.0;
Physics->Dsigma_xy_0[i] = 0.0;
Physics->LambdaShear[i] = 1.0;
}
Physics->dtMaxwellMin = 1E+100;
Physics->dtMaxwellMax = 1E-100;
}
void Physics_Memory_free(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
// Free phaseList
printf("free phase list\n");
int iCell;
SinglePhase* temp;
for (iCell=0;iCell<Grid->nECTot;iCell++) {
while (Physics->phaseListHead[iCell] != NULL)
{
temp = Physics->phaseListHead[iCell];
Physics->phaseListHead[iCell] = Physics->phaseListHead[iCell]->next;
free(temp);
}
}
free( Physics->phaseListHead );
printf("free V\n");
free(Physics->Vx);
free(Physics->Vy);
#if (INERTIA)
free(Physics->Vx0);
free(Physics->Vy0);
#endif
printf("free P\n");
free(Physics->P );
printf("free Z\n");
free(Physics->Z);
free( Physics->ZShear );
printf("free eta\n");
free( Physics->eta );
free(Physics->etaShear);
printf("free khi\n");
free( Physics->khi );
free( Physics->khiShear );
printf("free G\n");
free(Physics->G );
free( Physics->GShear );
printf("free Lambda\n");
free( Physics->Lambda );
free(Physics->LambdaShear);
free( Physics->rho );
printf("free EII\n");
free(Physics->EII_eff);
free(Physics->EII_effShear);
printf("free Tau_y\n");
free(Physics->Tau_y);
free(Physics->Tau_yShear);
printf("strain\n");
#if (STORE_PLASTIC_STRAIN)
free(Physics->strain);
free(Physics->Dstrain);
free(Physics->Dvorticity_cum);
#endif
#if (EXTRA_PART_FIELD)
free(Physics->extraField);
#endif
#if (HEAT)
free( Physics->k );
free(Physics->T );
free(Physics->T0);
free(Physics->DT );
#endif
printf("free phase\n");
free(Physics->phase);
free(Physics->volumeChange);
free(Physics->sigma_xx_0 );
free(Physics->sigma_xy_0 );
free(Physics->Dsigma_xx_0 );
free(Physics->Dsigma_xy_0 );
#if (DARCY)
free(Physics->Pc);
free(Physics->divV0);
free(Physics->DeltaP0);
free(Physics->DDeltaP);
free(Physics->Pf);
free(Physics->phi);
free(Physics->Dphi);
free(Physics->phi0);
free(Physics->perm0_eta_f);
free(Physics->perm_eta_f);
free(Physics->eta_b);
free(Physics->Zb);
free(Physics->khi_b);
#endif
free(Physics->sumOfWeightsCells);
free(Physics->sumOfWeightsNodes);
}
void Physics_Phase_addSingle(SinglePhase** pointerToHead, int phase)
{
// Adds a SinglePhase entry at the head of a linked list
SinglePhase* thisPhase = (SinglePhase*) malloc(sizeof(SinglePhase));
thisPhase->phase = phase;
thisPhase->weight = 0.0;
thisPhase->next = NULL;
if (*pointerToHead != NULL) {
thisPhase->next = *pointerToHead;
}
*pointerToHead = thisPhase;
}
void Physics_P_initToLithostatic(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int iy, ix, iCell, iCellS, iCellN, iCellW, iCellE;
compute rho_g_h = 0.0;
//compute stress = 0.0;
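// The lithostatic pressure is integrated column by column with a midpoint
// rule, P(iy) = P(iy-1) + 0.5*(rho[iy]+rho[iy-1])*g*DY, starting with a
// half-cell contribution at the boundary; the loop direction follows the
// sign of the gravity component.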
// Contribution of gy
if (Physics->g[1]>0){
for (ix = 0; ix < Grid->nxEC; ++ix) {
for (iy = 0; iy < Grid->nyEC; ++iy) {
iCell = ix + iy*Grid->nxEC;
iCellS = ix + (iy-1)*Grid->nxEC;
if (iy==0) {
rho_g_h = Physics->rho[iCell] * Physics->g[1] * (-0.5*Grid->DYEC[iy] );
} else {
rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellS]) * Physics->g[1] * Grid->DYEC[iy-1] ;
}
Physics->P[iCell] = rho_g_h;
}
}
} else {
for (ix = 0; ix < Grid->nxEC; ++ix) {
for (iy = Grid->nyEC-1; iy >= 0; --iy) {
iCell = ix + iy*Grid->nxEC;
iCellN = ix + (iy+1)*Grid->nxEC;
iCellS = ix + (iy-1)*Grid->nxEC;
if (iy==Grid->nyEC-1) {
rho_g_h = Physics->rho[iCell] * -Physics->g[1] * (-0.5*Grid->DYEC[iy-1] );
} else {
rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellN]) * -Physics->g[1] * Grid->DYEC[iy] ;
}
Physics->P[iCell] = rho_g_h;
}
}
}
if (fabs(Physics->g[0])>1E-8) {
// Contribution of gx
if (Physics->g[0]>0){
for (iy = 0; iy < Grid->nyEC; ++iy) {
for (ix = 0; ix < Grid->nxEC; ++ix) {
iCell = ix + iy*Grid->nxEC;
iCellW = ix-1 + (iy)*Grid->nxEC;
if (ix==0) {
rho_g_h = Physics->rho[iCell] * Physics->g[0] * (-0.5*Grid->DXEC[ix] );
} else {
rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellW]) * Physics->g[0] * Grid->DXEC[ix-1] ;
}
Physics->P[iCell] += rho_g_h;
}
}
} else {
for (iy = 0; iy < Grid->nyEC; ++iy) {
for (ix = Grid->nxEC-1; ix >= 0; --ix) {
iCell = ix + iy*Grid->nxEC;
iCellE = ix+1 + (iy)*Grid->nxEC;
iCellW = ix-1 + (iy)*Grid->nxEC;
if (ix==Grid->nxEC-1) {
rho_g_h = Physics->rho[iCell] * -Physics->g[0] * (-0.5*Grid->DXEC[ix-1] );
} else {
rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellE]) * -Physics->g[0] * Grid->DXEC[ix] ;
}
Physics->P[iCell] += rho_g_h;
}
}
}
}
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
#if (DARCY)
Physics->Pf[iCell] = Physics->P[iCell];
Physics->Pc[iCell] = 0.0;
Physics->DeltaP0[iCell] = 0.0;
Physics->DDeltaP[iCell] = 0.0;
#endif
}
}
void Physics_Velocity_advectEulerian(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
Numbering* NumStokes = &(Model->NumStokes);
int ix, iy;
compute dVxdx, dVxdy, dVydx, dVydy;
compute dVxdx0, dVxdy0, dVydx0, dVydy0;
compute* VxNew = (compute*) malloc(Grid->nVxTot * sizeof(compute));
compute* VyNew = (compute*) malloc(Grid->nVyTot * sizeof(compute));
compute Vx, Vy;
compute dt = Physics->dtAdv;
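// Explicit Euler update of the self-advection term,
//   dVx/dt = -Vx*dVx/dx - Vy*dVx/dy (and likewise for Vy),
// using centered differences; when INERTIA is on, the velocity gradients are
// averaged between the current and previous velocity fields.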
for (iy = 1; iy < Grid->nyVx-1; ++iy) {
for (ix = 1; ix < Grid->nxVx-1; ++ix) {
dVxdx = (Physics->Vx[ix+1 + iy *Grid->nxVx] - Physics->Vx[ix-1 + iy *Grid->nxVx])/(2.0*Grid->dx);
dVxdy = (Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix + (iy-1)*Grid->nxVx])/(2.0*Grid->dy);
#if (INERTIA)
dVxdx0 = (Physics->Vx0[ix+1 + iy *Grid->nxVx] - Physics->Vx0[ix-1 + iy *Grid->nxVx])/(2.0*Grid->dx);
dVxdy0 = (Physics->Vx0[ix + (iy+1)*Grid->nxVx] - Physics->Vx0[ix + (iy-1)*Grid->nxVx])/(2.0*Grid->dy);
#else
dVxdx0 = dVxdx;
dVxdy0 = dVxdy;
#endif
Vy = 0.25* (Physics->Vy[ix + (iy )*Grid->nxVy] + Physics->Vy[ix+1 + (iy )*Grid->nxVy] + Physics->Vy[ix + (iy-1)*Grid->nxVy] + Physics->Vy[ix+1 + (iy-1)*Grid->nxVy]);
//VxNew[ix+iy*Grid->nxVx] = Physics->Vx[ix + iy *Grid->nxVx]*(1.0-dt*dVxdx) - dt*Vy*dVxdy;
VxNew[ix+iy*Grid->nxVx] = Physics->Vx[ix + iy *Grid->nxVx]*(1.0-dt*.5*(dVxdx+dVxdx0)) - dt*Vy*.5*(dVxdy+dVxdy0);
}
}
for (iy = 1; iy < Grid->nyVy-1; ++iy) {
for (ix = 1; ix < Grid->nxVy-1; ++ix) {
dVydx = (Physics->Vy[ix+1 + iy *Grid->nxVy] - Physics->Vy[ix-1 + iy *Grid->nxVy])/(2.0*Grid->dx);
dVydy = (Physics->Vy[ix + (iy+1)*Grid->nxVy] - Physics->Vy[ix + (iy-1)*Grid->nxVy])/(2.0*Grid->dy);
#if (INERTIA)
dVydx0 = (Physics->Vy0[ix+1 + iy *Grid->nxVy] - Physics->Vy0[ix-1 + iy *Grid->nxVy])/(2.0*Grid->dx);
dVydy0 = (Physics->Vy0[ix + (iy+1)*Grid->nxVy] - Physics->Vy0[ix + (iy-1)*Grid->nxVy])/(2.0*Grid->dy);
#else
dVydx0 = dVydx;
dVydy0 = dVydy;
#endif
Vx = 0.25* (Physics->Vx[ix + (iy )*Grid->nxVx] + Physics->Vx[ix-1 + (iy )*Grid->nxVx] + Physics->Vx[ix + (iy+1)*Grid->nxVx] + Physics->Vx[ix-1 + (iy+1)*Grid->nxVx]);
//VyNew[ix+iy*Grid->nxVy] = Physics->Vy[ix + iy *Grid->nxVy]*(1.0-dt*dVydy) - Vx*dt*dVydx;
VyNew[ix+iy*Grid->nxVy] = Physics->Vy[ix + iy *Grid->nxVy]*(1.0-dt*.5*(dVydy+dVydy0)) - Vx*dt*.5*(dVydx+dVydx0);
}
}
int iVx, iVy, InoDir;
for (iy = 0; iy < Grid->nyVx; ++iy) {
for (ix = 0; ix < Grid->nxVx; ++ix) {
iVx = ix + iy*Grid->nxVx;
InoDir = NumStokes->map[iVx];
if (Grid->isPeriodic) {
printf("error: in Physics_interpFromParticlestoCell: the implementation of the interpolation of velocities from particles to cell is not finished for the case of periodic BC");
}
if (InoDir>=0) { // Not a Dirichlet node
Physics->Vx [iVx] = VxNew[iVx];
#if (INERTIA)
Physics->Vx0[iVx] = VxNew[iVx];
#endif
} else {
#if (INERTIA)
Physics->Vx0[iVx] = Physics->Vx[iVx];
#endif
}
}
}
for (iy = 0; iy < Grid->nyVy; ++iy) {
for (ix = 0; ix < Grid->nxVy; ++ix) {
iVy = ix + iy*Grid->nxVy;
InoDir = NumStokes->map[iVy + Grid->nVxTot];
if (Grid->isPeriodic) {
printf("error: in Physics_interpFromParticlestoCell: the implementation of the interpolation of velocities from particles to cell is not finished for the case of periodic BC");
}
if (InoDir>=0) { // Not a Dirichlet node
Physics->Vy [iVy] = VyNew[iVy];
#if (INERTIA)
Physics->Vy0[iVy] = VyNew[iVy];
#endif
} else {
#if (INERTIA)
Physics->Vy0[iVy] = Physics->Vy[iVy];
#endif
}
}
}
free(VxNew);
free(VyNew);
}
void Physics_Velocity_retrieveFromSolution(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
BC* BC = &(Model->BCStokes);
Numbering* Numbering = &(Model->NumStokes);
EqSystem* EqSystem = &(Model->EqStokes);
// Declarations
// =========================
int ix, iy, i;
int I;
int InoDir, INeigh;
// Init Vx, Vy, P to -1, for debugging purposes
// =========================
for (i = 0; i < Grid->nVxTot; ++i) {
Physics->Vx[i] = -1;
}
for (i = 0; i < Grid->nVyTot; ++i) {
Physics->Vy[i] = -1;
}
for (i = 0; i < Grid->nECTot; ++i) {
Physics->P[i] = -1;
}
// Set Vx
// =========================
int IBC;
compute scale;
#pragma omp parallel for private(iy, ix, I, InoDir, IBC, INeigh, scale) OMP_SCHEDULE // maxVx would conflict
for (iy = 0; iy < Grid->nyVx; ++iy) {
for (ix = 0; ix < Grid->nxVx; ++ix) {
I = ix + iy*Grid->nxVx;
InoDir = Numbering->map[I];
scale = 1.0;//EqSystem->S[InoDir];
if (InoDir>=0) { // Not a Dirichlet node
scale = 1.0;//EqSystem->S[InoDir];
Physics->Vx[I] = EqSystem->x[InoDir]*scale;
}
// Deal with boundary conditions
else { // Dirichlet or Neumann
IBC = abs(InoDir)-1; // BC nodes are numbered -1 to -n
if (BC->type[IBC]==Dirichlet) { // Dirichlet on normal node
Physics->Vx[I] = BC->value[IBC];
}
else if (BC->type[IBC]==Neumann) { // Neumann on normal node
// Get neighbours index
if (ix==0) { // left boundary
INeigh = Numbering->map[ ix+1 + (iy)*Grid->nxVx ];
if (INeigh<0) {
if (iy==0) {
INeigh = Numbering->map[ ix+1 + (iy+1)*Grid->nxVx ];
} else if (iy==Grid->nyVx-1) {
INeigh = Numbering->map[ ix+1 + (iy-1)*Grid->nxVx ];
}
}
Physics->Vx[I] = EqSystem->x[INeigh]*scale;// - BC->value[IBC] *Grid->dx/(2*Physics->Z[ix+1 + (iy)*Grid->nxEC ]);
} else if (ix==Grid->nxVx-1) { // right boundary
INeigh = Numbering->map[ ix-1 + (iy)*Grid->nxVx ];
if (INeigh<0) {
if (iy==0) {
INeigh = Numbering->map[ ix-1 + (iy+1)*Grid->nxVx ];
} else if (iy==Grid->nyVx-1) {
INeigh = Numbering->map[ ix-1 + (iy-1)*Grid->nxVx ];
}
}
Physics->Vx[I] = EqSystem->x[INeigh]*scale;// + BC->value[IBC] *Grid->dx/(2*Physics->Z[ix + (iy)*Grid->nxEC ]);
} else {
INeigh = 0;
printf("error internal BC are not properly taken into account yet. (Neumann Vx)\n");
exit(0);
}
}
else { // on a ghost node
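// The boundary wall lies halfway between the ghost node and its interior
// neighbour, so DirichletGhost uses V_ghost = 2*V_bc - V_neigh, while
// NeumannGhost offsets the neighbour by the imposed stress over the local
// shear impedance: V_ghost = V_neigh -/+ BC->value/ZShear * dy.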
// Get neighbours index
if (iy==0) { // lower boundary
INeigh = Numbering->map[ ix + (iy+1)*Grid->nxVx ];
} else if (iy==Grid->nyVx-1) { // upper boundary
INeigh = Numbering->map[ ix + (iy-1)*Grid->nxVx ];
} else {
//INeigh = 0;
INeigh = Numbering->map[ ix + (BC->iyTopRow-1)*Grid->nxVx ];
//printf("error internal BC are not properly taken into account yet. (Ghost Vx)\n");
//exit(0);
}
scale = 1.0;//EqSystem->S[INeigh];
if (BC->type[IBC]==DirichletGhost) { // Dirichlet
Physics->Vx[I] = 2.0*BC->value[IBC] - EqSystem->x[INeigh]*scale;
}
else if (BC->type[IBC]==NeumannGhost) { // Neumann
if (iy==0) // lower boundary
Physics->Vx[I] = EqSystem->x[INeigh]*scale - BC->value[IBC]/Physics->ZShear[ix + 0*Grid->nxS]*Grid->dy;
if (iy==Grid->nyVx-1) // top boundary
Physics->Vx[I] = EqSystem->x[INeigh]*scale + BC->value[IBC]/Physics->ZShear[ix + (Grid->nyS-1)*Grid->nxS]*Grid->dy;
}
else {
printf("error: unknown boundary type\n");
exit(0);
}
}
}
}
}
// Set Vy
// =========================
int IMap;
//#pragma omp parallel for private(iy, ix, I, IMap, InoDir, IBC, INeigh, scale) OMP_SCHEDULE // maxVx would conflict
for (iy = 0; iy < Grid->nyVy; ++iy) {
for (ix = 0; ix < Grid->nxVy; ++ix) {
IMap = ix + iy*Grid->nxVy + Grid->nVxTot;
I = ix + iy*Grid->nxVy;
InoDir = Numbering->map[IMap];
if (InoDir>=0) { // Not a Dirichlet node
scale = 1.0;//EqSystem->S[InoDir];
Physics->Vy[I] = EqSystem->x[InoDir]*scale;
}
// Deal with boundary conditions
else { // Dirichlet or Neumann
IBC = abs(InoDir)-1;
if (BC->type[IBC]==Dirichlet) { // Dirichlet on normal node
Physics->Vy[I] = BC->value[IBC];
}
else if (BC->type[IBC]==Neumann) {
// Get neighbours index
if (iy==0) { // lower boundary
INeigh = Numbering->map[ ix + (iy+1)*Grid->nxVy + Grid->nVxTot ];
if (INeigh<0) {
if (ix==0) {
INeigh = Numbering->map[ ix+1 + (iy+1)*Grid->nxVy + Grid->nVxTot ];
} else if (ix==Grid->nxVy-1) {
INeigh = Numbering->map[ ix-1 + (iy+1)*Grid->nxVy + Grid->nVxTot ];
}
}
Physics->Vy[I] = EqSystem->x[INeigh]*scale;// - BC->value[IBC] *Grid->dx/(2*Physics->Z[ix + (iy+1)*Grid->nxEC ]);
} else if (iy==Grid->nyVy-1) { // top boundary
INeigh = Numbering->map[ ix + (iy-1)*Grid->nxVy + Grid->nVxTot ];
if (INeigh<0) {
if (ix==0) {
INeigh = Numbering->map[ ix+1 + (iy-1)*Grid->nxVy + Grid->nVxTot ];
} else if (ix==Grid->nxVy-1) {
INeigh = Numbering->map[ ix-1 + (iy-1)*Grid->nxVy + Grid->nVxTot ];
}
}
Physics->Vy[I] = EqSystem->x[INeigh]*scale;// + BC->value[IBC] *Grid->dx/(2*Physics->Z[ix + (iy )*Grid->nxEC ]);
} else {
INeigh = 0;
printf("error internal BC are not properly taken into account yet. (Neumann Vy)\n");
exit(0);
}
}
else { // on a ghost node
// Get neighbours index
if (ix==0) { // left boundary
INeigh = Numbering->map[ ix+1 + (iy)*Grid->nxVy + Grid->nVxTot ];
} else if (ix==Grid->nxVy-1) { // right boundary
INeigh = Numbering->map[ ix-1 + (iy)*Grid->nxVy + Grid->nVxTot ];
} else {
INeigh = 0;
printf("error internal BC are not properly taken into account yet. (Ghost Vy)\n");
exit(0);
}
scale = 1.0;//EqSystem->S[INeigh];
if (BC->type[IBC]==DirichletGhost) { // Dirichlet
Physics->Vy[I] = 2.0*BC->value[IBC] - EqSystem->x[INeigh]*scale;
}
else if (BC->type[IBC]==NeumannGhost) { // Neumann
if (ix==0) // left boundary
Physics->Vy[I] = EqSystem->x[INeigh]*scale - BC->value[IBC]/Physics->ZShear[0 + iy*Grid->nxS]*Grid->dx;
if (ix==Grid->nxVy-1) // right boundary
Physics->Vy[I] = EqSystem->x[INeigh]*scale + BC->value[IBC]/Physics->ZShear[Grid->nxS-1 + iy*Grid->nxS]*Grid->dx;
}
else {
printf("error: unknown boundary type\n");
exit(0);
}
}
}
if (isnan(Physics->Vy[I])) {
printf("nan found in Vy, InoDir = %i \n", InoDir);
if (InoDir<0) {
IBC = abs(InoDir)-1;
printf("BC->type[IBC] = %i,ix = %i, iy = %i, BCValue =%.2e, ZshearR = %.2e\n",BC->type[IBC], ix, iy, BC->value[IBC],Physics->ZShear[Grid->nxS-1 + iy*Grid->nxS]);
}
//exit(0);
}
}
}
compute maxVx, maxVy;
compute Vx, Vy;
maxVx = 0.0;
maxVy = 0.0;
for (iy = 1; iy<Grid->nyEC-1; iy++) {
for (ix = 1; ix<Grid->nxEC-1; ix++) {
Vx = (Physics->Vx[ix-1+ iy *Grid->nxVx]+Physics->Vx[ix+ iy *Grid->nxVx])/2.0;
Vy = (Physics->Vy[ix + (iy-1)*Grid->nxVy]+Physics->Vy[ix + iy *Grid->nxVy])/2.0;
maxVx = fmax(maxVx, fabs(Vx));
maxVy = fmax(maxVy, fabs(Vy));
}
}
Physics->maxVx = maxVx;
Physics->maxVy = maxVy;
}
#if (INERTIA)
void Physics_VelOld_POld_updateGlobal (Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
// A better method would be to swap the pointers instead of copying;
int i;
#pragma omp parallel for private(i) OMP_SCHEDULE
for (i = 0; i < Grid->nVxTot; ++i) {
Physics->Vx0[i] = Physics->Vx[i];
}
#pragma omp parallel for private(i) OMP_SCHEDULE
for (i = 0; i < Grid->nVyTot; ++i) {
Physics->Vy0[i] = Physics->Vy[i];
}
}
#endif
void Physics_P_retrieveFromSolution(Model* Model)
{
Physics* Physics = &(Model->Physics);
Grid* Grid = &(Model->Grid);
BC* BCStokes = &(Model->BCStokes);
EqSystem* EqStokes = &(Model->EqStokes);
Numbering* NumStokes = &(Model->NumStokes);
int iCell;
#if (!DARCY)
// /!\ For visualization it's better if all sides are Neumann
Physics_CellVal_retrieveFromSolution (Physics->P, 2, Grid, BCStokes, NumStokes, EqStokes);
// Shift pressure, taking the pressure of the upper left cell (inside) as reference (i.e. 0)
compute RefPressure = 0.0;// = Physics->P[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];// - 1.0;//Physics->P[1 + (Grid->nyEC-2)*Grid->nxEC];//Physics->P[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];
int ix;
for (ix=0;ix<Grid->nxEC;++ix) {
//RefPressure += Physics->P[ix+(Grid->nyEC-2)*Grid->nxEC];
RefPressure += Physics->P[ix+(BCStokes->iyTopRow-1)*Grid->nxEC];
}
RefPressure/=Grid->nxEC;
//compute RefPressure = 0.0;
//compute RefPressure = Physics->P[1 + (Grid->nyEC-2)*Grid->nxEC];// - 1.0;//Physics->P[1 + (Grid->nyEC-2)*Grid->nxEC];//Physics->P[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];
/*
compute meanP = 0.0;
compute minP = 1e100;
compute maxP = -1e100;
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
meanP += Physics->P [iCell];
maxP = fmax(maxP, Physics->P [iCell]);
minP = fmin(minP, Physics->P [iCell]);
}
meanP/= (compute)Grid->nECTot;
RefPressure = meanP;
//printf("meanP = %.2e, minP = %.2e, maxP = %.2e\n",meanP, minP, maxP);
*/
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
Physics->P [iCell] = Physics->P [iCell] - RefPressure + Physics->Pback;
}
int iy;
for (iy=BCStokes->iyTopRow;iy<Grid->nyEC;++iy) {
for (ix=0;ix<Grid->nxEC;++ix) {
iCell = ix + iy*Grid->nxEC;
Physics->P[iCell] = Physics->Pback; // Just for visualization, doesn't contribute to the solution
}
}
#else
int i;
Physics_CellVal_retrieveFromSolution (Physics->Pf, 2, Grid, BCStokes, NumStokes, EqStokes);
Physics_CellVal_retrieveFromSolution (Physics->Pc, 3, Grid, BCStokes, NumStokes, EqStokes);
// Shift pressure, taking the pressure of the upper left cell (inside) as reference (i.e. 0)
// Ref = average top row
//compute RefPressure = Physics->Pf[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];
/*
for (ix = 0; ix < Grid->nxEC; ++ix) {
iCell = ix + (Grid->nyEC-2)*Grid->nxEC;
RefPressure += Physics->Pf[iCell];
}
RefPressure /= Grid->nxEC;
*/
/*
compute RefPressure = 0.0;//Physics->Pf[1 + (Grid->nyEC-2)*Grid->nxEC];
for (iy = 0; iy < Grid->nyEC-1; ++iy) {
for (ix = 0; ix < Grid->nxEC; ++ix) {
iCell = ix + iy*Grid->nxEC;
Physics->Pf [iCell] = Physics->Pf [iCell] - RefPressure;
}
}
RefPressure = 0.0;//Physics->Pc[1 + (Grid->nyEC-2)*Grid->nxEC];
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
Physics->Pc [iCell] = Physics->Pc [iCell] - RefPressure;
}
*/
// Fill P, the total pressure
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
Physics->P[iCell] = Physics->Pc[iCell] + Physics->Pf[iCell];
}
#endif
}
#if (HEAT)
void Physics_T_retrieveFromSolution(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
BC* BCThermal = &(Model->BCThermal);
Numbering* NumThermal = &(Model->NumThermal);
EqSystem* EqThermal = &(Model->EqThermal);
Physics_CellVal_retrieveFromSolution (Physics->T, 0, Grid, BCThermal, NumThermal, EqThermal);
}
#endif
void Physics_Sigma0_updateGlobal_fromGrid(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int ix, iy, iCell, iNode;
#pragma omp parallel for private(iy, ix, iCell) OMP_SCHEDULE
for (iy = 1; iy < Grid->nyEC-1; ++iy) {
for (ix = 1; ix < Grid->nxEC-1; ++ix) {
iCell = ix + iy*Grid->nxEC;
Physics->sigma_xx_0[iCell] += Physics->Dsigma_xx_0[iCell];
}
}
Physics_CellVal_SideValues_copyNeighbours_Global(Physics->sigma_xx_0, Grid);
#pragma omp parallel for private(iy, ix, iNode) OMP_SCHEDULE
for (iy = 0; iy < Grid->nyS; ++iy) {
for (ix = 0; ix < Grid->nxS; ++ix) {
iNode = ix + iy*Grid->nxS;
Physics->sigma_xy_0[iNode] += Physics->Dsigma_xy_0[iNode];
}
}
}
void Physics_Dsigma_updateGlobal(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
BC* BC = &(Model->BCStokes);
Numerics* Numerics = &(Model->Numerics);
// see Taras' book p. 186
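// Maxwell visco-elastic stress update: with Z the effective visco-elastic
// viscosity, the trial stress is
//   Sxx_VE = 2*Z*( Eps_xx + sigma_xx_0/(2*G*dt) ),
// and the plastic multiplier Lambda (<= 1) scales it back toward the yield
// surface; the stored increment is Dsigma = Sxx_VE*Lambda - sigma_xx_0.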
int ix, iy, iCell, iNode;
compute Z;
compute Eps_xx, Eps_xy;
compute dVxdy, dVydx, dVxdx, dVydy;
compute G;
compute dt = Physics->dt;
printf("dt = %.2e, dtAdv= %.2e\n", Physics->dt, Physics->dtAdv);
//#pragma omp parallel for private(iy, ix, iCell, dVxdx, dVydy, Eps_xx) OMP_SCHEDULE
for (iy = 1; iy < Grid->nyEC-1; ++iy) {
for (ix = 1; ix < Grid->nxEC-1; ++ix) {
iCell = ix + iy*Grid->nxEC;
dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
Eps_xx = 0.5*(dVxdx-dVydy);
//Physics->Dsigma_xx_0[iCell] = 2.0 * Physics->Z[iCell]*(Eps_xx + Physics->sigma_xx_0[iCell]/(2.0*Physics->G[iCell]*dt)) - Physics->sigma_xx_0[iCell];
compute SxxVE = 2.0 * Physics->Z[iCell]*(Eps_xx + Physics->sigma_xx_0[iCell]/(2.0*Physics->G[iCell]*dt));
Physics->Dsigma_xx_0[iCell] = SxxVE*Physics->Lambda[iCell] - Physics->sigma_xx_0[iCell];
//Physics->Dsigma_xx_0[iCell] = SxxVE - Physics->sigma_xx_0[iCell];
#if (USE_UPPER_CONVECTED)
/*
// upper convected correction for the rotation of stresses
compute sigma_xy_0 = Interp_NodeVal_Node2Cell_Local(Physics->sigma_xy_0,ix,iy,Grid->nxS);
// Anton's trick
dVxdy = 0.0;
compute Sxy_x_Dvxdy = 0.0;
int iN, Ix, Iy;
int IxMod[4] = {0,1,1,0}; // lower left, lower right, upper right, upper left
int IyMod[4] = {0,0,1,1};
for (iN = 0; iN < 4; ++iN) {
Ix = (ix-1)+IxMod[iN];
Iy = (iy-1)+IyMod[iN];
dVxdy += 0.25*( Physics->Vx[(Ix )+(Iy+1)*Grid->nxVx]
- Physics->Vx[(Ix )+(Iy )*Grid->nxVx] )/Grid->dy;
Sxy_x_Dvxdy += 0.25*Physics->sigma_xy_0[Ix+Iy*Grid->nxS]*( Physics->Vx[(Ix )+(Iy+1)*Grid->nxVx]
- Physics->Vx[(Ix )+(Iy )*Grid->nxVx] )/Grid->dy;
}
//Physics->Dsigma_xx_0[iCell] += 2.0 * Physics->Z[iCell]/(Physics->G[iCell])*(Physics->sigma_xx_0[iCell]*dVxdx + sigma_xy_0*dVxdy );
Physics->Dsigma_xx_0[iCell] += 2.0 * Physics->Z[iCell]/(Physics->G[iCell])*(Physics->sigma_xx_0[iCell]*dVxdx + Sxy_x_Dvxdy );
*/
#endif
//Physics->Dsigma_xx_0[iCell] *= Physics->dtAdv/Physics->dt; // To update by the right amount according to the time step
if (isnan(Physics->Dsigma_xx_0[iCell])) {
printf("isnan Physics->Dsigma_xx_0[iCell]\n");
}
if (Numerics->timeStep>0) {
//Physics->Dsigma_xx_0[iCell] = 0.5*Physics->dtAdv* (Physics->Dsigma_xx_0[iCell]/Physics->dtAdv + Ds0_old/Physics->dtAdv0); // Crank-Nicolson, buggy!!
}
}
}
Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Dsigma_xx_0, Grid);
//#pragma omp parallel for private(iy, ix, iNode,dVxdy, dVydx, Eps_xy, G, Z) OMP_SCHEDULE
for (iy = 0; iy < Grid->nyS; ++iy) {
for (ix = 0; ix < Grid->nxS; ++ix) {
iNode = ix + iy*Grid->nxS;
dVxdy = ( Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix + (iy )*Grid->nxVx] )/Grid->dy;
dVydx = ( Physics->Vy[ix+1+ iy*Grid->nxVy] - Physics->Vy[ix + iy*Grid->nxVy] )/Grid->dx;
Eps_xy = 0.5*(dVxdy+dVydx);
//G = Interp_ECVal_Cell2Node_Local(Physics->G, ix, iy, Grid->nxEC);
G = Physics->GShear[iNode];
Z = Physics->ZShear[iNode];
//Physics->Dsigma_xy_0[iNode] = 2.0*Z * (Eps_xy + Physics->sigma_xy_0[iNode]/(2.0*G*dt)) - Physics->sigma_xy_0[iNode];
compute SxyVE = 2.0*Z * (Eps_xy + Physics->sigma_xy_0[iNode]/(2.0*G*dt));
Physics->Dsigma_xy_0[iNode] = SxyVE*Physics->LambdaShear[iNode] - Physics->sigma_xy_0[iNode];
#if (USE_UPPER_CONVECTED)
/*
compute sigma_xx_0 = Interp_ECVal_Cell2Node_Local(Physics->sigma_xx_0,ix,iy,Grid->nxEC);
Physics->Dsigma_xy_0[iNode] += 1.0*Z/G * (sigma_xx_0*(dVydx-dVxdy));
*/
#endif
Physics->Dsigma_xy_0[iNode] *= Physics->dtAdv/Physics->dt;
if (isnan(Physics->Dsigma_xy_0[iNode])) {
printf("isnan Physics->Dsigma_xy_0[iNode]\n");
}
if (Numerics->timeStep>0) {
//Physics->Dsigma_xy_0[iNode] = 0.5*Physics->dtAdv* (Physics->Dsigma_xy_0[iNode]/Physics->dtAdv + Ds0_old/Physics->dtAdv0); // Crank-Nicolson
}
// Ensure free slip
if (ix==0 && BC->IsFreeSlipLeft) {
Physics->Dsigma_xy_0[iNode] = 0.0;
}
if (ix==Grid->nxS-1 && BC->IsFreeSlipRight) {
Physics->Dsigma_xy_0[iNode] = 0.0;
}
if (iy == 0 && BC->IsFreeSlipBot) {
Physics->Dsigma_xy_0[iNode] = 0.0;
}
if (iy==Grid->nyS-1 && BC->IsFreeSlipTop) {
Physics->Dsigma_xy_0[iNode] = 0.0;
}
}
}
#if (DARCY)
compute Bulk, Zb, divV, DeltaP0, DeltaP, phi;
for (iy = 1; iy < Grid->nyEC-1; ++iy) {
for (ix = 1; ix < Grid->nxEC-1; ++ix) {
iCell = ix + iy*Grid->nxEC;
phi = Physics->phi[iCell];
Bulk = Physics->G[iCell]/sqrt(phi);
divV = ( Physics->Vx[ix+iy*Grid->nxVx] - Physics->Vx[ix-1+ iy *Grid->nxVx] )/Grid->dx;
divV += ( Physics->Vy[ix+iy*Grid->nxVy] - Physics->Vy[ix +(iy-1)*Grid->nxVy] )/Grid->dy;
DeltaP0 = Physics->DeltaP0[iCell];
Zb = Physics->Zb[iCell];
DeltaP = Zb * ( - divV + DeltaP0/(Bulk*dt) ); // Pc
Physics->DDeltaP[iCell] = DeltaP - Physics->DeltaP0[iCell];
Physics->DDeltaP[iCell] *= Physics->dtAdv/Physics->dt;
}
}
Physics_CellVal_SideValues_copyNeighbours_Global(Physics->DDeltaP, Grid);
#endif
}
compute Physics_sigma_xxVE_getLocalCell(Model* Model, int ix, int iy) {
// Where ix and iy are the indices of a Cell
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int iCell = ix + iy*Grid->nxEC;
compute dt = Physics->dt;
compute dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
compute dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
compute Eps_xx = 0.5*(dVxdx-dVydy);
return 2.0 * Physics->Z[iCell]*(Eps_xx + Physics->sigma_xx_0[iCell]/(2.0*Physics->G[iCell]*dt));
}
compute Physics_sigma_xyVE_getLocalNode(Model* Model, int ix, int iy) {
// Where ix and iy are the indices of a Node
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int iNode = ix + iy*Grid->nxS;
compute dt = Physics->dt;
compute dVxdy = ( Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix + (iy )*Grid->nxVx] )/Grid->dy;
compute dVydx = ( Physics->Vy[ix+1+ iy*Grid->nxVy] - Physics->Vy[ix + iy*Grid->nxVy] )/Grid->dx;
compute Eps_xy = 0.5*(dVxdy+dVydx);
//G = Interp_ECVal_Cell2Node_Local(Physics->G, ix, iy, Grid->nxEC);
compute G = Physics->GShear[iNode];
compute Z = Physics->ZShear[iNode];
return 2.0*Z * (Eps_xy + Physics->sigma_xy_0[iNode]/(2.0*G*dt));
}
void Physics_StrainRateInvariant_getLocalCell(Model* Model, int ix, int iy, compute* EII)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
compute dVxdy, dVydx, dVxdx, dVydy;
compute ShearComp_sqr;
int iNode, Ix, Iy;
int IxMod[4] = {0,1,1,0}; // lower left, lower right, upper right, upper left
int IyMod[4] = {0,0,1,1};
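// EII = sqrt( Eps_xx^2 + <Eps_xy^2> ): the normal component is evaluated at
// the cell centre, the shear component as the average of Eps_xy^2 over the
// four surrounding nodes (hence the 0.25 factor below).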
dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx]
- Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy]
- Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
// Method A: using the averaging of derivatives on the four nodes
// Compute Eps_xy at the four nodes of the cell
// 1. Sum contributions
dVxdy = 0;
dVydx = 0;
ShearComp_sqr = 0.0;
for (iNode = 0; iNode < 4; ++iNode) {
Ix = (ix-1)+IxMod[iNode];
Iy = (iy-1)+IyMod[iNode];
dVxdy = ( Physics->Vx[(Ix )+(Iy+1)*Grid->nxVx]
- Physics->Vx[(Ix )+(Iy )*Grid->nxVx] )/Grid->dy;
dVydx = ( Physics->Vy[(Ix+1)+(Iy )*Grid->nxVy]
- Physics->Vy[(Ix )+(Iy )*Grid->nxVy] )/Grid->dx;
//printf("koko\n");
ShearComp_sqr += (0.5*(dVxdy+dVydx))*(0.5*(dVxdy+dVydx)) ;
}
*EII = sqrt( (0.5*(dVxdx-dVydy))*(0.5*(dVxdx-dVydy)) + 0.25*ShearComp_sqr );
}
void Physics_StrainRateInvariant_getLocalNode(Model* Model, int ix, int iy, compute* EII)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
// Be careful, Anton's trick not in!!
compute dVxdy, dVydx, dVxdx, dVydy;
dVxdy = (Physics->Vx[(ix ) + (iy+1)*Grid->nxVx]
- Physics->Vx[(ix ) + (iy )*Grid->nxVx])/Grid->dy;
dVydx = (Physics->Vy[(ix+1) + (iy )*Grid->nxVy]
- Physics->Vy[(ix ) + (iy )*Grid->nxVy])/Grid->dx;
compute dVxdxCell[4], dVydyCell[4]; // order: NE, NW, SW, SE
// use Anton's trick for the inner nodes
if (ix>0 && ix<Grid->nxS-1 && iy>0 && iy<Grid->nyS-1) {
dVxdxCell[0] = (Physics->Vx[(ix+1)+(iy+1)*Grid->nxVx] - Physics->Vx[(ix )+(iy+1)*Grid->nxVx])/Grid->dx;
dVxdxCell[1] = (Physics->Vx[(ix )+(iy+1)*Grid->nxVx] - Physics->Vx[(ix-1)+(iy+1)*Grid->nxVx])/Grid->dx;
dVxdxCell[2] = (Physics->Vx[(ix )+(iy )*Grid->nxVx] - Physics->Vx[(ix-1)+(iy )*Grid->nxVx])/Grid->dx;
dVxdxCell[3] = (Physics->Vx[(ix+1)+(iy )*Grid->nxVx] - Physics->Vx[(ix )+(iy )*Grid->nxVx])/Grid->dx;
dVydyCell[0] = (Physics->Vy[(ix+1)+(iy+1)*Grid->nxVy] - Physics->Vy[(ix+1)+(iy )*Grid->nxVy])/Grid->dy;
dVydyCell[1] = (Physics->Vy[(ix )+(iy+1)*Grid->nxVy] - Physics->Vy[(ix )+(iy )*Grid->nxVy])/Grid->dy;
dVydyCell[2] = (Physics->Vy[(ix )+(iy )*Grid->nxVy] - Physics->Vy[(ix )+(iy-1)*Grid->nxVy])/Grid->dy;
dVydyCell[3] = (Physics->Vy[(ix+1)+(iy )*Grid->nxVy] - Physics->Vy[(ix+1)+(iy-1)*Grid->nxVy])/Grid->dy;
compute NormalComp_sqr = 0.0;
int iCell;
for (iCell = 0; iCell < 4; ++iCell) {
dVxdx = dVxdxCell[iCell];
dVydy = dVydyCell[iCell];
NormalComp_sqr += 0.25*(0.5*(dVxdx-dVydy))*(0.5*(dVxdx-dVydy)) ;
}
*EII = sqrt( NormalComp_sqr + (0.5*(dVxdy+dVydx))*(0.5*(dVxdy+dVydx)) );
} else {
if (Grid->isPeriodic) {
if (ix == 0 || ix == Grid->nxS-1) {
dVxdx = ( Physics->Vx[(1)+(iy+1)*Grid->nxVx] - Physics->Vx[(Grid->nxVx-1 -1)+(iy+1)*Grid->nxVx] +
Physics->Vx[(1)+(iy )*Grid->nxVx] - Physics->Vx[(Grid->nxVx-1 -1)+(iy )*Grid->nxVx] )/4./Grid->dx;
}
else {
dVxdx = 0.0;
printf("error in Physics_StrainRateInvariant_getLocalNode. Shouldn't come to this condition\n");
}
}
else {
if (ix == 0) {
dVxdx = ( Physics->Vx[(ix+1)+(iy+1)*Grid->nxVx] - Physics->Vx[(ix )+(iy+1)*Grid->nxVx] +
Physics->Vx[(ix+1)+(iy )*Grid->nxVx] - Physics->Vx[(ix )+(iy )*Grid->nxVx] )/2./Grid->dx;
} else if (ix == Grid->nxS-1) {
dVxdx = ( Physics->Vx[(ix )+(iy+1)*Grid->nxVx] - Physics->Vx[(ix-1)+(iy+1)*Grid->nxVx] +
Physics->Vx[(ix )+(iy )*Grid->nxVx] - Physics->Vx[(ix-1)+(iy )*Grid->nxVx] )/2./Grid->dx;
} else {
dVxdx = ( Physics->Vx[(ix+1)+(iy+1)*Grid->nxVx] - Physics->Vx[(ix-1)+(iy+1)*Grid->nxVx] +
Physics->Vx[(ix+1)+(iy )*Grid->nxVx] - Physics->Vx[(ix-1)+(iy )*Grid->nxVx] )/4./Grid->dx;
}
}
if (iy == 0) {
dVydy = ( Physics->Vy[(ix+1)+(iy+1)*Grid->nxVy] - Physics->Vy[(ix+1)+(iy )*Grid->nxVy] +
Physics->Vy[(ix )+(iy+1)*Grid->nxVy] - Physics->Vy[(ix )+(iy )*Grid->nxVy] )/2./Grid->dy;
} else if (iy == Grid->nyS-1) {
dVydy = ( Physics->Vy[(ix+1)+(iy )*Grid->nxVy] - Physics->Vy[(ix+1)+(iy-1)*Grid->nxVy] +
Physics->Vy[(ix )+(iy )*Grid->nxVy] - Physics->Vy[(ix )+(iy-1)*Grid->nxVy] )/2./Grid->dy;
} else {
dVydy = ( Physics->Vy[(ix+1)+(iy+1)*Grid->nxVy] - Physics->Vy[(ix+1)+(iy-1)*Grid->nxVy] +
Physics->Vy[(ix )+(iy+1)*Grid->nxVy] - Physics->Vy[(ix )+(iy-1)*Grid->nxVy] )/4./Grid->dy;
}
// the top and bottom row should never be needed
*EII = sqrt( (0.5*(dVxdy+dVydx))*(0.5*(dVxdy+dVydx)) + (0.5*(dVxdx-dVydy))*(0.5*(dVxdx-dVydy)) );
}
}
compute Physics_StressInvariant_getLocalCell(Model* Model, int ix, int iy)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int iCell = ix + iy*Grid->nxEC;
#if (DARCY)
compute phi = Physics->phi[iCell];
#else
compute phi = 0.0;
#endif
return (1.0-phi)*2.0*Physics->Z[iCell]*Physics->EII_eff[iCell]*Physics->Lambda[iCell];
}
compute Physics_StressInvariant_getLocalNode(Model* Model, int ix, int iy)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int iNode = ix + iy*Grid->nxS;
#if (DARCY)
compute phi = Interp_ECVal_Cell2Node_Local(Physics->phi, ix, iy, Grid->nxEC);
#else
compute phi = 0.0;
#endif
return (1.0-phi)*2.0*Physics->ZShear[iNode]*Physics->EII_effShear[iNode]*Physics->LambdaShear[iNode];
}
void Physics_dt_update(Model* Model) {
// Time step selection based on the analytical solution of the visco-elastic build up
// Stress build up equation:
// Sxx = 2*eta*Exx * ( 1 - exp(-G/eta*t) ) [1]
// Derivative wrt time:
// dSxx/dt = 2*G*Exx * exp(-G/eta*t) [2]
// Solution for the time at a given stress Sxx0:
// t = eta/G * ln(2*eta*Exx / (2*eta*Exx - Sxx0))
// can be rewritten:
// t = tM * ln(1/(1-Sxx0/SxxV_max)
// with the maxwell time tM = eta/G
// and the maximum viscous stress (when the viscous strain rate is equal to the total strain rate): SxxV_max = (2*eta*Exx)
// Let's limit the time step size based on the increment of strain.
// The increment of strain is given by:
// DeltaSxx = dSxx/dt * dt // where dt is the time step size
// Then from eq. 2:
// dt = DeltaSxx / (2*G*Exx * exp(-G/eta*t)) [3]
// DeltaSxx can be chosen, for example as a fraction of the maximum viscous stress or of the yield stress.
// SxxLimit = min(SxxV_max,Syield)
// DeltaSxx = SxxLimit/n, where n is a non dimensional number representing the fraction of stress
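// Worked example (illustrative, dimensional): eta = 1e21 Pa s, G = 1e10 Pa and
// Exx = 1e-15 1/s give tM = eta/G = 1e11 s (~3 kyr) and
// SxxV_max = 2*eta*Exx = 2 MPa; choosing DeltaSxx = SxxV_max/20 = 0.1 MPa,
// eq. [3] at t = 0 gives dt = DeltaSxx/(2*G*Exx) = 5e9 s (~160 yr).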
#if (DARCY)
printf("Time step size selection method not yet adapted to Darcy\n");
exit(0);
#endif
// Here comes the implementation
Physics* Physics = &(Model->Physics);
Grid* Grid = &(Model->Grid);
MatProps* MatProps = &(Model->MatProps);
Numerics* Numerics = &(Model->Numerics);
Char* Char = &(Model->Char);
if (Numerics->dtMin!=Numerics->dtMax) {
SinglePhase* thisPhaseInfo;
compute weight, sumOfWeights;
compute cohesion, frictionAngle;
compute P;
compute Sigma_v_max; // maximum viscous stress (if total strain rate = viscous strain rate)
compute Sigma_yield;
compute Sigma_limit;
int phase;
compute sq_sigma_xy0, sigma_xx0, sigmaII0;
compute DeltaSigma;
compute new_dt = 1e200;
compute dtOld = Physics->dt;
compute EII, sigmaII;
compute smallest_dt = 1e100;
int ix, iy, iCell;
compute eta;
//compute DeltaSigma_Max = 0.0;
compute DeltaSigma_min = Numerics->deltaSigmaMin;
//compute stressFac = 1.0;//fmax(0.0,Numerics->dt_stressFac-Numerics->deltaSigmaMin);
//compute stressFac = Numerics->dt_stressFac;
//Numerics->dt_DeltaSigma_min_stallFac = 1e100;
/*
if (Numerics->timeStep<=0) {
Numerics->dt_DeltaSigma_min_stallFac = 1.0;
} else {
if (!fmod(Numerics->stallingCounter+1,5) && EqStokes->normResidual>10.0*Numerics->absoluteTolerance) {
Numerics->dt_DeltaSigma_min_stallFac/=2.0;
} else {
if(Numerics->itNonLin==0) {
Numerics->dt_DeltaSigma_min_stallFac *= 1.25; // slowly recovers
Numerics->dt_DeltaSigma_min_stallFac = fmin(1.0,Numerics->dt_DeltaSigma_min_stallFac);
}
}
}
Numerics->dt_DeltaSigma_min_stallFac = fmax(Numerics->dt_DeltaSigma_min_stallFac, 1e-3);
*/
compute P_E, EP_E, V_E, VP_E, VP_EP;
compute counter = 0;
compute av_EP_E = 0.0;
compute minP_E = 1e100;
compute minEP_E = 1e100;
compute minV_E = 1e100;
compute minVP_E = 1e100;
compute minVP_EP = 1e100;
compute maxEP_E = 0.0;
bool somethingIsPlastic = false;
compute refTime_noPlast;
//compute refTime_Plast;
compute minRefTime_noPlast = 1e100;
compute maxRefTime_noPlast = 0.0;
for (iy=1;iy<Grid->nyEC-1; ++iy) {
for (ix=1;ix<Grid->nxEC-1; ++ix) {
iCell = ix +iy*Grid->nxEC;
if (MatProps->use_dtMaxwellLimit[Physics->phase[iCell]] && Physics->khi[iCell] > 1e29) {
eta = Physics->eta[iCell];
// Compute sigmaII0
sq_sigma_xy0 = Physics->sigma_xy_0[ix-1+(iy-1)*Grid->nxS] * Physics->sigma_xy_0[ix-1+(iy-1)*Grid->nxS];
sq_sigma_xy0 += Physics->sigma_xy_0[ix +(iy-1)*Grid->nxS] * Physics->sigma_xy_0[ix +(iy-1)*Grid->nxS];
sq_sigma_xy0 += Physics->sigma_xy_0[ix-1+(iy )*Grid->nxS] * Physics->sigma_xy_0[ix-1+(iy )*Grid->nxS];
sq_sigma_xy0 += Physics->sigma_xy_0[ix +(iy )*Grid->nxS] * Physics->sigma_xy_0[ix +(iy )*Grid->nxS];
sigma_xx0 = Physics->sigma_xx_0[iCell];// + Physics->Dsigma_xx_0[iCell];
sigmaII0 = sqrt((sigma_xx0)*(sigma_xx0) + 0.25*(sq_sigma_xy0));
// Compute sigmaII
sigmaII = Physics_StressInvariant_getLocalCell(Model, ix, iy);
// Get cohesion and frictionAngle
if (Numerics->timeStep<=0 && Numerics->itNonLin<1) {
EII = 1.0; // the reference strain rate in this case is (1/Char.time) * Char.time = 1.0
Sigma_limit = 2.0*eta*EII/1000.0;
//printf("Svmax = %.2e, Syield = %.2e, Slimit = %.2e, cohesion = %.2e, frictionAngle = %.2e, P = %.2e\n", Sigma_v_max, Sigma_yield, Sigma_limit, cohesion, frictionAngle, P);
} else {
// Compute EII
Physics_StrainRateInvariant_getLocalCell(Model, ix, iy, &EII);
// Get stress limit
if (0) {
if (Physics->khi[iCell]<1e29) {
Sigma_v_max = 2.0*eta*EII;
Sigma_yield = Physics->Tau_y[iCell];
Sigma_limit = fmin(Sigma_v_max,Sigma_yield);
} else {
Sigma_limit = 2.0*eta*EII*1.0;
}
} else {
Sigma_v_max = 2.0*eta*EII;
Sigma_yield = Physics->Tau_y[iCell];
Sigma_limit = Sigma_yield;
}
}
compute dSigma = fabs(sigmaII - sigmaII0);
if (sigmaII>Sigma_limit) {
//printf("SII>Slim!! sigmaII = %.2e, sigma_limit = %.2e, P = %.2e, Sigma_v_max = %.2e,Sigma_yield = %.2e\n", sigmaII, Sigma_limit, P, Sigma_v_max,Sigma_yield);
sigmaII = Sigma_limit; // because the time step is updated before the viscosity, so stress can be a bit higher than the yield at that moment.
}
// Get DeltaSigma
//DeltaSigma = Sigma_limit*stressFac;
DeltaSigma = DeltaSigma_min;//stressFac * (Sigma_limit-sigmaII)/Sigma_limit + DeltaSigma_min;
//DeltaSigma = 0.05 * (Sigma_limit-sigmaII)/Sigma_limit + DeltaSigma_min;
//DeltaSigma *= Numerics->dt_DeltaSigma_min_stallFac;
new_dt = dtOld * (DeltaSigma/dSigma);
if (new_dt<0) {
printf("DeltaSigma = %.2e, Sigma_limit = %.2e, sigmaII = %.2e, dSigma = %.2e\n", DeltaSigma, Sigma_limit, sigmaII, dSigma);
exit(0);
}
// compute the corresponding time in the analytical solution
//refTime_noPlast = eta/Physics->G[iCell] * log(2*eta*EII / (2*eta*EII - sigmaII0 ));
refTime_noPlast = eta/Physics->G[iCell] * log(2.0*eta*EII / (2.0*eta*EII - Sigma_limit ));
minRefTime_noPlast = fmin(minRefTime_noPlast,refTime_noPlast);
maxRefTime_noPlast = fmax(maxRefTime_noPlast,refTime_noPlast);
// compute dt using eq. [3]
//dt = DeltaSigma / (2*G*EII * exp(-G/eta*t));
//if (new_dt<smallest_dt) {
//DeltaSigma_Max = dSigma;
//iyLim = iy;
//printf("DeltaSigma = %.2e, dSigma = %.2e, new_dt = %.2e, smallest_dt = %.2e, Physics->dt = %.2e\n",DeltapSigma, dSigma, new_dt, smallest_dt, Physics->dt);
//}
smallest_dt = fmin(smallest_dt, new_dt);
V_E = (Physics->eta[iCell]) / (Physics->G[iCell]);
minV_E = fmin(minV_E ,V_E);
} else if (MatProps->use_dtMaxwellLimit[Physics->phase[iCell]] && Physics->khi[iCell] <= 1e29) {
V_E = (Physics->eta[iCell]) / (Physics->G[iCell]);
minV_E = fmin(minV_E ,V_E);
somethingIsPlastic = true;
EP_E = (1.0/(1.0/(Physics->G[iCell]*Physics->dt) + 1.0/Physics->khi[iCell])) / (Physics->G[iCell]);
minEP_E = fmin(minEP_E ,EP_E);
maxEP_E = fmax(maxEP_E ,EP_E);
av_EP_E += EP_E;
counter += 1.0;
P_E = (Physics->khi[iCell]) / (Physics->G[iCell]);
minP_E = fmin(minP_E ,P_E);
VP_EP = (1.0/(1.0/(Physics->eta[iCell]) + 1.0/Physics->khi[iCell])) / (1.0/(1.0/Physics->G[iCell] + Physics->dt/Physics->khi[iCell]));
minVP_EP = fmin(minVP_EP ,VP_EP);
VP_E = (1.0/(1.0/(Physics->eta[iCell]) + 1.0/Physics->khi[iCell])) / (Physics->G[iCell]);
minVP_E = fmin(minVP_E ,VP_E);
//printf("VP_E = %.2e, EP_E = %.2e\n",VP_E, EP_E);
}
}
}
if (counter > 0.0) {
av_EP_E /= counter;
}
if (smallest_dt==1e100) { // unlikely case: no cell provided a stress-based limit
smallest_dt = dtOld;
printf("Warning: no stress-based dt limit found; keeping the previous dt\n");
}
Physics->dt = (smallest_dt+Physics->dt)/2.0;
compute dtStress = smallest_dt;
/*
if (Numerics->timeStep <= 0) {
Numerics->dtCorr = dtOld;
Numerics->dtPrevCorr = Numerics->dtCorr;
Numerics->dtAlphaCorr = Numerics->dtAlphaCorrIni;
Physics->dt = dtOld;
} else {
Numerics->dtCorr = Numerics->dtAlphaCorr * (smallest_dt-dtOld);
//if (fabs(Numerics->dtCorr)/dtOld<0.05) { // avoids small changes
// Numerics->dtCorr = 0.0;
//}
//printf("Numerics->dtCorr = %.2e, Numerics->dtPrevCorr = %.2e, Ratio = %.2e\n", Numerics->dtCorr, Numerics->dtPrevCorr, Numerics->dtCorr/Numerics->dtPrevCorr);
if (Numerics->dtCorr/Numerics->dtPrevCorr<-0.9) {
Numerics->dtAlphaCorr /= 2.0;
} else {
Numerics->dtAlphaCorr *= 1.25;
}
Numerics->dtAlphaCorr = fmin(Numerics->dtAlphaCorrIni, Numerics->dtAlphaCorr);
Physics->dt = dtOld + Numerics->dtCorr;
Numerics->dtPrevCorr = Numerics->dtCorr;
}
printf("dtNow = %.2e, Numerics->dtCorr = %.2e, smallest_dt = %2e., dtOld = %.2e\n", Physics->dt, Numerics->dtCorr, smallest_dt, dtOld);
//Physics->dt = dtOld;
*/
Numerics->lsGoingDown = false;
Numerics->lsGoingUp = false;
compute tol = 0.001;
printf("(Physics->dt-dtOld)/dtOld = %.2e, dt = %.2e, dtOld = %.2e\n", (Physics->dt-dtOld)/Physics->dt, Physics->dt, dtOld);
if ((Physics->dt-dtOld)/dtOld<-tol) { // going down
Numerics->lsGoingDown = true;
printf("going down0\n");
} else { // going up
Numerics->lsGoingUp = true;
}
Physics->dt = fmin(Numerics->dtMax, Physics->dt);
Physics->dt = fmax(Numerics->dtMin, Physics->dt);
// dtAdv
Physics->dtAdv = Numerics->CFL_fac_Stokes*Grid->dx/(Physics->maxVx); // note: the min(dx,dy) is the char length, so = 1
Physics->dtAdv = fmin(Physics->dtAdv, Numerics->CFL_fac_Stokes*Grid->dy/(Physics->maxVy));
compute dtAdvAlone = Physics->dtAdv;
//Physics->dtAdv = fmin(Physics->dtAdv, Physics->dt);
//Physics->dtAdv = fmax(Physics->dtAdv, 0.001*dtAdvAlone);
compute alpha_lim = 5.0*PI/180.0;
compute dtRot;
compute dtRotMin = 1e100;
compute omega;
if (Numerics->timeStep>0) {
// Compute the Alpha array
// add a condition with signX signY to avoid recomputing alpha if not necessary
#pragma omp parallel for private(iy, ix) OMP_SCHEDULE
for (iy=0; iy<Grid->nyS; iy++) {
for (ix=0; ix<Grid->nxS; ix++) {
omega = .5*((Physics->Vy[ix+1 + (iy )*Grid->nxVy] - Physics->Vy[ix +(iy )*Grid->nxVy])/Grid->DXEC[ix]
- (Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix +(iy )*Grid->nxVx])/Grid->DYEC[iy]);
dtRot = alpha_lim/fabs(omega);
dtRotMin = fmin(dtRotMin,dtRot);
}
}
Physics->dtAdv = fmin(dtRotMin,Physics->dtAdv);
}
//Physics->dtAdv = fmin(dtStress,Physics->dtAdv);
compute dtPFac = Numerics->dt_plasticFac;
compute dtPlastic = 0.0;
if (dtPFac<1.0) {
dtPlastic = (1.0-dtPFac)*minEP_E+dtPFac*minVP_EP;
} else {
dtPlastic = dtPFac*minVP_EP;
}
if (somethingIsPlastic) {
//compute dtPlastic = 0.99*minVP_EP;
Numerics->subgridStressDiffTimeScale = minVP_EP;
Physics->dtAdv = fmin(Physics->dtAdv,dtPlastic);
} else {
Numerics->subgridStressDiffTimeScale = minV_E; // i.e. Maxwell time
}
if (Numerics->timeStep>0) {
compute ana_Fac = Numerics->dt_stressFac;
Physics->dtAdv = fmin(Physics->dtAdv,ana_Fac*minRefTime_noPlast);
}
if (Numerics->timeStep>0) {
Physics->dtAdv = fmin(2.0*dtOld, Physics->dtAdv);
//Physics->dtAdv = fmax(0.9*dtOld, Physics->dtAdv);
}
Physics->dtAdv = fmin(Numerics->dtMax, Physics->dtAdv);
Physics->dtAdv = fmax(Numerics->dtMin, Physics->dtAdv);
#if (ADV_INTERP)
Physics->dt = Physics->dtAdv;
#else
Physics->dtAdv = Physics->dt;
#endif
compute yr = (3600.0*24.0*365.0);
printf("scaled_dt = %.2e yr, dtMin = %.2e, dtMax = %.2e, Numerics->dtAlphaCorr = %.2e, dtStress = %.2e, dtAdvAlone = %.2e, dtRotMin = %.2e, dtPlastic = %.2e, Physics->dt = %.2e\n", Physics->dt*Char->time/yr, Numerics->dtMin, Numerics->dtMax, Numerics->dtAlphaCorr, dtStress, dtAdvAlone, dtRotMin, dtPlastic, Physics->dt);
printf("minEP/E = %.2e yr, maxEP/E = %.2e yr, avEP_E = %.2e, P/E = %.2e yr, V/E = %.2e yr, VP/E = %.2e yr, VP/EP = %.2e yr, minRefTime_noPlast = %.2e yr, Fac*minRefTime_noPlast = %.2e yr, maxRefTime_noPlast = %.2e yr\n", minEP_E*Char->time/yr, maxEP_E*Char->time/yr, av_EP_E*Char->time/yr, minP_E*Char->time/yr, minV_E*Char->time/yr, minVP_E*Char->time/yr, minVP_EP*Char->time/yr, minRefTime_noPlast*Char->time/yr, Numerics->dt_stressFac*minRefTime_noPlast*Char->time/yr, maxRefTime_noPlast*Char->time/yr);
} else {
Physics->dt = Numerics->dtMin;
Physics->dtAdv = Numerics->dtMin;
}
}
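// --- Illustrative sketch (not part of the solver) ---
// The stress-based step above rescales the previous step so that the stress
// increment over one step matches the target DeltaSigma, i.e.
// new_dt = dtOld * DeltaSigma/dSigma. The helper below is hypothetical and
// only restates that formula with a guard against a vanishing increment.
static inline compute Physics_dtFromStressIncrement(compute dtOld, compute DeltaSigma, compute dSigma)
{
if (fabs(dSigma) < 1e-30) {
return dtOld; // no measurable stress change: keep the old step
}
return dtOld * (DeltaSigma / dSigma);
}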
#if (DARCY)
void Physics_Perm_updateGlobal(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
Numerics* Numerics = &(Model->Numerics);
MatProps* MatProps = &(Model->MatProps);
Physics->minPerm = 1E100;
int iy, ix;
int iCell;
compute phi;
compute phiRef = 0.0001;
compute PermEffRef = MatProps->perm0_eta_f[0] * phiRef*phiRef*phiRef * ( (1.0-phiRef)*(1.0-phiRef));
compute perm0;
SinglePhase* thisPhaseInfo;
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
phi = Physics->phi[iCell];
perm0 = 0.0;
thisPhaseInfo = Physics->phaseListHead[iCell];
while (thisPhaseInfo != NULL) {
perm0 += MatProps->perm0_eta_f[thisPhaseInfo->phase] * thisPhaseInfo->weight;
thisPhaseInfo = thisPhaseInfo->next;
}
perm0 /= Physics->sumOfWeightsCells[iCell];
Physics->perm_eta_f[iCell] = perm0 * phi*phi*phi * ( (1.0-phi)*(1.0-phi));
}
Physics_CellVal_SideValues_copyNeighbours_Global(Physics->perm_eta_f, Grid);
}
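// Sketch of the porosity-permeability law applied above (Kozeny-Carman-like):
// perm/eta_f = perm0/eta_f * phi^3 * (1-phi)^2, with perm0/eta_f averaged over
// the phases present in the cell. Hypothetical standalone helper for clarity.
static inline compute Physics_permEtaF_ofPhi(compute perm0_eta_f, compute phi)
{
return perm0_eta_f * phi*phi*phi * (1.0-phi)*(1.0-phi);
}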
void Physics_Phi_updateGlobal(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
Numerics* Numerics = &(Model->Numerics);
int iy, ix;
int iCell;
compute dt = Physics->dtAdv;
int nxVx = Grid->nxVx;
int nxVy = Grid->nxVy;
compute dx, dy;
compute divV;
compute sum = 0.0;
compute maxDiv = 0;
compute maxPhi = 0;
for (iy = 1; iy < Grid->nyEC-1; ++iy) {
for (ix = 1; ix < Grid->nxEC-1; ++ix) {
iCell = ix + iy*Grid->nxEC;
dx = Grid->DXS[ix-1];
dy = Grid->DYS[iy-1];
divV = ( Physics->Vx[ix+iy*nxVx] - Physics->Vx[ix-1+ iy *nxVx] )/dx;
divV += ( Physics->Vy[ix+iy*nxVy] - Physics->Vy[ix +(iy-1)*nxVy] )/dy;
Physics->phi[iCell] = Physics->phi0[iCell] + dt*0.5*( (1.0-Physics->phi0[iCell])*Physics->divV0[iCell] + (1.0-Physics->phi[iCell])*divV );
if (Physics->phi[iCell] > Numerics->phiMax) {
Physics->phi[iCell] = Numerics->phiMax;
} else if (Physics->phi[iCell] < Numerics->phiMin) {
Physics->phi[iCell] = Numerics->phiMin;
}
Physics->Dphi[iCell] = Physics->phi[iCell] - Physics->phi0[iCell];
if (fabs(divV)>maxDiv) {
maxDiv = fabs(divV);
}
if (fabs(Physics->phi[iCell])>maxPhi) {
maxPhi = fabs(Physics->phi[iCell]);
}
sum += Physics->phi[iCell];
}
}
Physics_CellVal_SideValues_copyNeighbours_Global(Physics->phi, Grid);
Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Dphi, Grid);
}
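// Note (added for clarity): the update above is a trapezoidal (two-level)
// discretization of dphi/dt = (1-phi)*div(v),
// phi^{n+1} = phi^n + dt/2 * ( (1-phi^n)*divV^n + (1-phi^{n+1})*divV^{n+1} ),
// where the right-hand side uses the current phi as the new-level estimate.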
#endif
void Physics_Rho_updateGlobal(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
MatProps* MatProps = &(Model->MatProps);
int iCell;
SinglePhase* thisPhaseInfo;
#pragma omp parallel for private(iCell, thisPhaseInfo) OMP_SCHEDULE
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
Physics->rho[iCell] = 0.0;
thisPhaseInfo = Physics->phaseListHead[iCell];
while (thisPhaseInfo != NULL) {
Physics->rho[iCell] += MatProps->rho0[thisPhaseInfo->phase] * thisPhaseInfo->weight;
thisPhaseInfo = thisPhaseInfo->next;
}
Physics->rho[iCell] /= Physics->sumOfWeightsCells[iCell];
#if (DARCY)
Physics->rho[iCell] = (1.0 - Physics->phi[iCell])*Physics->rho[iCell] + Physics->phi[iCell]*Physics->rho_f;
#endif
}
Physics_CellVal_SideValues_copyNeighbours_Global(Physics->rho, Grid);
}
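// The density above is a marker-weight arithmetic average over the phases
// contributing to a cell: rho = sum_p(rho0[p]*w_p) / sum_p(w_p) (then mixed
// with the fluid density by porosity under DARCY). Hypothetical helper
// mirroring that reduction for a single cell.
static inline compute Physics_rhoWeightedAverage(const compute* rho0, SinglePhase* head, compute sumOfWeights)
{
compute rho = 0.0;
for (SinglePhase* p = head; p != NULL; p = p->next) {
rho += rho0[p->phase] * p->weight;
}
return rho / sumOfWeights;
}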
/*
compute Physics_getFromMatProps_ForOneCell(Physics* Physics, compute* ListFromMatProps, MatProps* MatProps, int iCell) {
SinglePhase* thisPhaseInfo;
compute value = 0.0;
thisPhaseInfo = Physics->phaseListHead[iCell];
while (thisPhaseInfo != NULL) {
value += ListFromMatProps[thisPhaseInfo->phase] * thisPhaseInfo->weight;
thisPhaseInfo = thisPhaseInfo->next;
}
return value /= Physics->sumOfWeightsCells[iCell];
}
*/
void Physics_Phase_updateGlobal(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
Particles* Particles = &(Model->Particles);
MatProps* MatProps = &(Model->MatProps);
int ix, iy, iCell, iNode;
//coord depth, y;
SingleParticle* thisParticle;
//compute locX, locY;
int IxNode[] = {-1, 0, -1, 0};
int IyNode[] = {-1, -1, 0, 0};
int iPhase;
compute contribPhase[NB_PHASE_MAX];
compute maxContrib;
int phaseAir = Physics->phaseAir;
int phaseWater;
compute contribPhaseAir, contribPhaseWater;
if (Physics->phaseWater==-1) {
phaseWater = Physics->phaseAir;
} else {
phaseWater = Physics->phaseWater;
}
for (iy = 1; iy < Grid->nyEC-1; ++iy) {
for (ix = 1; ix < Grid->nxEC-1; ++ix) {
iCell = ix+iy*Grid->nxEC;
// Reinitialize contribs
// ===================
for (iPhase=0;iPhase<MatProps->nPhase;++iPhase) {
contribPhase[iPhase] = 0;
}
// Count contribs
// ===================
for (iNode = 0; iNode < 4; ++iNode) {
thisParticle = Particles->linkHead[ix+IxNode[iNode] + (iy+IyNode[iNode])*Grid->nxS];
while (thisParticle != NULL) {
++contribPhase[thisParticle->phase];
thisParticle = thisParticle->next;
}
}
if (phaseAir>-1) {
contribPhaseAir = contribPhase[phaseAir];
}else {
contribPhaseAir = 0.0;
}
if (phaseWater>-1) {
contribPhaseWater = contribPhase[phaseWater];
}else {
contribPhaseWater = 0.0;
}
if (contribPhaseAir>0) {
Physics->phase[iCell] = phaseAir;
} else if (contribPhaseWater>0) {
Physics->phase[iCell] = phaseWater;
} else {
// Find the most prominent phase
// ===================
maxContrib = 0;
for (iPhase=0;iPhase<MatProps->nPhase;++iPhase) {
if (contribPhase[iPhase] > maxContrib) {
Physics->phase[iCell] = iPhase;
maxContrib = contribPhase[iPhase];
}
}
}
/*
// Find the most prominent phase
// ===================
maxContrib = 0;
for (iPhase=0;iPhase<MatProps->nPhase;++iPhase) {
if (contribPhase[iPhase] > maxContrib) {
Physics->phase[iCell] = iPhase;
maxContrib = contribPhase[iPhase];
}
}
*/
}
}
Physics_CellVal_SideValues_copyNeighbours_Global_i(Physics->phase,Grid);
}
void Physics_PhaseList_reinit(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int iCell;
SinglePhase* temp;
#pragma omp parallel for private(iCell, temp) OMP_SCHEDULE
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
while (Physics->phaseListHead[iCell]->next!=NULL) {
temp = Physics->phaseListHead[iCell];
Physics->phaseListHead[iCell] = Physics->phaseListHead[iCell]->next;
free(temp);
}
Physics->phaseListHead[iCell]->phase = -1;
Physics->phaseListHead[iCell]->weight = 0.0;
Physics->phaseListHead[iCell]->next = NULL;
}
}
void Physics_check(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
Char* Char = &(Model->Char);
printf("=== Physics_check ===\n");
int iCell, ix, iy;
compute* Data;
int iData;
int nData = 9;
#if (HEAT)
nData +=1;
#endif
#if (DARCY)
nData +=6;
#endif
compute s = Char->time; // second
compute m = Char->length; // meter
compute kg = Char->mass; // kilogram
#if (HEAT)
compute K = Char->temperature; // Kelvin
// Other units
compute J = kg*m*m/(s*s); // Joule
compute W = kg*m*m/(s*s*s); // Watt
#endif
compute Pa = kg/m/s/s; // Pascal
compute Pas = kg/m/s; // Poise, Pa.s
//compute mol = 1.0;
bool Dim = true;
compute unit = 1.0;
for (iData = 0; iData < nData; ++iData) {
switch (iData) {
case 0:
printf("===== G =====\n");
Data = Physics->G;
if (Dim) unit = Pa;
break;
case 1:
printf("===== eta =====\n");
Data = Physics->eta;
if (Dim) unit = Pas;
break;
case 2:
printf("===== khi =====\n");
Data = Physics->khi;
if (Dim) unit = Pas;
break;
case 3:
printf("===== Z =====\n");
Data = Physics->Z;
if (Dim) unit = Pas;
break;
case 4:
printf("===== rho =====\n");
Data = Physics->rho;
if (Dim) unit = kg/m/m/m ;
break;
case 5:
printf("===== sigma_xx_0 =====\n");
Data = Physics->sigma_xx_0;
if (Dim) unit = Pa;
break;
case 6:
printf("===== Dsigma_xx_0 =====\n");
Data = Physics->Dsigma_xx_0;
if (Dim) unit = Pa;
break;
case 7:
printf("===== sumOfWeightsCells =====\n");
Data = Physics->sumOfWeightsCells;
if (Dim) unit = 1.0;
break;
case 8:
printf("===== P =====\n");
Data = Physics->P;
if (Dim) unit = Pa;
break;
case 9:
#if (HEAT)
printf("===== T =====\n");
Data = Physics->T;
if (Dim) unit = K;
#endif
break;
case 10:
#if (DARCY)
printf("===== phi =====\n");
Data = Physics->phi;
if (Dim) unit = 1.0;
#endif
break;
case 11:
#if (DARCY)
printf("===== Pc =====\n");
Data = Physics->Pc;
if (Dim) unit = Pa;
#endif
break;
case 12:
#if (DARCY)
printf("===== Pf =====\n");
Data = Physics->Pf;
if (Dim) unit = Pa;
#endif
break;
case 13:
#if (DARCY)
printf("===== khi_b =====\n");
Data = Physics->khi_b;
if (Dim) unit = Pas;
#endif
break;
case 14:
#if (DARCY)
printf("===== eta_b =====\n");
Data = Physics->eta_b;
if (Dim) unit = Pas;
#endif
break;
case 15:
#if (DARCY)
printf("===== perm =====\n");
Data = Physics->perm_eta_f;
if (Dim) unit = Physics->eta_f * m*m ;
#endif
break;
}
printf("Char unit = %.2e\n",unit);
for (iy = 0; iy < Grid->nyEC; ++iy) {
for (ix = 0; ix < Grid->nxEC; ++ix) {
iCell = ix+iy*Grid->nxEC;
printf("%.2e ", Data[iCell]*unit);
}
printf("\n");
}
}
}
void Physics_NodeVal_advectEulerian(compute *A, Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
compute* Anew = (compute*) malloc(Grid->nSTot * sizeof(compute));
int ix, iy;
int iC, iN, iS, iW, iE, iVxN, iVxS, iVyW, iVyE;
compute dAdx_W, dAdx_E, dAdy_S, dAdy_N;
compute dx = Grid->dx;
compute dy = Grid->dy;
compute dt = Physics->dt;
compute Vx, Vy;
for (iy = 1; iy < Grid->nyS-1; ++iy) {
for (ix = 1; ix < Grid->nxS-1; ++ix) {
// Node indices (this routine advects node values on the shear grid)
iC = ix + (iy )*Grid->nxS;
iN = ix + (iy+1)*Grid->nxS;
iS = ix + (iy-1)*Grid->nxS;
iW = ix-1 + (iy )*Grid->nxS;
iE = ix+1 + (iy )*Grid->nxS;
iVxS = ix + (iy )*Grid->nxVx;
iVxN = ix + (iy+1)*Grid->nxVx;
iVyW = ix + (iy )*Grid->nxVy;
iVyE = ix+1 + (iy )*Grid->nxVy;
dAdx_W = (A[iC] - A[iW])/dx;
dAdx_E = (A[iE] - A[iC])/dx;
dAdy_S = (A[iC] - A[iS])/dy;
dAdy_N = (A[iN] - A[iC])/dy;
Vx = .5*( Physics->Vx[iVxS] + Physics->Vx[iVxN]);
Vy = .5*( Physics->Vy[iVyW] + Physics->Vy[iVyE]);
Anew[iC] = A[iC] + dt* ( - Vx * .5*(dAdx_W + dAdx_E) - Vy*.5*(dAdy_S + dAdy_N) );
}
}
for (iy = 1; iy < Grid->nyS-1; ++iy) {
for (ix = 1; ix < Grid->nxS-1; ++ix) {
iC = ix + (iy )*Grid->nxS;
A[iC] = Anew[iC];
}
}
// Boundary values should be copied as well
//Physics_CellVal_SideValues_copyNeighbours_Global(A, Grid);
free(Anew);
}
|
matrix.c |
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include "my_timers.h"
#define M 10
/**
* @brief Get the Cofactor object
*
* @param mat
* @param temp
* @param p
* @param q
* @param n
*/
void getCofactor(long** mat, long** temp, int p,
int q, int n)
{
int i = 0, j = 0;
// Looping for each element of the matrix
for (int row = 0; row < n; row++)
{
for (int col = 0; col < n; col++)
{
// Copying into temporary matrix only those
// element which are not in given row and
// column
if (row != p && col != q)
{
temp[i][j++] = mat[row][col];
// Row is filled, so increase row index and
// reset col index
if (j == n - 1)
{
j = 0;
i++;
}
}
}
}
}
/**
* @brief Calculates determinant of "mat" n*n size matrix
* @param mat - pointer to long type matrix
* @param n - size of square matrix
* @return determinant of "mat" matrix, type long
*/
long determinantOfMatrix(long** mat, int n)
{
long D = 0; // Initialize result
// Base case : if matrix contains single element
if (n == 1){
return mat[0][0];
}
long **temp = NULL;
temp = (long**)malloc(n*sizeof(long*)); // array of row pointers
int i = 0;
int j = 0;
for(i=0; i < n; i++) {
temp[i] = (long*)malloc(sizeof(long)*M);
}
int sign = 1; // To store sign multiplier
// Iterate for each element of first row
for (j = 0; j < n; j++)
{
// Getting Cofactor of mat[0][f]
getCofactor(mat, temp, 0, j, n);
D += sign * mat[0][j]
* determinantOfMatrix(temp, n - 1);
// terms are to be added with alternate sign
sign = -sign;
}
for(i = 0; i < n; i++){
free(temp[i]);
}
free(temp);
return D;
}
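/*
 * Usage note (illustration only): cofactor expansion is O(n!) in time, fine
 * for the small M used here but impractical for large matrices (LU
 * decomposition computes a determinant in O(n^3)). Sanity check:
 * for mat = {{1,2},{3,4}}, determinantOfMatrix(mat, 2) == 1*4 - 2*3 == -2.
 */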
int main(void){
int m = M;
long det = 0;
srand(0); // fixed seed for reproducible matrices
int i = 0;
int j = 0;
int y = 0;
long **a = NULL;
a = (long**)malloc(M*sizeof(long*));
if(a == NULL){ // bail out before dereferencing a NULL pointer
return 1;
}
for(i=0; i < m; i++) {
a[i] = (long*)malloc(sizeof(long)*M);
}
{
int j = 0;
for(i = 0; i < M; i++){
for(j = 0; j < M; j++){
a[i][j] = (long)(rand()%5);
printf(" %d ", a[i][j]);
}
printf("\n");
}
printf("********\n");
}
start_time();
long **b = NULL;
omp_set_num_threads(2);
#pragma omp parallel for private(i, j, b) reduction(+:det)
for(y = 0; y < M; y++){
b = (long**)malloc(M*sizeof(long*));
for(i=0; i < m; i++) {
b[i] = (long*)malloc(sizeof(long)*M);
}
for(int x = 0; x < M; x++){
for(int g = 0; g < M; g++){
b[x][g] = 0;
}
}
getCofactor(a, b, 0, y, m);
if(y%2){
det += (-1)*a[0][y]*determinantOfMatrix(b, m-1);
}
else{
det += a[0][y]*determinantOfMatrix(b, m-1);
}
for(i=0; i < m; i++) {
free(b[i]);
}
free(b);
}
stop_time();
print_time("Elapsed time for n threads:");
long det2 = 0;
start_time();
det2 = determinantOfMatrix(a, m);
stop_time();
print_time("Elapsed time for normal:");
printf("Det: %d\n", det);
printf("Det2: %d\n", det2);
for(i=0; i < m; i++) {
free(a[i]);
}
free(a);
return 0;
} |
icv-threads-openmp3x.c | /*******************************************************************
OpenMP-3.0 Example Codes Beta-v1.0
File : icv-threads-openmp3x.c
Description : Simple example program to demonstrate the use of
OpenMP Library calls to change the default values
of the internal control variables.
- omp_set_nested() changes ICV nest-var : which enables
or disables nested parallelism
- omp_set_max_active_levels() changes ICV max-active-levels-var
: which limits the number of nested active parallel regions
- omp_set_dynamic() changes ICV dyn-var : which enables or disables
the dynamic adjustment of the number of threads available for the
execution of subsequent parallel regions.
- omp_set_num_threads() changes ICV nthreads-var : which sets the
number of threads for the next parallel region.
OpenMP Pragma /
Function Used :
- omp_set_nested()
- omp_set_max_active_levels()
- omp_set_dynamic()
- omp_set_num_threads()
- omp_get_max_active_levels(),
- omp_get_num_threads(),
- omp_get_max_threads()
Input : None
Output : Values of Internal Control Variables
**********************************************************************/
/* Header file inclusion */
#include <stdio.h>
#include <omp.h>
/* main function */
int main (void)
{
/* OpenMP library functions to change the default values
of the internal control variable */
omp_set_nested(1); /* Enable the Nested Parallel region */
omp_set_max_active_levels(8); /* Enable the maximum active levels */
omp_set_dynamic(0); /* Disable the dynamic thread creation */
omp_set_num_threads(2); /* Set the no. of threads to 2 */
/* Outer : Create the parallel region */
#pragma omp parallel
{
omp_set_num_threads(3);
/* Inner: Create the parallel region inside the outer parallel region */
#pragma omp parallel
{
omp_set_num_threads(4);
#pragma omp single /* Restricting the one thread to do the work */
{
/*
* The following should print:
* Inner: max_act_lev=8, num_thds=3, max_thds=4
* Inner: max_act_lev=8, num_thds=3, max_thds=4
*/
printf ("\n\t\t Inner: max_act_lev=%d, num_thds=%d,max_thds=%d\n",omp_get_max_active_levels(), omp_get_num_threads(),omp_get_max_threads());
}
} /* End of inner parallel region */
#pragma omp barrier /* Synchronization point */
#pragma omp single /* Outer: Restricting the one thread to do the work */
{
/*
* The following should print:
* Outer: max_act_lev=8, num_thds=2, max_thds=3
*/
printf ("\n\t\t Outer: max_act_lev=%d, num_thds=%d,max_thds=%d\n",omp_get_max_active_levels(), omp_get_num_threads(),omp_get_max_threads());
}
} /* End of outer parallel region */
} /* End of main function */
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
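/* Usage sketch (same pattern as in main below): wall-clock timing of a code
 * section with gettimeofday plus timeval_subtract.
 *
 * struct timeval t0, t1, dt;
 * gettimeofday(&t0, 0);
 * // ... work ...
 * gettimeofday(&t1, 0);
 * timeval_subtract(&dt, &t1, &t0);
 * double seconds = dt.tv_sec + dt.tv_usec * 1.0e-6;
 */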
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
// Defaults (arbitrary small problem) so the arrays are sized even when no
// command-line arguments are given; overridden below.
Nx = Ny = Nz = 34;
Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - per point: 6 additions && 7 multiplications
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff); // use the MIN macro defined above
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% gamma to try to set the mean appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
ExceptionInfo *exception)
{
double
gamma,
log_mean,
mean,
sans;
MagickStatusType
status;
register ssize_t
i;
log_mean=log(0.5);
if (image->channel_mask == DefaultChannels)
{
/*
Apply gamma correction equally across all given channels.
*/
(void) GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
}
/*
Auto-gamma each channel separately.
*/
status=MagickTrue;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ChannelType
channel_mask;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
status=GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
(void) SetImageChannelMask(image,channel_mask);
if (status == MagickFalse)
break;
}
return(status != 0 ? MagickTrue : MagickFalse);
}
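/*
  Worked example (illustration only): gamma = log(mean*QuantumScale)/log(0.5).
  For a dark image whose normalized mean is 0.25, gamma = log(0.25)/log(0.5)
  = 2.0, so LevelImage() applies gamma 2.0 and lifts the mid-tones toward 0.5.
*/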
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
ExceptionInfo *exception)
{
return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContrastImageTag "BrightnessContrast/Image"
double
alpha,
coefficients[2],
intercept,
slope;
MagickBooleanType
status;
/*
Compute slope and intercept.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
alpha=contrast;
slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
if (slope < 0.0)
slope=0.0;
intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
coefficients[0]=slope;
coefficients[1]=intercept;
status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
return(status);
}
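/*
  Worked example (illustration only): for brightness=0 and contrast=0,
  slope = tan(MagickPI*(0/100+1)/4) = tan(pi/4) = 1 and
  intercept = 0/100+((100-0)/200)*(1-1) = 0, so the polynomial
  slope*u+intercept reduces to the identity and the image is unchanged.
*/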
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _RangeInfo
{
unsigned short
min,
max;
} RangeInfo;
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
size_t *histogram)
{
#define NumberCLAHEGrays (65536)
register ssize_t
i;
size_t
cumulative_excess,
previous_excess,
step;
ssize_t
excess;
/*
Compute total number of excess pixels.
*/
cumulative_excess=0;
for (i=0; i < (ssize_t) number_bins; i++)
{
excess=(ssize_t) histogram[i]-(ssize_t) clip_limit;
if (excess > 0)
cumulative_excess+=excess;
}
/*
Clip histogram and redistribute excess pixels across all bins.
*/
step=cumulative_excess/number_bins;
excess=(ssize_t) (clip_limit-step);
for (i=0; i < (ssize_t) number_bins; i++)
{
if ((double) histogram[i] > clip_limit)
histogram[i]=(size_t) clip_limit;
else
if ((ssize_t) histogram[i] > excess)
{
cumulative_excess-=histogram[i]-excess;
histogram[i]=(size_t) clip_limit;
}
else
{
cumulative_excess-=step;
histogram[i]+=step;
}
}
/*
Redistribute remaining excess.
*/
do
{
register size_t
*p;
size_t
*q;
previous_excess=cumulative_excess;
p=histogram;
q=histogram+number_bins;
while ((cumulative_excess != 0) && (p < q))
{
step=number_bins/cumulative_excess;
if (step < 1)
step=1;
for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step)
if ((double) *p < clip_limit)
{
(*p)++;
cumulative_excess--;
}
p++;
}
} while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}
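/*
  Note (added for clarity): clipping caps each bin at the clip limit and
  spreads the removed counts uniformly over all bins; the do/while loop then
  hands out any remaining excess one count at a time until none is left or no
  bin can accept more.
*/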
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const size_t number_bins,
const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
register const unsigned short
*p;
register ssize_t
i;
/*
Classify the pixels into a gray histogram.
*/
for (i=0; i < (ssize_t) number_bins; i++)
histogram[i]=0L;
p=pixels;
for (i=0; i < (ssize_t) tile_info->height; i++)
{
const unsigned short
*q;
q=p+tile_info->width;
while (p < q)
histogram[lut[*p++]]++;
q+=clahe_info->width;
p=q-tile_info->width;
}
}
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
const size_t *Q22,const size_t *Q11,const size_t *Q21,
const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
ssize_t
y;
unsigned short
intensity;
/*
Bilinear interpolate four tiles to eliminate boundary artifacts.
*/
for (y=(ssize_t) tile->height; y > 0; y--)
{
register ssize_t
x;
for (x=(ssize_t) tile->width; x > 0; x--)
{
intensity=lut[*pixels];
*pixels++=(unsigned short ) (PerceptibleReciprocal((double) tile->width*
tile->height)*(y*(x*Q12[intensity]+(tile->width-x)*Q22[intensity])+
(tile->height-y)*(x*Q11[intensity]+(tile->width-x)*Q21[intensity])));
}
pixels+=(clahe_info->width-tile->width);
}
}
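/*
  Note (added for clarity): the expression above is standard bilinear
  interpolation between the four tile mappings Q11, Q12, Q21, Q22, with
  weights x/width and y/height normalized via PerceptibleReciprocal(); it
  blends the per-tile equalization maps so no seams appear at tile boundaries.
*/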
static void GenerateCLAHELut(const RangeInfo *range_info,
const size_t number_bins,unsigned short *lut)
{
ssize_t
i;
unsigned short
delta;
/*
Scale input image [intensity min,max] to [0,number_bins-1].
*/
delta=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
for (i=(ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++)
lut[i]=(unsigned short) ((i-range_info->min)/delta);
}
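/*
  Worked example (illustration only): for 16-bit input with min=0, max=65535
  and number_bins=256, delta = 65535/256+1 = 256, so lut[i] = i/256 maps the
  full intensity range onto bin indices 0..255.
*/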
static void MapCLAHEHistogram(const RangeInfo *range_info,
const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
double
scale,
sum;
register ssize_t
i;
/*
Rescale histogram to range [min-intensity .. max-intensity].
*/
scale=(double) (range_info->max-range_info->min)/number_pixels;
sum=0.0;
for (i=0; i < (ssize_t) number_bins; i++)
{
sum+=histogram[i];
histogram[i]=(size_t) (range_info->min+scale*sum);
if (histogram[i] > range_info->max)
histogram[i]=range_info->max;
}
}
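/*
  Note (added for clarity): this converts the clipped histogram into a scaled
  cumulative distribution function, i.e. the equalization mapping
  map[i] = min + (max-min)*CDF(i), clamped to the intensity maximum.
*/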
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const RangeInfo *range_info,
const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
MemoryInfo
*tile_cache;
register unsigned short
*p;
size_t
limit,
*tiles;
ssize_t
y;
unsigned short
lut[NumberCLAHEGrays];
/*
Contrast limited adaptive histogram equalization.
*/
if (clip_limit == 1.0)
return(MagickTrue);
tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y,
number_bins*sizeof(*tiles));
if (tile_cache == (MemoryInfo *) NULL)
return(MagickFalse);
tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
if (limit < 1UL)
limit=1UL;
/*
Generate greylevel mappings for each tile.
*/
GenerateCLAHELut(range_info,number_bins,lut);
p=pixels;
for (y=0; y < (ssize_t) clahe_info->y; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) clahe_info->x; x++)
{
size_t
*histogram;
histogram=tiles+(number_bins*(y*clahe_info->x+x));
GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
ClipCLAHEHistogram((double) limit,number_bins,histogram);
MapCLAHEHistogram(range_info,number_bins,tile_info->width*
tile_info->height,histogram);
p+=tile_info->width;
}
p+=clahe_info->width*(tile_info->height-1);
}
/*
Interpolate greylevel mappings to get CLAHE image.
*/
p=pixels;
for (y=0; y <= (ssize_t) clahe_info->y; y++)
{
OffsetInfo
offset;
RectangleInfo
tile;
register ssize_t
x;
tile.height=tile_info->height;
tile.y=y-1;
offset.y=tile.y+1;
if (y == 0)
{
/*
Top row.
*/
tile.height=tile_info->height >> 1;
tile.y=0;
offset.y=0;
}
else
if (y == (ssize_t) clahe_info->y)
{
/*
Bottom row.
*/
tile.height=(tile_info->height+1) >> 1;
tile.y=clahe_info->y-1;
offset.y=tile.y;
}
for (x=0; x <= (ssize_t) clahe_info->x; x++)
{
tile.width=tile_info->width;
tile.x=x-1;
offset.x=tile.x+1;
if (x == 0)
{
/*
Left column.
*/
tile.width=tile_info->width >> 1;
tile.x=0;
offset.x=0;
}
else
if (x == (ssize_t) clahe_info->x)
{
/*
Right column.
*/
tile.width=(tile_info->width+1) >> 1;
tile.x=clahe_info->x-1;
offset.x=tile.x;
}
InterpolateCLAHE(clahe_info,
tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */
tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */
tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */
tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */
&tile,lut,p);
p+=tile.width;
}
p+=clahe_info->width*(tile.height-1);
}
tile_cache=RelinquishVirtualMemory(tile_cache);
return(MagickTrue);
}
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
const size_t height,const size_t number_bins,const double clip_limit,
ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
MagickBooleanType
status;
MagickOffsetType
progress;
MemoryInfo
*pixel_cache;
RangeInfo
range_info;
RectangleInfo
clahe_info,
tile_info;
size_t
n;
ssize_t
y;
unsigned short
*pixels;
/*
Configure CLAHE parameters.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
range_info.min=0;
range_info.max=NumberCLAHEGrays-1;
tile_info.width=width;
if (tile_info.width == 0)
tile_info.width=image->columns >> 3;
tile_info.height=height;
if (tile_info.height == 0)
tile_info.height=image->rows >> 3;
tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
clahe_info.width=image->columns+tile_info.x;
clahe_info.height=image->rows+tile_info.y;
clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
sizeof(*pixels));
if (pixel_cache == (MemoryInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
colorspace=image->colorspace;
if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
{
pixel_cache=RelinquishVirtualMemory(pixel_cache);
return(MagickFalse);
}
/*
Initialize CLAHE pixels.
*/
image_view=AcquireVirtualCacheView(image,exception);
progress=0;
status=MagickTrue;
n=0;
for (y=0; y < (ssize_t) clahe_info.height; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
(tile_info.y >> 1),clahe_info.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) clahe_info.width; x++)
{
pixels[n++]=ScaleQuantumToShort(p[0]);
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
(size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
if (status == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
/*
Push CLAHE pixels to CLAHE image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
n=clahe_info.width*(tile_info.y >> 1);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
n+=tile_info.x >> 1;
for (x=0; x < (ssize_t) image->columns; x++)
{
q[0]=ScaleShortToQuantum(pixels[n++]);
q+=GetPixelChannels(image);
}
n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
pixel_cache=RelinquishVirtualMemory(pixel_cache);
if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
status=MagickFalse;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"
CacheView
*clut_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*clut_map;
register ssize_t
i;
ssize_t adjust,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clut_image != (Image *) NULL);
assert(clut_image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsGrayColorspace(clut_image->colorspace) == MagickFalse))
(void) SetImageColorspace(image,sRGBColorspace,exception);
clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
if (clut_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Clut image.
*/
status=MagickTrue;
progress=0;
adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
clut_view=AcquireVirtualCacheView(clut_image,exception);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
GetPixelInfo(clut_image,clut_map+i);
status=InterpolatePixelInfo(clut_image,clut_view,method,
(double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
if (status == MagickFalse)
break;
}
clut_view=DestroyCacheView(clut_view);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelTrait
traits;
GetPixelInfoPixel(image,q,&pixel);
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.red))].red;
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.green))].green;
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.blue))].blue;
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.black))].black;
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.alpha))].alpha;
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
(void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
return(status);
}
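/*
  Note (added for clarity): the lookup table is built by sampling the CLUT
  image along its diagonal, clut_map[i] = CLUT(i*(columns-adjust)/MaxMap,
  i*(rows-adjust)/MaxMap), so horizontal, vertical, and diagonal gradient
  images all yield the same one-dimensional transfer curve.
*/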
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MagickPathExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*cdl_map;
register ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
ccc=NewXMLTree((const char *) color_correction_collection,exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
GetNextToken(p,&p,MagickPathExtent,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" color_correction.saturation: %g",color_correction.saturation);
}
cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power))));
cdl_map[i].green=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power))));
cdl_map[i].blue=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power))));
}
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Apply transfer function to colormap.
*/
double
luma;
luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
0.07217f*image->colormap[i].blue;
image->colormap[i].red=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma;
image->colormap[i].green=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma;
image->colormap[i].blue=luma+color_correction.saturation*cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma;
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
0.07217f*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
return(status);
}
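/*
  Worked example (illustration only): the per-channel CDL transfer is
  out = (slope*in+offset)^power on normalized intensities. With slope=0.9,
  offset=0.4, power=1.0 (the red channel of the sample CCC above), an input
  of 0.5 maps to 0.9*0.5+0.4 = 0.85; saturation then blends with luma:
  final = luma + saturation*(out-luma).
*/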
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to MagickTrue to increase the
% image contrast; otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
double
brightness,
hue,
saturation;
/*
Enhance contrast: dark colors become darker, light colors become lighter.
*/
assert(red != (double *) NULL);
assert(green != (double *) NULL);
assert(blue != (double *) NULL);
hue=0.0;
saturation=0.0;
brightness=0.0;
ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
brightness);
if (brightness > 1.0)
brightness=1.0;
else
if (brightness < 0.0)
brightness=0.0;
ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
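/*
  Worked example (illustration only): with sign=+1 and brightness b=0.25,
  b += 0.5*(0.5*(sin(MagickPI*(0.25-0.5))+1.0)-0.25) ~= -0.052, so b drops to
  ~0.198: dark pixels get darker and, symmetrically, bright pixels get
  brighter, which increases contrast.
*/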
MagickExport MagickBooleanType ContrastImage(Image *image,
const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"
CacheView
*image_view;
int
sign;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
return(MagickTrue);
#endif
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
sign=sharpen != MagickFalse ? 1 : -1;
if (image->storage_class == PseudoClass)
{
/*
Contrast enhance colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
double
blue,
green,
red;
red=(double) image->colormap[i].red;
green=(double) image->colormap[i].green;
blue=(double) image->colormap[i].blue;
Contrast(sign,&red,&green,&blue);
image->colormap[i].red=(MagickRealType) red;
image->colormap[i].green=(MagickRealType) green;
image->colormap[i].blue=(MagickRealType) blue;
}
}
/*
Contrast enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
blue,
green,
red;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
Contrast(sign,&red,&green,&blue);
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result, the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const double black_point,const double white_point,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point, expressed as a pixel count in the range
% 0 to number-of-pixels (e.g. as computed from a geometry such as 1% or
% 10x90%).
%
% o white_point: the white point, also expressed as a pixel count.
%
% o exception: return any errors or warnings in this structure.
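%
% A minimal usage sketch (assuming 'image' and 'exception' exist; the black
% and white points below clip roughly 1% of pixels at each end, mirroring a
% 1%x1% geometry):
%
% double pixels = (double) image->columns*image->rows;
% (void) ContrastStretchImage(image,0.01*pixels,pixels-0.01*pixels,
% exception);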
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
CacheView
*image_view;
double
*black,
*histogram,
*stretch_map,
*white;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate histogram and stretch map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageGray(image,exception) != MagickFalse)
(void) SetImageColorspace(image,GRAYColorspace,exception);
black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*histogram));
stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*stretch_map));
if ((black == (double *) NULL) || (white == (double *) NULL) ||
(histogram == (double *) NULL) || (stretch_map == (double *) NULL))
{
if (stretch_map != (double *) NULL)
stretch_map=(double *) RelinquishMagickMemory(stretch_map);
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (white != (double *) NULL)
white=(double *) RelinquishMagickMemory(white);
if (black != (double *) NULL)
black=(double *) RelinquishMagickMemory(black);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Form histogram.
*/
status=MagickTrue;
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
pixel=GetPixelIntensity(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
if (image->channel_mask != DefaultChannels)
pixel=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum(pixel))+i]++;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Find the histogram boundaries by locating the black/white levels.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
intensity;
register ssize_t
j;
black[i]=0.0;
white[i]=MaxRange(QuantumRange);
intensity=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
{
intensity+=histogram[GetPixelChannels(image)*j+i];
if (intensity > black_point)
break;
}
black[i]=(double) j;
intensity=0.0;
for (j=(ssize_t) MaxMap; j != 0; j--)
{
intensity+=histogram[GetPixelChannels(image)*j+i];
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white[i]=(double) j;
}
histogram=(double *) RelinquishMagickMemory(histogram);
/*
Stretch the histogram to create the stretched image mapping.
*/
(void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*stretch_map));
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
register ssize_t
j;
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
gamma;
gamma=PerceptibleReciprocal(white[i]-black[i]);
if (j < (ssize_t) black[i])
stretch_map[GetPixelChannels(image)*j+i]=0.0;
else
if (j > (ssize_t) white[i])
stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
else
if (black[i] != white[i])
stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
(double) (MaxMap*gamma*(j-black[i])));
}
}
if (image->storage_class == PseudoClass)
{
register ssize_t
j;
/*
Stretch-contrast colormap.
*/
for (j=0; j < (ssize_t) image->colors; j++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,RedPixelChannel);
image->colormap[j].red=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,GreenPixelChannel);
image->colormap[j].green=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,BluePixelChannel);
image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
}
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
{
i=GetPixelChannelOffset(image,AlphaPixelChannel);
image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
}
}
}
/*
Stretch-contrast image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (black[j] == white[j])
continue;
q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
ScaleQuantumToMap(q[j])+j]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
stretch_map=(double *) RelinquishMagickMemory(stretch_map);
white=(double *) RelinquishMagickMemory(white);
black=(double *) RelinquishMagickMemory(black);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
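%
% A minimal usage sketch (assuming 'image' and 'exception' exist; the
% variable name is illustrative):
%
% Image *denoised = EnhanceImage(image,exception);
% if (denoised != (Image *) NULL)
% { /* use denoised, then ... */ denoised=DestroyImage(denoised); }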
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag "Enhance/Image"
#define EnhancePixel(weight) \
mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
distance_squared=(4.0+mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
distance_squared+=(7.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
distance_squared+=(5.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
distance_squared+=(5.0-mean)*distance*distance; \
mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
distance_squared+=(5.0-mean)*distance*distance; \
if (distance_squared < 0.069) \
{ \
aggregate.red+=(weight)*GetPixelRed(image,r); \
aggregate.green+=(weight)*GetPixelGreen(image,r); \
aggregate.blue+=(weight)*GetPixelBlue(image,r); \
aggregate.black+=(weight)*GetPixelBlack(image,r); \
aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
total_weight+=(weight); \
} \
r+=GetPixelChannels(image);
CacheView
*enhance_view,
*image_view;
Image
*enhance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize enhanced image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
enhance_image=CloneImage(image,0,0,MagickTrue,exception);
if (enhance_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
{
enhance_image=DestroyImage(enhance_image);
return((Image *) NULL);
}
/*
Enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,enhance_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
distance,
distance_squared,
mean,
total_weight;
PixelInfo
aggregate;
register const Quantum
*magick_restrict r;
GetPixelInfo(image,&aggregate);
total_weight=0.0;
GetPixelInfoPixel(image,p+center,&pixel);
r=p;
EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
EnhancePixel(8.0); EnhancePixel(5.0);
r=p+GetPixelChannels(image)*(image->columns+4);
EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
EnhancePixel(20.0); EnhancePixel(8.0);
r=p+2*GetPixelChannels(image)*(image->columns+4);
EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
EnhancePixel(40.0); EnhancePixel(10.0);
r=p+3*GetPixelChannels(image)*(image->columns+4);
EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
EnhancePixel(20.0); EnhancePixel(8.0);
r=p+4*GetPixelChannels(image)*(image->columns+4);
EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
EnhancePixel(8.0); EnhancePixel(5.0);
if (total_weight > MagickEpsilon)
{
pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
}
SetPixelViaPixelInfo(image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(enhance_image);
}
if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
enhance_view=DestroyCacheView(enhance_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
enhance_image=DestroyImage(enhance_image);
return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
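%
% A minimal usage sketch (assuming 'image' and 'exception' exist):
%
% if (EqualizeImage(image,exception) == MagickFalse)
% CatchException(exception);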
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
ExceptionInfo *exception)
{
#define EqualizeImageTag "Equalize/Image"
CacheView
*image_view;
double
black[CompositePixelChannel+1],
*equalize_map,
*histogram,
*map,
white[CompositePixelChannel+1];
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize histogram arrays.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateEqualizeImage(image,exception) != MagickFalse)
return(MagickTrue);
#endif
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*equalize_map));
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
sizeof(*histogram));
map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
(map == (double *) NULL))
{
if (map != (double *) NULL)
map=(double *) RelinquishMagickMemory(map);
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (equalize_map != (double *) NULL)
equalize_map=(double *) RelinquishMagickMemory(equalize_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Form histogram.
*/
status=MagickTrue;
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
intensity;
intensity=(double) p[i];
if ((image->channel_mask & SyncChannels) != 0)
intensity=GetPixelIntensity(image,p);
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum(intensity))+i]++;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Integrate the histogram to get the equalization map.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
intensity;
register ssize_t
j;
intensity=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
{
intensity+=histogram[GetPixelChannels(image)*j+i];
map[GetPixelChannels(image)*j+i]=intensity;
}
}
(void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*equalize_map));
(void) memset(black,0,sizeof(black));
(void) memset(white,0,sizeof(white));
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
register ssize_t
j;
black[i]=map[i];
white[i]=map[GetPixelChannels(image)*MaxMap+i];
if (black[i] != white[i])
for (j=0; j <= (ssize_t) MaxMap; j++)
equalize_map[GetPixelChannels(image)*j+i]=(double)
ScaleMapToQuantum((double) ((MaxMap*(map[
GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
}
histogram=(double *) RelinquishMagickMemory(histogram);
map=(double *) RelinquishMagickMemory(map);
if (image->storage_class == PseudoClass)
{
register ssize_t
j;
/*
Equalize colormap.
*/
for (j=0; j < (ssize_t) image->colors; j++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
RedPixelChannel);
if (black[channel] != white[channel])
image->colormap[j].red=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
channel];
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
GreenPixelChannel);
if (black[channel] != white[channel])
image->colormap[j].green=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
channel];
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
BluePixelChannel);
if (black[channel] != white[channel])
image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
channel];
}
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
{
PixelChannel channel = GetPixelChannelChannel(image,
AlphaPixelChannel);
if (black[channel] != white[channel])
image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
channel];
}
}
}
/*
Equalize image.
*/
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
continue;
q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
ScaleQuantumToMap(q[j])+j]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
equalize_map=(double *) RelinquishMagickMemory(equalize_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gamma: the image gamma.
%
% o exception: return any errors or warnings in this structure.
%
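% A minimal usage sketch (assuming 'image' and 'exception' exist; a gamma
% greater than 1.0 brightens the mid-tones):
%
% (void) GammaImage(image,2.2,exception);
%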
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
ExceptionInfo *exception)
{
#define GammaCorrectImageTag "GammaCorrect/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
Quantum
*gamma_map;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize gamma maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (gamma == 1.0)
return(MagickTrue);
gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
if (gamma_map == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
if (gamma != 0.0)
for (i=0; i <= (ssize_t) MaxMap; i++)
gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
MaxMap,1.0/gamma)));
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Gamma-correct colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].red))];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].green))];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].blue))];
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
ClampToQuantum(image->colormap[i].alpha))];
}
/*
Gamma-correct image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
q[j]))];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,GammaCorrectImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
if (image->gamma != 0.0)
image->gamma*=gamma;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
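%
% A minimal usage sketch (assuming 'image' and 'exception' exist):
%
% (void) GrayscaleImage(image,Rec709LumaPixelIntensityMethod,exception);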
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
{
image->intensity=method;
image->type=GrayscaleType;
if ((method == Rec601LuminancePixelIntensityMethod) ||
(method == Rec709LuminancePixelIntensityMethod))
return(SetImageColorspace(image,LinearGRAYColorspace,exception));
return(SetImageColorspace(image,GRAYColorspace,exception));
}
#endif
/*
Grayscale image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
blue,
green,
intensity,
red;
red=(MagickRealType) GetPixelRed(image,q);
green=(MagickRealType) GetPixelGreen(image,q);
blue=(MagickRealType) GetPixelBlue(image,q);
intensity=0.0;
switch (method)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
intensity=(MagickRealType) (((double) red*red+green*green+
blue*blue)/3.0);
break;
}
case Rec601LumaPixelIntensityMethod:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:
{
if (image->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if (image->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
intensity=(MagickRealType) (sqrt((double) red*red+green*green+
blue*blue)/sqrt(3.0));
break;
}
}
SetPixelGray(image,ClampToQuantum(intensity),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
image->intensity=method;
image->type=GrayscaleType;
if ((method == Rec601LuminancePixelIntensityMethod) ||
(method == Rec709LuminancePixelIntensityMethod))
return(SetImageColorspace(image,LinearGRAYColorspace,exception));
return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
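%
% A minimal usage sketch (assuming 'image', 'image_info', and 'exception'
% exist; "hald:8" reads an identity Hald CLUT that you would normally
% color-transform first):
%
% Image *hald;
% (void) CopyMagickString(image_info->filename,"hald:8",MagickPathExtent);
% hald=ReadImage(image_info,exception);
% if (hald != (Image *) NULL)
% { (void) HaldClutImage(image,hald,exception); hald=DestroyImage(hald); }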
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag "Clut/Image"
typedef struct _HaldInfo
{
double
x,
y,
z;
} HaldInfo;
CacheView
*hald_view,
*image_view;
double
width;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
size_t
cube_size,
length,
level;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(hald_image != (Image *) NULL);
assert(hald_image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Hald clut image.
*/
status=MagickTrue;
progress=0;
length=(size_t) MagickMin((MagickRealType) hald_image->columns,
(MagickRealType) hald_image->rows);
for (level=2; (level*level*level) < length; level++) ;
level*=level;
cube_size=level*level;
width=(double) hald_image->columns;
GetPixelInfo(hald_image,&zero);
hald_view=AcquireVirtualCacheView(hald_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
offset;
HaldInfo
point;
PixelInfo
pixel,
pixel1,
pixel2,
pixel3,
pixel4;
point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
point.x-=floor(point.x);
point.y-=floor(point.y);
point.z-=floor(point.z);
pixel1=zero;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset,width),floor(offset/width),&pixel1,exception);
if (status == MagickFalse)
break;
pixel2=zero;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
if (status == MagickFalse)
break;
pixel3=zero;
CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
point.y,&pixel3);
offset+=cube_size;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset,width),floor(offset/width),&pixel1,exception);
if (status == MagickFalse)
break;
status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
if (status == MagickFalse)
break;
pixel4=zero;
CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
point.y,&pixel4);
pixel=zero;
CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
point.z,&pixel);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
SetPixelRed(image,ClampToQuantum(pixel.red),q);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelBlack(image,ClampToQuantum(pixel.black),q);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
hald_view=DestroyCacheView(hald_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black and white points. The black
% point specifies the darkest color in the image; colors darker than the
% black point are set to zero. The white point specifies the lightest color
% in the image; colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: the gamma correction to apply to the image.
%
% o exception: return any errors or warnings in this structure.
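%
% A minimal usage sketch (assuming 'image' and 'exception' exist; the black
% and white points are in quantum units, here 10% and 90% of QuantumRange,
% with no gamma change):
%
% (void) LevelImage(image,0.10*QuantumRange,0.90*QuantumRange,1.0,
% exception);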
%
*/
static inline double LevelPixel(const double black_point,
const double white_point,const double gamma,const double pixel)
{
double
level_pixel,
scale;
scale=PerceptibleReciprocal(white_point-black_point);
level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),
1.0/gamma);
return(level_pixel);
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag "Level/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Level colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].red));
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].green));
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].blue));
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
white_point,gamma,image->colormap[i].alpha));
}
/*
Level image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
(double) q[j]));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) ClampImage(image,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be invoked with the +level command-line option, or by
% using a '!' flag on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Alternatively, by specifying different levels for each channel
% of an image, you can convert a greyscale image into any linear color
% gradient, according to those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
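%
% A minimal usage sketch (assuming 'image' and 'exception' exist; this
% compresses the full range into 10%..90% of QuantumRange):
%
% (void) LevelizeImage(image,0.10*QuantumRange,0.90*QuantumRange,1.0,
% exception);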
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
const double black_point,const double white_point,const double gamma,
ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
(QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Level colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) LevelizeValue(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) LevelizeValue(
image->colormap[i].alpha);
}
/*
Level image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=LevelizeValue(q[j]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given colors to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by
% channel basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true, the image values are modified in the
% reverse direction. That is, any existing "black" and "white" colors in the
% image become the given color values, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: the color to map black to/from.
%
% o white_color: the color to map white to/from.
%
% o invert: if true, map to the given colors (levelize) rather than from
% them (level).
%
% o exception: return any errors or warnings in this structure.
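%
% A minimal usage sketch (assuming 'image' and 'exception' exist; the color
% names are illustrative):
%
% PixelInfo black_color, white_color;
% (void) QueryColorCompliance("navy",AllCompliance,&black_color,exception);
% (void) QueryColorCompliance("gold",AllCompliance,&white_color,exception);
% (void) LevelImageColors(image,&black_color,&white_color,MagickFalse,
% exception);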
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
const PixelInfo *black_color,const PixelInfo *white_color,
const MagickBooleanType invert,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickStatusType
status;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
(IsGrayColorspace(white_color->colorspace) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
if (invert == MagickFalse)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
else
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
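%
% A minimal usage sketch (assuming 'image' and 'exception' exist; both
% points are pixel counts, here clipping about 1% of the darkest and 1% of
% the lightest pixels):
%
% double pixels = (double) image->columns*image->rows;
% (void) LinearStretchImage(image,0.01*pixels,0.01*pixels,exception);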
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"
CacheView
*image_view;
double
*histogram,
intensity;
MagickBooleanType
status;
ssize_t
black,
white,
y;
/*
Allocate histogram and linear map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
if (histogram == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Form histogram.
*/
(void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
intensity=GetPixelIntensity(image,p);
histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Find the histogram boundaries by locating the black and white point levels.
*/
intensity=0.0;
for (black=0; black < (ssize_t) MaxMap; black++)
{
intensity+=histogram[black];
if (intensity >= black_point)
break;
}
intensity=0.0;
for (white=(ssize_t) MaxMap; white != 0; white--)
{
intensity+=histogram[white];
if (intensity >= white_point)
break;
}
histogram=(double *) RelinquishMagickMemory(histogram);
status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
(double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
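%
% A minimal usage sketch (assuming 'image' and 'exception' exist; this
% keeps brightness and hue unchanged while boosting saturation by 50%):
%
% (void) ModulateImage(image,"100,150,100",exception);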
%
*/
static inline void ModulateHCL(const double percent_hue,
const double percent_chroma,const double percent_luma,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
chroma*=0.01*percent_chroma;
luma*=0.01*percent_luma;
ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
const double percent_chroma,const double percent_luma,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
chroma*=0.01*percent_chroma;
luma*=0.01*percent_luma;
ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
const double percent_saturation,const double percent_brightness,double *red,
double *green,double *blue)
{
double
brightness,
hue,
saturation;
/*
Increase or decrease color brightness, saturation, or hue.
*/
ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
brightness*=0.01*percent_brightness;
ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
const double percent_saturation,const double percent_intensity,double *red,
double *green,double *blue)
{
double
intensity,
hue,
saturation;
/*
Increase or decrease color intensity, saturation, or hue.
*/
ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
intensity*=0.01*percent_intensity;
ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
const double percent_saturation,const double percent_lightness,double *red,
double *green,double *blue)
{
double
hue,
lightness,
saturation;
/*
Increase or decrease color lightness, saturation, or hue.
*/
ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
lightness*=0.01*percent_lightness;
ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
const double percent_saturation,const double percent_value,double *red,
double *green,double *blue)
{
double
hue,
saturation,
value;
/*
Increase or decrease color value, saturation, or hue.
*/
ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
saturation*=0.01*percent_saturation;
value*=0.01*percent_value;
ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
const double percent_whiteness,const double percent_blackness,double *red,
double *green,double *blue)
{
double
blackness,
hue,
whiteness;
/*
Increase or decrease color blackness, whiteness, or hue.
*/
ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
hue+=fmod((percent_hue-100.0),200.0)/200.0;
blackness*=0.01*percent_blackness;
whiteness*=0.01*percent_whiteness;
ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
const double percent_chroma,const double percent_hue,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
luma*=0.01*percent_luma;
chroma*=0.01*percent_chroma;
hue+=fmod((percent_hue-100.0),200.0)/200.0;
ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
const double percent_chroma,const double percent_hue,double *red,
double *green,double *blue)
{
double
hue,
luma,
chroma;
/*
Increase or decrease color luma, chroma, or hue.
*/
ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
luma*=0.01*percent_luma;
chroma*=0.01*percent_chroma;
hue+=fmod((percent_hue-100.0),200.0)/200.0;
ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
const char
*artifact;
double
percent_brightness,
percent_hue,
percent_saturation;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
register ssize_t
i;
ssize_t
y;
/*
Initialize modulate table.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (modulate == (char *) NULL)
return(MagickFalse);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
flags=ParseGeometry(modulate,&geometry_info);
percent_brightness=geometry_info.rho;
percent_saturation=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
percent_saturation=100.0;
percent_hue=geometry_info.xi;
if ((flags & XiValue) == 0)
percent_hue=100.0;
colorspace=UndefinedColorspace;
artifact=GetImageArtifact(image,"modulate:colorspace");
if (artifact != (const char *) NULL)
colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
MagickFalse,artifact);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
double
blue,
green,
red;
/*
Modulate image colormap.
*/
red=(double) image->colormap[i].red;
green=(double) image->colormap[i].green;
blue=(double) image->colormap[i].blue;
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSIColorspace:
{
ModulateHSI(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
image->colormap[i].red=red;
image->colormap[i].green=green;
image->colormap[i].blue=blue;
}
/*
Modulate image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateModulateImage(image,percent_brightness,percent_hue,
percent_saturation,colorspace,exception) != MagickFalse)
return(MagickTrue);
#endif
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSIColorspace:
{
ModulateHSI(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
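/*
  Illustrative usage sketch (not part of this file; image acquisition and
  error handling are omitted and the percentages are arbitrary examples).
  The modulate string is "brightness[,saturation[,hue]]", each a percentage
  of the current value with 100 meaning unchanged; the hue percentage maps
  linearly to a rotation, so 0 or 200 is a 180 degree shift.
*/
#if 0
{
  ExceptionInfo *exception = AcquireExceptionInfo();
  (void) ModulateImage(image,"110,80,100",exception);  /* +10% brightness, -20% saturation */
  (void) SetImageArtifact(image,"modulate:colorspace","HSB");  /* use HSB instead of the default HSL */
  (void) ModulateImage(image,"100,100,150",exception);  /* rotate hue by +90 degrees */
  exception=DestroyExceptionInfo(exception);
}
#endif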
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag "Negate/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Negate colormap.
*/
if( grayscale != MagickFalse )
if ((image->colormap[i].red != image->colormap[i].green) ||
(image->colormap[i].green != image->colormap[i].blue))
continue;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
/*
Negate image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
if( grayscale != MagickFalse )
{
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
if (IsPixelGray(image,q) != MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=QuantumRange-q[j];
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
Negate image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[j]=QuantumRange-q[j];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
ExceptionInfo *exception)
{
double
black_point,
white_point;
black_point=(double) image->columns*image->rows*0.0015;
white_point=(double) image->columns*image->rows*0.9995;
return(ContrastStretchImage(image,black_point,white_point,exception));
}
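/*
  Equivalent direct call (illustrative sketch, not part of the build):
  NormalizeImage() is simply ContrastStretchImage() with hard-coded pixel
  counts, so for an image of N = columns*rows pixels it saturates the
  darkest 0.15% and the brightest 0.05% of pixels.
*/
#if 0
{
  double N = (double) image->columns*image->rows;
  (void) ContrastStretchImage(image,0.0015*N,0.9995*N,exception);
}
#endif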
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const double contrast,
% const double midpoint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
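/*
  A quick numeric check of the identity quoted above (illustrative only,
  not compiled into the library; assumes <assert.h> and <math.h>): the
  logistic sigmoid and the shifted, scaled tanh agree to rounding error.
*/
#if 0
static void CheckSigmoidalIdentity(void)
{
  double t;
  for (t=(-8.0); t <= 8.0; t+=0.25)
    assert(fabs(1.0/(1.0+exp(-t))-(1.0+tanh(t/2.0))/2.0) < 1e-14);
}
#endif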
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in-gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
const double x)
{
const double sig0=Sigmoidal(a,b,0.0);
const double sig1=Sigmoidal(a,b,1.0);
const double argument=(sig1-sig0)*x+sig0;
const double clamped=
(
#if defined(MAGICKCORE_HAVE_ATANH)
argument < -1+MagickEpsilon
?
-1+MagickEpsilon
:
( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
);
return(b+(2.0/a)*atanh(clamped));
#else
argument < MagickEpsilon
?
MagickEpsilon
:
( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
);
return(b-log(1.0/clamped-1.0)/a);
#endif
}
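/*
  Illustrative round-trip check (not compiled in; assumes <assert.h> and
  <math.h>, and a=4, b=0.5 are arbitrary demo values): InverseScaledSigmoidal
  is a right inverse, i.e. ScaledSigmoidal(a,b,InverseScaledSigmoidal(a,b,x))
  recovers x for x in [0,1].
*/
#if 0
static void CheckRightInverse(void)
{
  const double a = 4.0, b = 0.5;
  double x;
  for (x=0.0; x <= 1.0; x+=0.125)
    assert(fabs(ScaledSigmoidal(a,b,InverseScaledSigmoidal(a,b,x))-x) < 1e-9);
}
#endif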
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
const MagickBooleanType sharpen,const double contrast,const double midpoint,
ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Convenience macros.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Side effect: may clamp values unless contrast<MagickEpsilon, in which
case nothing is done.
*/
if (contrast < MagickEpsilon)
return(MagickTrue);
/*
Sigmoidal-contrast enhance colormap.
*/
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
if( sharpen != MagickFalse )
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(MagickRealType) ScaledSig(
image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(MagickRealType) ScaledSig(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(MagickRealType) ScaledSig(
image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(MagickRealType) ScaledSig(
image->colormap[i].alpha);
}
else
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(MagickRealType) InverseScaledSig(
image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(MagickRealType) InverseScaledSig(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(MagickRealType) InverseScaledSig(
image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
image->colormap[i].alpha);
}
}
/*
Sigmoidal-contrast enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if( sharpen != MagickFalse )
q[i]=ScaledSig(q[i]);
else
q[i]=InverseScaledSig(q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
nUtil.h | //
// Created by Bangtian Liu on 5/2/19.
//
#ifndef PROJECT_NUTIL_H
#define PROJECT_NUTIL_H
#include <vector>
#include <algorithm>
#include <omp.h>
#include <map>
#include "config.h"
#include <cmath>
#include <cstring>
#include <cstdlib>
#include <random>
using namespace std;
namespace Sympiler {
namespace Internal {
struct retvalue{
int *skels;
int skels_length;
double *proj;
int proj_column;
};
typedef retvalue Ret;
struct Dcost{
int index;
unsigned long cost;
};
bool compare(Dcost lhs, Dcost rhs);
template <typename T>
__inline__ T distance(T *point1, T *point2, int dim)
{
T sum=0;
for(int i=0;i<dim;i++){
sum+=(point2[i]-point1[i])*(point2[i]-point1[i]);
}
return std::sqrt(sum);
}
template <typename T>
__inline__ T *Mean(int *lids, int length, Setup setup)
{
int n_split = omp_get_max_threads();
T *temp = (T *)malloc(sizeof(T)*setup.d*n_split);
memset(temp,0,sizeof(T)*setup.d*n_split);
T *mean = (T *)malloc(sizeof(T)*setup.d);
memset(mean,0,sizeof(T)*setup.d);
auto X=setup.X;
int d = setup.d;
// #pragma omp parallel for num_threads(n_split)
for ( int j = 0; j < n_split; j ++ )
for ( int i = j; i < length; i += n_split )
for ( int p = 0; p < setup.d; p ++ )
temp[ j * d + p ] += X[ lids[ i ] * d + p ];
for ( int j = 0; j < n_split; j ++ ) {
//#pragma omp parallel for num_threads(n_split)
for (int p = 0; p < d; p++)
mean[p] += temp[j * d + p];
}
for ( int p = 0; p < d; p ++ ) mean[ p ] /= length;
return mean;
}
__inline__ int level(int node)
{
return (int) floor(log(node+1.0)/log(2));
}
inline double dist2(double* x, double* y, int d) {
double k = 0.;
for (int i=0; i<d; i++) k += pow(x[i] - y[i], 2.);
return k;
}
inline double dist(double* x, double* y, int d) {
return sqrt(dist2(x, y, d));
}
void write2binary(std::string file, int *matrix, uint64_t len);
//
//void k_means(int k, double* p, int n, int d, int* nc);
void k_means(int k, double* p, int n, int d, int* nc, std::vector<int> lids, std::vector<std::vector<int>> &clusters);
int decomposition(double *A, int nRows, int nCols, double tolerance, int **skels, double **proj, int **jpvt, Setup &setup);
void Fsubmatrix(std::vector<int> &amap, std::vector<int> &bmap, double *submatrix, Setup setup);
void randn(int nrow, int ncol, double * array, double a, double b);
void submatrix(std::vector<int> &amap, std::vector<int> &bmap, double *submatrix, Setup &setup);
void Fsubmatrix(int *amap, int lena, int *bmap, int lenb, double *submatrix, Setup &setup);
void writepair2txt(std::string file, int *pair, int len);
int findMin(uint64_t *cost, int size);
void write2binary(std::string file, double *matrix, uint64_t len);
void writeoffset2binary(std::string file, int *offset, int len);
void write2txt(std::string file, unsigned long int *offset, int len);
void write2txt(std::string file, int *offset, int len);
__inline__ int begin(int l)
{
return (1<<l)- 1;
}
__inline__ int stop(int l)
{
return (1<<(l+1))-2;
}
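/*
  Illustrative consistency check (not part of the build): with the implicit
  binary-tree numbering used by level(), begin() and stop(), level l spans
  nodes [2^l - 1, 2^(l+1) - 2], and level(node) recovers l for every node in
  that range (up to floating-point rounding inside level()).
*/
#if 0
static void check_tree_indexing()
{
  for (int l = 0; l < 10; l++)
    for (int node = begin(l); node <= stop(l); node++)
      if (level(node) != l) abort();
}
#endif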
void HeapAdjust(
int s, int n,
std::pair<double , int> *NN);
void HeapSelect( int n, int k, std::pair<double , int> *Query, std::pair<double , int> *NN);
template<typename TA, typename TB>
std::pair<TB, TA> flip_pair( const std::pair<TA, TB> &p )
{
return std::pair<TB, TA>( p.second, p.first );
} /** end flip_pair() */
template<typename TA, typename TB>
std::multimap<TB, TA> flip_map( const std::map<TA, TB> &src )
{
std::multimap<TB, TA> dst;
std::transform( src.begin(), src.end(), std::inserter( dst, dst.begin() ),
flip_pair<TA, TB> );
return dst;
}
}
}
#endif //PROJECT_NUTIL_H
|
anglehist.c |
// Utility to parse lost particle data into angle-resolved spectra
//
// Usage:
// ./anglehist [species name] optional: [maximum energy in MeV]
//
//
// icc -fopenmp -o anglehist anglehist.c
//
// Requires OpenMP 4.5 (gcc 6.1 or Intel 17.0), or you may run serially (slowly)
//
// This program must match how the lost particle data are written as defined in
// your deck. TODO: Update this all to hdf5 or similar, so you don't have to
// know the data layout bit for bit.
//
// 2Dbinhistplot.py will make a nice plot for you using the output of this
// program.
//
// All the particles with energies above the maximum of the histogram will be
// artificially placed in the highest bin.
//
// First release written by Scott V. Luedtke, XCP-6, October 4, 2019.
#include <stdio.h>
#include <stdlib.h> /* for atoi */
#include <stdarg.h> /* for va_list, va_start, va_end */
#include <errno.h>
#include <string.h> /* for strcspn */
#include <math.h> /* for sqrt */
#include <sys/stat.h> /* for mkdir */
#include <stdint.h> /* for uint32_t, uint64_t */
#include <inttypes.h> /* to print uint64_t */
#include <glob.h> /* to get list of filenames */
#define BEGIN_PRIMITIVE do
#define END_PRIMITIVE while (0)
void print_log( const char *fmt, ... );
#define ERROR(args) BEGIN_PRIMITIVE { \
print_log( "Error at %s(%i):\n\t", __FILE__, __LINE__ ); \
print_log args; \
print_log( "\n" ); \
} END_PRIMITIVE
#define ABORT(cond) { if (cond) exit(0); }
//---------------------------------------------------------------------
// General purpose memory allocation macro
#define ALLOCATE(A,LEN,TYPE) \
if ( !((A)=(TYPE *)malloc((size_t)(LEN)*sizeof(TYPE))) ) \
ERROR(("Cannot allocate."));
// Construct an index for the particle data. This must match how the data are
// output in the lost particle processor in your VPIC deck.
#define ux 0
#define uy 1
#define uz 2
#define w 6
#define x 3
#define y 4
#define z 5
#define numvars 7
int main( int argc, char *argv[] ) {
fprintf(stderr,"Ham.\n");
int num_tracers_total, nprocs;
int nvar;
int itmax;
char temp[256];
char usage_msg[] =
"Usage: ./lostspec [species name] optional: [maximum energy in MeV]\n\n";
if ( argc != 2 && argc != 3) {
fprintf( stderr, "%s", usage_msg );
exit(0);
}
// Read some numbers from params.txt
char buffer[1024];
//int interval, nstep_total, i;
FILE *params;
params = fopen("../params.txt", "r");
if (!params) ERROR(("Cannot open params.txt. (The location is probably wrong.)"));
double timeToSI, lengthToSI, massToSI, chargeToSI;
fgets(buffer, 1024, params);
fscanf(params, "%lf %[^\n]\n", &timeToSI, buffer);
fscanf(params, "%lf %[^\n]\n", &lengthToSI, buffer);
fscanf(params, "%lf %[^\n]\n", &massToSI, buffer);
fscanf(params, "%lf %[^\n]\n", &chargeToSI, buffer);
fclose(params);
double emin = 0;
double emax; // MeV
double Ecut = 0;
char *particle = argv[1];
if (argc>2) emax = atof(argv[2]);
else emax = 500;
double amax = M_PI;
double amin = 0;
// Must define these to use in the reduction clause
#define nbinse 100
#define nbinsa 100
double de = emax/(double)nbinse;
double da = amax/(double)nbinsa;
// When summing on the order of 10^13 particle weights with only ~8
// significant digits of single precision, you may need extended precision
// for the sums.
long double hist[nbinsa][nbinse] = {0};
// The code uses normalized momentum, so don't use the conversion factors
// from params.txt. If you want to use different units than here, change
// this, or manually adjust them in your plotter.
#define e_SI (1.602176634e-19) /* C */
#define c_SI (2.99792458e8) /* m / s */
#define m_e_SI (9.1093837015e-31) /* kg */
#define mp_me 1836.15267343
double ekMeVconst;
double elecekMeVconst = m_e_SI*c_SI*c_SI*1e-6/e_SI;
double carbekMeVconst = 12.*mp_me*elecekMeVconst;
double protonekMeVconst = mp_me*elecekMeVconst;
if (strcmp(particle, "I2")==0) ekMeVconst = carbekMeVconst;
else if (strcmp(particle, "proton")==0){
ekMeVconst = protonekMeVconst;
particle = "I2";
}
else ekMeVconst = elecekMeVconst;
//TODO: Race this (untested!) bit of code against glob on a large VPIC run on
//Lustre
//// Count how many files there are in the lostparts directory
//char dirname[256] = "../../lostparts";
//struct dirent *de;
//DIR *dir = opendir(dirname);
//if(!dir) ERROR("Directory %s not found", dirname);
//int count=0;
//while (de = readdir(dir)) ++count;
//closedir(dir);
//// Construct the (massive) list of files
//char **filelist;
//ALLOCATE(filelist, count, char*);
//dir = opendir(dirname);
//for(int i=0;i<count;i++){
// ALLOCATE(filelist[i], 64, char);
// filelist[i] = readdir(dir)->d_name;
//}
char filepath[256]; // Better be big enough
sprintf(filepath, "../pb_diagnostic/%s.*", particle);
glob_t globbuf;
glob(filepath, GLOB_NOSORT, NULL, &globbuf);
size_t count = globbuf.gl_pathc;
fprintf(stderr, "Found %zu\n files", count);
// Consider only particles within a certain box
// Check if the line that does this is commented below!
double xmin = -30e-6/lengthToSI;
double xmax = 70e-6/lengthToSI;
double ymin = -10e-6/lengthToSI;
double ymax = 10e-6/lengthToSI;
double zmin = 0;//-17e-6/lengthToSI;
double zmax = 17e-6/lengthToSI;
long double Etot=0;
unsigned long long int ntot=0;
long double wsum=0;
#pragma omp parallel
{
// Implicitly private variables declared outside of the loop
char filename[256];
float partf[numvars];
double part[numvars];
float u2,ek,thet;
int ebin, abin, j;
size_t i;
unsigned long int counter;
FILE *data;
#pragma omp for schedule(guided) reduction(+:hist, Etot, ntot, wsum)
for(i=0;i<count;i++){
// Check if the filename is one we want
//if (!strncmp(filelist[i], "boundary", 8)) continue;
sprintf(filename, "%s", globbuf.gl_pathv[i]);
//fprintf(stderr, "Working on file %s\n", filename);
counter=0;
data = fopen(filename, "rb");
if (data == NULL) ERROR(("Cannot open file %s\n", filename));
while(1){
if (fread(partf, sizeof(float), numvars, data) != numvars){
//printf("fread failed !!!! \n\nEOF was not set!!!!\n\n");
}
if (feof(data)){
//printf("breaking\n");
break;
}
// Cast to double precision to avoid "weird" floating point edge cases
for (j=0;j<numvars;j++) part[j] = partf[j];
//fprintf(stdout, "Pos is %g %g %g %g\n", part[x], part[y], part[z], part[ux]);
// If not in box, ignore
//if (part[x]<xmin || part[x]>xmax || part[z]<zmin || part[z]>zmax) continue;
//fprintf(stderr, "starting counter %d\n", counter);
// Get energy index
u2 = part[ux]*part[ux] + part[uy]*part[uy] + part[uz]*part[uz];
// I could save a multiply in this innermost loop by doing all the
// binning in normalized units, but the performance is dominated by
// disk access, so let's keep things a bit more understandable
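// Kinetic energy T = mc^2*(gamma - 1) with gamma = sqrt(1 + u^2); the
// algebraically equivalent form gamma - 1 = u^2/(1 + gamma) used below
// avoids catastrophic cancellation at small u.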
ek = ekMeVconst * u2/ (1. + sqrt(1.+ u2));
if (ek < Ecut) continue;
Etot += ek*part[w]; // Increment Etot before putting it in the hist
if (ek > emax) ek = emax-0.5*de;
ebin = (int)(ek/de);
// Get angle index
thet = acosf(part[ux]/sqrtf(u2));
abin = (int)(thet/da);
// Especially in single precision, the argument of acosf above can be
// exactly negative unity, giving thet == pi and abin == nbinsa; fold that
// edge case back into the last bin.
if (abin==nbinsa) abin--;
if(ebin >= nbinse || abin >= nbinsa || abin < 0){
fprintf(stderr, "ebin is %d abin is %d count is %d\n", ebin, abin, counter);
fprintf(stderr, "ux is %.18e uy is %.18e uz is %.18e\n", part[ux], part[uy], part[uz]);
fprintf(stderr, "The ux is %.18e and the fraction is %.18e\n", part[ux], part[ux]/sqrtf(u2));
fprintf(stderr, "is equal evaluates to %d\n", -1.*part[ux]==sqrtf(u2));
fprintf(stderr, "ux/sqrtf(u2) is %.18e\nsqrtf(u2)/ux is %.18e\n", sqrtf(u2)/part[ux], part[ux]/sqrtf(u2));
ntot++;
continue;
//ERROR(("You're probably about to segfault"));
}
hist[abin][ebin] += part[w];
wsum += part[w];
ntot++;
counter++;
//fprintf(stderr, "Done with this one\n");
}
fclose(data);
fprintf(stderr, "File %zu had %lu entries\n", i, counter);
}
}
count = globbuf.gl_pathc;
globfree(&globbuf);
fprintf(stdout, "The total number of simulation particles used is %lld\n", ntot);
fprintf(stdout, "The total number of physical particles, assuming you didn't do anything weird in your deck, is %Lg\n", wsum);
fprintf(stdout, "The total energy in the particles is %Lg MeV, or %Lg J.\n", Etot, Etot*1e6*e_SI);
// Normalize the angle bins
// The normalization is 4*pi steradians / the solid angle subtended by the bin
// Let the compiler handle any parallelization here.
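// A bin spanning polar angles [da*i, da*(i+1)] subtends
// dOmega = 2*pi*(cos(da*i) - cos(da*(i+1))) steradians, so
// 4*pi/dOmega = 2/(cos(da*i) - cos(da*(i+1))); the extra factor 1/de makes
// the histogram per-MeV as well.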
int i,j;
double norm;
for(i=0;i<nbinsa;i++){
norm = 2./((cos(da*i)-cos(da*(i+1)))*de);
for(j=0;j<nbinse;j++)
hist[i][j] *= norm;
}
// Write the hist params for the Python plotter to read
FILE * out;
sprintf(temp, "%s%s", particle, "anglehistparams.txt");
out = fopen(temp, "w");
fprintf(out, "# Parameter file used for the Python 2D hist plotter.\n");
fprintf(out, "%.14e Angle minimum.\n", amin);
fprintf(out, "%.14e Angle maximum\n", amax);
fprintf(out, "%.14e Energy minimum.\n", emin);
fprintf(out, "%.14e Energy maximum\n", emax);
fprintf(out, "%d Number of bins in angle\n", nbinsa);
fprintf(out, "%d Number of bins in energy\n", nbinse);
fprintf(out, "%s Particle species\n", particle);
fprintf(out, "%.14e ekMeVconst\n", ekMeVconst);
fprintf(out, "%lld Number of particles used\n", ntot);
fprintf(out, "%.14e Total Energy in histogram (MeV)\n", Etot);
fprintf(out, "%lld Number of physical particles in histogram\n", wsum);
fclose(out);
// Store the histogram for Python plotting
// Cast to double so numpy can understand
double histD[nbinsa][nbinse];
for(i=0;i<nbinsa;i++)
for(j=0;j<nbinse;j++)
histD[i][j] = hist[i][j];
sprintf(temp, "%s%s", particle, "lostspec.bin");
out = fopen(temp, "w");
for(i=0;i<nbinsa;i++)
fwrite(histD[i], sizeof(double), nbinse, out);
fclose(out);
return 0;
} // main
//---------------------------------------------------------------------
// For ERROR macro
//
void print_log( const char *fmt, ... ) {
va_list ap;
va_start( ap, fmt );
vfprintf( stderr, fmt, ap );
va_end( ap );
fflush( stderr );
} // print_log
|
mathint.c | #include "mathint.h"
#if (defined __AVX__ || defined __SSE4_2__)
#include "math_x86.h"
#endif
double GL_value[] = {
0.0000875560264341,
0.0004612700113121,
0.0011333756872430,
0.0021036207325094,
0.0033714435498935,
0.0049360907541328,
0.0067966286377069,
0.0089519457821408,
0.0114007542680463,
0.0141415906264317,
0.0171728167840174,
0.0204926210731500,
0.0240990193293678,
0.0279898560848899,
0.0321628058610418,
0.0366153745605260,
0.0413449009595198,
0.0463485582991216,
0.0516233559754209,
0.0571661413273014,
0.0629736015209841,
0.0690422655302257,
0.0753685062110155,
0.0819485424695466,
0.0887784415221781,
0.0958541212460431,
0.1031713526189034,
0.1107257622467940,
0.1185128349779526,
0.1265279166014690,
0.1347662166290456,
0.1432228111582063,
0.1518926458152429,
0.1607705387761404,
0.1698511838636770,
0.1791291537188462,
0.1885989030447076,
0.1982547719207257,
0.2080909891856185,
0.2181016758866909,
0.2282808487935948,
0.2386224239744122,
0.2491202204319278,
0.2597679637979140,
0.2705592900832239,
0.2814877494814479,
0.2925468102238625,
0.3037298624833663,
0.3150302223250705,
0.3264411357011823,
0.3379557824877933,
0.3495672805611614,
0.3612686899110478,
0.3730530167886529,
0.3849132178866700,
0.3968422045489604,
0.4088328470073314,
0.4208779786428876,
0.4329704002694061,
0.4451028844361781,
0.4572681797477423,
0.4694590151979302,
0.4816681045156332,
0.4938881505196921,
0.5061118494803079,
0.5183318954843668,
0.5305409848020698,
0.5427318202522577,
0.5548971155638218,
0.5670295997305939,
0.5791220213571124,
0.5911671529926685,
0.6031577954510396,
0.6150867821133300,
0.6269469832113471,
0.6387313100889522,
0.6504327194388386,
0.6620442175122067,
0.6735588642988177,
0.6849697776749295,
0.6962701375166337,
0.7074531897761375,
0.7185122505185521,
0.7294407099167761,
0.7402320362020860,
0.7508797795680722,
0.7613775760255878,
0.7717191512064052,
0.7818983241133091,
0.7919090108143816,
0.8017452280792743,
0.8114010969552925,
0.8208708462811538,
0.8301488161363231,
0.8392294612238596,
0.8481073541847571,
0.8567771888417937,
0.8652337833709545,
0.8734720833985310,
0.8814871650220474,
0.8892742377532059,
0.8968286473810967,
0.9041458787539569,
0.9112215584778219,
0.9180514575304535,
0.9246314937889845,
0.9309577344697743,
0.9370263984790159,
0.9428338586726985,
0.9483766440245791,
0.9536514417008783,
0.9586550990404803,
0.9633846254394740,
0.9678371941389582,
0.9720101439151101,
0.9759009806706322,
0.9795073789268500,
0.9828271832159826,
0.9858584093735683,
0.9885992457319537,
0.9910480542178592,
0.9932033713622931,
0.9950639092458672,
0.9966285564501065,
0.9978963792674906,
0.9988666243127571,
0.9995387299886880,
0.9999124439735659
};
double GL_weight[] = {
0.0002246904801461,
0.0005229063396701,
0.0008212515093345,
0.0011191442154813,
0.0014163757357290,
0.0017127630204551,
0.0020081274918693,
0.0023022921283514,
0.0025950809163382,
0.0028863187714329,
0.0031758315808536,
0.0034634462834494,
0.0037489909628174,
0.0040322949452430,
0.0043131888993084,
0.0045915049358304,
0.0048670767075034,
0.0051397395079161,
0.0054093303697515,
0.0056756881620402,
0.0059386536863701,
0.0061980697719754,
0.0064537813696337,
0.0067056356443082,
0.0069534820664760,
0.0071971725020834,
0.0074365613010736,
0.0076715053844326,
0.0079018643296997,
0.0081275004548926,
0.0083482789007946,
0.0085640677115557,
0.0087747379135588,
0.0089801635925043,
0.0091802219686657,
0.0093747934702723,
0.0095637618049755,
0.0097470140293533,
0.0099244406164154,
0.0100959355210650,
0.0102613962434801,
0.0104207238903756,
0.0105738232341107,
0.0107206027696042,
0.0108609747690261,
0.0109948553342302,
0.0111221644468999,
0.0112428260163725,
0.0113567679251182,
0.0114639220718434,
0.0115642244121935,
0.0116576149970314,
0.0117440380082680,
0.0118234417922238,
0.0118957788905017,
0.0119610060683517,
0.0120190843405120,
0.0120699789945096,
0.0121136596114076,
0.0121501000839859,
0.0121792786323453,
0.0122011778169248,
0.0122157845489250,
0.0122230900981313,
0.0122230900981313,
0.0122157845489250,
0.0122011778169248,
0.0121792786323453,
0.0121501000839859,
0.0121136596114076,
0.0120699789945096,
0.0120190843405120,
0.0119610060683517,
0.0118957788905017,
0.0118234417922238,
0.0117440380082680,
0.0116576149970314,
0.0115642244121935,
0.0114639220718434,
0.0113567679251182,
0.0112428260163725,
0.0111221644468999,
0.0109948553342302,
0.0108609747690261,
0.0107206027696042,
0.0105738232341107,
0.0104207238903756,
0.0102613962434801,
0.0100959355210650,
0.0099244406164154,
0.0097470140293533,
0.0095637618049755,
0.0093747934702723,
0.0091802219686657,
0.0089801635925043,
0.0087747379135588,
0.0085640677115557,
0.0083482789007946,
0.0081275004548926,
0.0079018643296997,
0.0076715053844326,
0.0074365613010736,
0.0071971725020834,
0.0069534820664760,
0.0067056356443082,
0.0064537813696337,
0.0061980697719754,
0.0059386536863701,
0.0056756881620402,
0.0054093303697515,
0.0051397395079161,
0.0048670767075034,
0.0045915049358304,
0.0043131888993084,
0.0040322949452430,
0.0037489909628174,
0.0034634462834494,
0.0031758315808536,
0.0028863187714329,
0.0025950809163382,
0.0023022921283514,
0.0020081274918693,
0.0017127630204551,
0.0014163757357290,
0.0011191442154813,
0.0008212515093345,
0.0005229063396701,
0.0002246904801461
};
/* Incomplete Bessel with Adaptive Simpson rule*/
double f(double x, double bes_a, double bes_b, int der)
{
if(x==0)
return 0;
if(der==1)
return exp(-bes_a/x-bes_b*x);
else if(der==0)
return exp(-bes_a/x-bes_b*x)/x;
return 0;
}
double
simpson(double a, double b, double bes_a, double bes_b, int der)
{
double c = (a+b)/2.;
return (b-a)/6.*(f(a, bes_a, bes_b, der)+4.*f(c, bes_a, bes_b, der)+f(b, bes_a, bes_b, der));
}
double
errest(double a, double b, double *val, double bes_a, double bes_b, int der)
{
double c = (a+b)/2.;
double I = simpson(a,b, bes_a, bes_b, der);
*val = simpson(a,c, bes_a, bes_b, der) + simpson(c,b, bes_a, bes_b, der);
return fabs(I- (*val) );
}
double
IncompBesselK0_Simpson(double tol, int *cnt, double bes_a, double bes_b, int der)
{
int HEAD, n;
double res = 0.0, val;
double a, b;
/* FIXME: the size of the stack should be dynamic. One possibility
* is to use a growable array. Otherwise one can keep only the last value
* of the interval and, if OK, reset it to c2 again! (expensive)
*/
double *ST = (double*) malloc(STACK_SIZE*sizeof(double));
// First value in the stack is upper bound of the integral
ST[0] = 1.;
if( 15.*errest(0.,1.,&val, bes_a, bes_b,der)<tol)
{
free(ST);
*cnt = 0;
return val;
}
a = 0.;
b = 1.;
HEAD = 0;
n=0; // to count the number of splits
for(;;)
{
if(15.*errest(a,b,&val, bes_a, bes_b,der)>tol)
{
if ( HEAD>=(STACK_SIZE-1) )
{
printf("Increase the stack size!\n");
exit(EXIT_FAILURE);
}
ST[++HEAD] = (a+b)/2.;
b = ST[HEAD];
n++;
}
else
{
res += val;
// if a==b or HEAD is pointing to the first element we break
if(HEAD == 0)
break;
a = ST[HEAD];
b = ST[--HEAD];
}
}
free(ST);
*cnt = n;
return res;
}
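/* Illustrative usage (not compiled in; assumes <stdio.h>, and the tolerance
 * and parameters are arbitrary demo values): integrate exp(-a/x - b*x)/x
 * over (0,1] with the adaptive Simpson driver and report how many interval
 * splits were needed.
 */
#if 0
static void demo_incomp_bessel(void)
{
  int splits = 0;
  double val = IncompBesselK0_Simpson(1e-10, &splits, 1.0, 2.0, 0);
  printf("incomplete K0 integral ~ %.12g after %d splits\n", val, splits);
}
#endif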
/* kernels for computing the modified bessel function of the
* second kind and its derivative */
double bessel_f(double x, void * p)
{
gsl_params * params = (gsl_params *)p;
double a = (params->a);
double b = (params->b);
return exp(-a/x-b*x)/x;
}
double bessel_f_der(double x, void * p)
{
gsl_params * params = (gsl_params *)p;
double a = (params->a);
double b = (params->b);
return exp(-a/x-b*x);
}
double call_gsl_bessel_integrator(double a, double b,
gsl_integration_workspace *w,
int der)
{
double result, error;
gsl_function F;
if(der==0)
F.function = &bessel_f;
else if(der==1)
F.function = &bessel_f_der;
if(a==0){
result = 1.e308;
return result;
}
if(a<b && 0){ /* disabled: alternate evaluation via the 2*K0(2*sqrt(a*b)) identity */
gsl_params params = {b,a};
F.params = &params;
gsl_integration_qags (&F, 0, 1, 0, 1e-12, 10000,
w, &result, &error);
double z = 2.*gsl_sf_bessel_K0(2.*sqrt(a*b));
result = z-result;
}
else{
gsl_params params = {a,b};
F.params = &params;
gsl_integration_qags (&F, 0, 1, 0, 1e-12, 10000,
w, &result, &error);
}
return result;
}
double func(double x, double a, double b)
{
if(x==0)
return 0;
else
return exp(-a/x-b*x)/x;
}
double computeK0(double a, double b) {
// integration over [0,1] with "panels" panels
double s=0;
int panels = 4;
double x0;
double h = 1./panels;
for (int p=0;p<panels;p++) {
x0 = 0+p*h;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:s)
#endif
for (int i=0; i<128;i++) {
double x = x0 + h*GL_value[i];
s += GL_weight[i]*func(x,a,b)*h;
}
}
return s;
}
double computeINCBK0(double a, double b, int der) {
// integration over [0,1] with "panels" panels
double s=0;
int panels = 4;
double x0;
double h = 1./panels;
for (int p=0;p<panels;p++) {
x0 = 0+p*h;
/* #ifdef _OPENMP */
/* #pragma omp parallel for reduction(+:s) */
/* #endif */
for (int i=0; i<128;i++) {
double x = x0 + h*GL_value[i];
s += GL_weight[i]*f(x,a,b,der)*h;
}
}
return s;
}
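/* Illustrative cross-check (not compiled in; assumes <stdio.h> and
 * arbitrary parameter values): the fixed 128-point Gauss-Legendre rule and
 * the adaptive Simpson driver approximate the same integral over [0,1], so
 * for der==0 they should agree to quadrature accuracy.
 */
#if 0
static void demo_quadratures(void)
{
  int splits = 0;
  double gl = computeINCBK0(1.0, 2.0, 0);
  double as = IncompBesselK0_Simpson(1e-10, &splits, 1.0, 2.0, 0);
  printf("Gauss-Legendre %.12g vs adaptive Simpson %.12g\n", gl, as);
}
#endif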
|
GB_binop__isle_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int64)
// A*D function (colscale): GB (_AxD__isle_int64)
// D*A function (rowscale): GB (_DxB__isle_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int64)
// C=scalar+B GB (_bind1st__isle_int64)
// C=scalar+B' GB (_bind1st_tran__isle_int64)
// C=A+scalar GB (_bind2nd__isle_int64)
// C=A'+scalar GB (_bind2nd_tran__isle_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
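// For example, GB_BINOP (GB_CX (p), aij, bij, i, j) expands to
// Cx [p] = (aij <= bij) ; the ISLE result is an int64_t 0 or 1.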
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT64 || GxB_NO_ISLE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isle_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isle_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isle_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
/* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4];
affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3];
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
/* map internal c0,c1,c2,c3,c4,c5 to external sx,rx,ry,sy,tx,ty */
double tmp[4]; /* note indexes 0 and 5 remain unchanged */
tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2];
coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3];
}
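/* Illustrative note (not part of the library): with the internal layout the
reverse mapping is evaluated as
u = c0*x + c1*y + c2
v = c3*x + c4*y + c5
so, for example, the external argument list {sx,rx,ry,sy,tx,ty} =
{2,0,0,3,10,20} (a 2x3 scale plus translation) becomes the internal
coefficients {c0..c5} = {2,0,10,0,3,20}, and CoefficientsToAffineArgs()
restores the original ordering. Indexes 0 and 5 are untouched by the
shuffle.
*/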
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 50 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
inverse[0]=determinant*coeff[4];
inverse[1]=determinant*(-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
inverse[3]=determinant*(-coeff[3]);
inverse[4]=determinant*coeff[0];
inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
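/* Illustrative sketch (not part of the library, never compiled): a
round-trip sanity check for the inversion above. The helper name is
hypothetical. Composing a forward affine with its inverse should
reproduce the input point to within rounding error. */
#if 0
static void check_affine_inverse(const double *coeff,double x,double y)
{
double inverse[6],u,v,xr,yr;
InvertAffineCoefficients(coeff,inverse);
u=coeff[0]*x+coeff[1]*y+coeff[2]; /* forward map */
v=coeff[3]*x+coeff[4]*y+coeff[5];
xr=inverse[0]*u+inverse[1]*v+inverse[2]; /* inverse map back */
yr=inverse[3]*u+inverse[4]*v+inverse[5];
assert(fabs(xr-x) < 1.0e-9 && fabs(yr-y) < 1.0e-9);
}
#endif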
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the two-dimensional bilinear polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* The number in parentheses is the minimum number of points needed.
* Anything beyond quintic has not been implemented, pending
* a more automated way of determining terms.
* Note the slight re-ordering of the terms for a quadratic polynomial,
* which allows the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
/* Return the number of terms for a 2d polynomial */
if ( order < 1 || order > 5 ||
( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
return 0; /* invalid polynomial order */
return((size_t) floor((order+1)*(order+2)/2));
}
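/* Illustrative note (not part of the library): the term-count formula
floor((order+1)*(order+2)/2) gives
poly_number_terms(1.0) == 3 (affine)
poly_number_terms(1.5) == 4 (bilinear: floor(2.5*3.5/2) = floor(4.375))
poly_number_terms(2.0) == 6 (quadratic)
poly_number_terms(5.0) == 21 (quintic)
and 0 for any other order, which callers treat as an error.
*/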
static double poly_basis_fn(ssize_t n, double x, double y)
{
/* Return the result for this polynomial term */
switch(n) {
case 0: return( 1.0 ); /* constant */
case 1: return( x );
case 2: return( y ); /* affine order = 1 terms = 3 */
case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x*x );
case 5: return( y*y ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x*x );
case 7: return( x*x*y );
case 8: return( x*y*y );
case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x*x );
case 11: return( x*x*x*y );
case 12: return( x*x*y*y );
case 13: return( x*y*y*y );
case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x*x );
case 16: return( x*x*x*x*y );
case 17: return( x*x*x*y*y );
case 18: return( x*x*y*y*y );
case 19: return( x*y*y*y*y );
case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */
}
return( 0 ); /* should never happen */
}
static const char *poly_basis_str(ssize_t n)
{
/* return the result for this polynomial term */
switch(n) {
case 0: return(""); /* constant */
case 1: return("*ii");
case 2: return("*jj"); /* affine order = 1 terms = 3 */
case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */
case 4: return("*ii*ii");
case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */
case 6: return("*ii*ii*ii");
case 7: return("*ii*ii*jj");
case 8: return("*ii*jj*jj");
case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */
case 10: return("*ii*ii*ii*ii");
case 11: return("*ii*ii*ii*jj");
case 12: return("*ii*ii*jj*jj");
case 13: return("*ii*jj*jj*jj");
case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */
case 15: return("*ii*ii*ii*ii*ii");
case 16: return("*ii*ii*ii*ii*jj");
case 17: return("*ii*ii*ii*jj*jj");
case 18: return("*ii*ii*jj*jj*jj");
case 19: return("*ii*jj*jj*jj*jj");
case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */
}
return( "UNKNOWN" ); /* should never happen */
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
/* polynomial term for x derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 1.0 );
case 2: return( 0.0 ); /* affine order = 1 terms = 3 */
case 3: return( y ); /* bilinear order = 1.5 terms = 4 */
case 4: return( x );
case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */
case 6: return( x*x );
case 7: return( x*y );
case 8: return( y*y );
case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */
case 10: return( x*x*x );
case 11: return( x*x*y );
case 12: return( x*y*y );
case 13: return( y*y*y );
case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */
case 15: return( x*x*x*x );
case 16: return( x*x*x*y );
case 17: return( x*x*y*y );
case 18: return( x*y*y*y );
case 19: return( y*y*y*y );
case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */
}
return( 0.0 ); /* should never happen */
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
/* polynomial term for y derivative */
switch(n) {
case 0: return( 0.0 ); /* constant */
case 1: return( 0.0 );
case 2: return( 1.0 ); /* affine order = 1 terms = 3 */
case 3: return( x ); /* bilinear order = 1.5 terms = 4 */
case 4: return( 0.0 );
case 5: return( y ); /* quadratic order = 2 terms = 6 */
default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */
}
/* NOTE: the only reason that recurrence does not hold for 'quadratic'
is the re-arrangement of terms that allows for 'bilinear'
*/
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
double
distort[6];
Image
*deskew_image;
/*
Affine transform image.
*/
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(affine_matrix != (AffineMatrix *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
distort[0]=affine_matrix->sx;
distort[1]=affine_matrix->rx;
distort[2]=affine_matrix->ry;
distort[3]=affine_matrix->sy;
distort[4]=affine_matrix->tx;
distort[5]=affine_matrix->ty;
deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
MagickTrue,exception);
return(deskew_image);
}
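/* Usage sketch (not part of the library, never compiled): rotating an image
30 degrees about its origin via AffineTransformImage(). The helper name is
hypothetical; GetAffineMatrix() initializes the matrix to the identity. */
#if 0
static Image *example_rotate_30(const Image *image,ExceptionInfo *exception)
{
AffineMatrix affine;
double theta=DegreesToRadians(30.0);
GetAffineMatrix(&affine); /* start from the identity matrix */
affine.sx=cos(theta); affine.rx=sin(theta);
affine.ry=(-sin(theta)); affine.sy=cos(theta);
return(AffineTransformImage(image,&affine,exception));
}
#endif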
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% In future, a variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change
% in the future to require a more method-specific release function.
%
% Because of this, this method should not be regarded as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
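/* Illustrative note (not part of the library): MagickRound() rounds to the
nearest integer with exact halves going up via ceil(), e.g.
MagickRound(2.3) == 2.0, MagickRound(2.5) == 3.0, and
MagickRound(-2.5) == -2.0 (the ceil of a negative half).
*/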
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
size_t
i;
size_t
number_coefficients, /* number of coefficients to return (array size) */
cp_size, /* number of floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is, generate a mapping so that x,y -> u,v given u,v,x,y
*/
number_values = 2; /* special case: two values u,v */
cp_values = 0; /* the values u,v come BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP definition involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coefficients=0;
switch (*method) {
case AffineDistortion:
case RigidAffineDistortion:
/* also BarycentricColorInterpolate: */
number_coefficients=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficients depends on the given polynomial 'order' */
i = poly_number_terms(arguments[0]);
number_coefficients = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coefficients=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coefficients=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadrilateralDistortion:
number_coefficients=19; /* BilinearForward + BilinearReverse */
break;
#endif
case ShepardsDistortion:
number_coefficients=1; /* The power factor to use */
break;
case ArcDistortion:
number_coefficients=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coefficients=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coefficients=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coefficients=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coefficients=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff=(double *) AcquireQuantumMemory(number_coefficients,sizeof(*coeff));
if (coeff == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coefficients; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usually 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix=AcquireMagickMatrix(3UL,3UL);
vectors=(double **) AcquireQuantumMemory(number_values,
sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y]
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordinates too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
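/* Illustrative sketch (not part of this file, never compiled): as we
understand it, LeastSquaresAddTerms() accumulates the normal equations
A^T*A and A^T*b of the least-squares fit, one observation row at a time;
the helper below is a hypothetical re-statement of that accumulation
under that assumption. */
#if 0
static void add_observation(double **matrix,double **vectors,
const double *terms,const double *results,size_t rank,size_t nvec)
{
size_t i,j;
for (j=0; j < rank; j++)
{
for (i=0; i < rank; i++)
matrix[i][j]+=terms[i]*terms[j]; /* A^T*A entry */
for (i=0; i < nvec; i++)
vectors[i][j]+=terms[j]*results[i]; /* A^T*b, one vector per value */
}
}
#endif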
case RigidAffineDistortion:
{
double
inverse[6],
**matrix,
terms[5],
*vectors[1];
MagickBooleanType
status;
/*
Rigid affine (also known as a similarity transform) restricts the affine
coefficients to 4 (S, R, Tx, Ty) with Sy=Sx and Ry = -Rx, so that one has
only scale, rotation, and translation. No skew.
*/
if (((number_arguments % cp_size) != 0) || (number_arguments < cp_size))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions,*method),2.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/*
Rigid affine requires a 4x4 least-squares matrix (zeroed).
*/
matrix=AcquireMagickMatrix(4UL,4UL);
if (matrix == (double **) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
CommandOptionToMnemonic(MagickDistortOptions,*method));
return((double *) NULL);
}
/*
Add control points for least squares solving.
*/
vectors[0]=(&(coeff[0]));
for (i=0; i < number_arguments; i+=4)
{
terms[0]=arguments[i+0];
terms[1]=(-arguments[i+1]);
terms[2]=1.0;
terms[3]=0.0;
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+2]),4UL,1UL);
terms[0]=arguments[i+1];
terms[1]=arguments[i+0];
terms[2]=0.0;
terms[3]=1.0;
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+3]),4UL,1UL);
}
/*
Solve for least-squares coefficients.
*/
status=GaussJordanElimination(matrix,vectors,4UL,1UL);
matrix=RelinquishMagickMatrix(matrix,4UL);
if (status == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions,*method));
return((double *) NULL);
}
/*
Convert (S, R, Tx, Ty) to an affine projection.
*/
inverse[0]=coeff[0];
inverse[1]=coeff[1];
inverse[2]=(-coeff[1]);
inverse[3]=coeff[0];
inverse[4]=coeff[2];
inverse[5]=coeff[3];
AffineArgsToCoefficients(inverse);
InvertAffineCoefficients(inverse,coeff);
*method=AffineDistortion;
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in their inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with 2 values (u,v), i.e. image distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficients */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
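/* Illustrative note (not part of the library): the coefficients returned
above form the reverse mapping, so each destination pixel (i,j) samples
the source at
x = c0*i + c1*j + c2
y = c3*i + c4*j + c5
which is why the forward arguments are inverted before being stored.
*/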
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a two-dimensional Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existence! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
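/* Worked example (not part of the library): for a plain 90 degree rotation
about the origin, arguments {x=0, y=0, a=90} give cosine=0, sine=1 and
sx=sy=1, so the reverse-mapping coefficients become
{c0..c5} = {0, 1, 0, -1, 0, 0}
i.e. each destination pixel (i,j) samples the source at (j,-i), as
expected for a reverse-mapped rotation.
*/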
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines which part of the distorted image is the 'ground'
side of the horizon; the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaneously
+ Will only work with a two-dimensional Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
+ It is not linear, but it is simple to generate an inverse
+ All straight lines within an image remain straight,
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* -x*u, the c6 term */
terms[7]=-terms[1]*arguments[i+cp_u]; /* -y*u, the c7 term */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* -x*v, the c6 term */
terms[7]=-terms[4]*arguments[i+cp_v]; /* -y*v, the c7 term */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate the 9th coefficient: the ground-sky determination.
What is the sign of the 'ground' in the r() denominator affine function?
Just use any valid image coordinate (first control point) in the
destination to determine which part of the view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
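/* Illustrative sketch (not part of the library, never compiled): evaluating
the reverse perspective mapping documented above for one destination
coordinate. The helper name is hypothetical; c[8] is the ground-sky sign
computed at the end of this case. */
#if 0
static void map_perspective(const double c[9],double i,double j,
double *x,double *y)
{
double r=c[6]*i+c[7]*j+1.0; /* the shared denominator affine r() */
*x=(c[0]*i+c[1]*j+c[2])/r;
*y=(c[3]*i+c[4]*j+c[5])/r;
/* destination points with r*c[8] < 0 lie on the 'sky' side */
}
#endif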
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficients (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinant = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate the 9th coefficient: the ground-sky determination.
What is the sign of the 'ground' in the r() denominator affine function?
Just use any valid image coordinate in the destination for determination.
For a forward mapped perspective the image's 0,0 coordinate will map to
c2,c5 in the distorted image, so use the sign of the denominator there.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix=AcquireMagickMatrix(4UL,4UL);
vectors=(double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficients, but in the forward
direction, due to the swapped indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algebra to work out the
reversed mapping formula, but it resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c0 + c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: technically coefficient c5 is not needed anymore,
but is kept for completeness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
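/* Illustrative sketch (not part of the library, never compiled): the reverse
mapping worked out in the comment above, solving the quadratic in y for a
destination coordinate (i,j). The helper name is hypothetical; it returns
nonzero when the discriminant is negative and no real solution exists. */
#if 0
static int bilinear_forward_inverse(const double c[10],double i,double j,
double *x,double *y)
{
double b,cc,r;
i-=c[3]; j-=c[7];
b=c[6]*i-c[2]*j+c[8]; /* a*y^2 + b*y + cc == 0, with 2*a == c9 */
cc=c[4]*i-c[0]*j;
r=b*b-c[9]*(cc+cc); /* the discriminant */
if (r < 0.0)
return(-1); /* no solution */
if (fabs(c[9]) > MagickEpsilon)
*y=(-b+sqrt(r))/c[9];
else
*y=(-cc)/b;
*x=(i-c[1]*(*y))/(c[0]+c[2]*(*y));
return(0);
}
#endif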
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficients are used to hold global polynomial information
c0 = Order of the polynomial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion,
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix=AcquireMagickMatrix(nterms,nterms);
vectors=(double **) AcquireQuantumMemory(number_values,
sizeof(*vectors));
terms=(double *) AcquireQuantumMemory(nterms,sizeof(*terms));
if ((matrix == (double **) NULL) || (vectors == (double **) NULL) ||
(terms == (double *) NULL))
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x nterms vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
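/* Illustrative sketch (not part of the library, never compiled): once the
coefficients are solved, each mapped value is evaluated by summing the
basis terms, mirroring the coefficient layout built above. The helper
name is hypothetical. */
#if 0
static double poly_eval(const double *coeff,size_t value,double x,double y)
{
size_t nterms=(size_t) coeff[1];
double sum=0.0;
ssize_t k;
for (k=0; k < (ssize_t) nterms; k++)
sum+=coeff[2+value*nterms+(size_t) k]*poly_basis_fn(k,x,y);
return(sum);
}
#endif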
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bottom edge to this radius (radial scaling)
By default, if the radii arguments are not provided, the image radius
is calculated so the horizontal center-line fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
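/* Worked example (not part of the library): for `-distort Arc 60` on a
100x100 image, coeff[1] = DegreesToRadians(60) ~= 1.047, coeff[0] stays at
-pi/2 (arc centered at the top), coeff[3] = 99 (rows-1), and
coeff[2] = 100/1.047 + 99/2 ~= 145.0, the radius of the top edge. The
'bestfit' code in DistortImage() later rescales coeff[1] and coeff[3]
into per-pixel scaling factors.
*/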
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 are the sanitized version of the first 6 input args
Coefficient 6 is the angle-to-coord ratio, and vice-versa
Coefficient 7 is the radius-to-coord ratio, and vice-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usually 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius is 0 or negative, it's a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* conversion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a cylinder and a flat plane, from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficients: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is the calculated radius of the cylinder.
Coeff 2,3 center of distortion of the input image
Coefficients 4,5 center of distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
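/* Worked example (not part of the library): with FOV=90 on a 1000 pixel
wide input, Cylinder2Plane treats the image as wrapped onto the cylinder,
so coeff[1] = 1000/(pi/2) ~= 636.6 pixels of radius, while Plane2Cylinder
treats it as flat, giving coeff[1] = 1000/(2*tan(45 degrees)) = 500.
*/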
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficient values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coordinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* no center given: use the image center (image coordinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
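/* Illustrative sketch (not part of the library, never compiled): how the
de-normalized coefficients could be applied per destination pixel. The
helper name is hypothetical; r is the distance from the center
(c[8],c[9]) and the X and Y scalings may differ. */
#if 0
static void barrel_map(const double c[10],double i,double j,
double *x,double *y)
{
double dx=i-c[8],dy=j-c[9];
double r=sqrt(dx*dx+dy*dy);
double fx=((c[0]*r+c[1])*r+c[2])*r+c[3]; /* A*r^3 + B*r^2 + C*r + D */
double fy=((c[4]*r+c[5])*r+c[6])*r+c[7];
/* BarrelDistortion scales the radius by f(r);
BarrelInverseDistortion divides by it instead */
*x=c[8]+fx*dx;
*y=c[9]+fy*dy;
}
#endif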
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficients!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resizes an image using the equivalent but slower image
% distortion operator. The filter is applied using EWA cylindrical
% resampling. But like resize, the final image size is limited to whole
% pixels, with no effect on the result from virtual-pixels.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"
Image
*resize_image,
*tmp_image;
RectangleInfo
crop_area;
double
distort_args[12];
VirtualPixelMethod
vp_save;
/*
Distort resize image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
/* Do not short-circuit this resize if final image size is unchanged */
(void) memset(distort_args,0,sizeof(distort_args));
distort_args[4]=(double) image->columns;
distort_args[6]=(double) columns;
distort_args[9]=(double) image->rows;
distort_args[11]=(double) rows;
vp_save=GetImageVirtualPixelMethod(image);
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if (tmp_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
exception);
if (image->alpha_trait == UndefinedPixelTrait)
{
/*
Image has no alpha channel, so we are free to use it.
*/
(void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception);
tmp_image=DestroyImage(tmp_image);
if (resize_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
}
else
{
/*
Image has transparency, so handle colors and alpha separately.
Basically we need to separate the Virtual-Pixel alpha in the resized
image, so only the actual original image's alpha channel is used.
First, distort the alpha channel separately.
*/
Image
*resize_alpha;
(void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
(void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception);
tmp_image=DestroyImage(tmp_image);
if (resize_alpha == (Image *) NULL)
return((Image *) NULL);
/* distort the actual image containing alpha + VP alpha */
tmp_image=CloneImage(image,0,0,MagickTrue,exception);
if (tmp_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(tmp_image,
TransparentVirtualPixelMethod,exception);
resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
MagickTrue,exception);
tmp_image=DestroyImage(tmp_image);
if (resize_image == (Image *) NULL)
{
resize_alpha=DestroyImage(resize_alpha);
return((Image *) NULL);
}
/* replace the resized image's alpha with the separately distorted alpha */
(void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
(void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
(void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
MagickTrue,0,0,exception);
resize_alpha=DestroyImage(resize_alpha);
resize_image->alpha_trait=image->alpha_trait;
resize_image->compose=image->compose;
}
(void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
/*
Clean up the results of the Distortion
*/
crop_area.width=columns;
crop_area.height=rows;
crop_area.x=0;
crop_area.y=0;
tmp_image=resize_image;
resize_image=CropImage(tmp_image,&crop_area,exception);
tmp_image=DestroyImage(tmp_image);
if (resize_image != (Image *) NULL)
{
resize_image->page.width=0;
resize_image->page.height=0;
}
return(resize_image);
}
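/* Usage sketch (not part of the library, never compiled): DistortResizeImage()
is normally reached through DistortImage() with the ResizeDistortion method,
which requires exactly two arguments. The helper name is hypothetical. */
#if 0
static Image *example_distort_resize(const Image *image,
ExceptionInfo *exception)
{
double args[2] = { 640.0, 480.0 }; /* new columns, rows */
return(DistortImage(image,ResizeDistortion,2,args,MagickFalse,exception));
}
#endif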
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set, print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to an Affine distortion when less
% than 4 control point pairs are provided. Affine distortions, however,
% let you use any number of control point pairs: zero pairs is a no-op
% (viewport only) distortion, one pair is a translation, and two pairs
% of control points do a scale-rotate-translate, without any shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original image's canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
PixelInfo
invalid; /* the color to assign when distort result is invalid */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usually the four corners of the source image are enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
        InitialBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
        InitialBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
        /* direct calculation: center is either a pixel center or a pixel edge,
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
       Yes this tends to 'over do' the final image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
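  /* Worked example of the 'fix_bounds' rounding above (illustrative):
     if the mapped corners give min=(-10.2,-3.7) and max=(95.4,48.1) then
       geometry.x = floor(-10.7) = -11,  geometry.y = floor(-4.2) = -5,
       geometry.width  = ceil(95.4+11+0.5) = 107,
       geometry.height = ceil(48.1+5+0.5)  = 54,
     so the viewport deliberately gains a pixel of margin on each side.
  */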
  /* A user-provided 'viewport' expert option may override parts of the
     current output image geometry.  It also overrides the default
     'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
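  /* Hedged usage sketch: the viewport is normally set from the command line
     with "-define distort:viewport=WxH+X+Y"; the programmatic equivalent
     (assuming a writable 'image') would be something like this.
  */
#if 0
  (void) SetImageArtifact(image,"distort:viewport","640x480+10-20");
#endif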
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen,MagickPathExtent,
" -size %.20gx%.20g -page %+.20g%+.20g xc: +insert \\\n",
(double) geometry.width,(double) geometry.height,(double) geometry.x,
(double) geometry.y);
lookup="v.p{xx-v.page.x-0.5,yy-v.page.y-0.5}";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{xx-page.x-0.5,yy-page.y-0.5}"; /* simplify lookup */
}
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine projection:\n");
(void) FormatLocaleFile(stderr,
" -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%.*g,",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[5]);
(void) FormatLocaleFile(stderr,
"Equivalent scale, rotation(deg), translation:\n");
(void) FormatLocaleFile(stderr," %.*g,%.*g,%.*g,%.*g\n",
GetMagickPrecision(),sqrt(inverse[0]*inverse[0]+
inverse[1]*inverse[1]),GetMagickPrecision(),
RadiansToDegrees(atan2(inverse[1],inverse[0])),
GetMagickPrecision(),inverse[4],GetMagickPrecision(),inverse[5]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Affine distort, FX equivalent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," xx=%+.*g*ii %+.*g*jj %+.*g;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr," yy=%+.*g*ii %+.*g*jj %+.*g;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," %s' \\\n",lookup);
break;
}
case PerspectiveDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr,"Perspective Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort PerspectiveProjection \\\n '");
for (i=0; i < 4; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for ( ; i < 7; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[7]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%.1024s",image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n",
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]);
(void) FormatLocaleFile(stderr,
" xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr,
" yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n",
coeff[8] < 0.0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
{
(void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4],coeff[5],coeff[6],coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5-
coeff[7]);
(void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if (coeff[9] != 0)
{
(void) FormatLocaleFile(stderr,
" rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4],
-coeff[0]);
(void) FormatLocaleFile(stderr,
" yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]);
}
else
(void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4],coeff[0]);
(void) FormatLocaleFile(stderr,
" xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0],
coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr," (rt < 0 ) ? red : %s'\n",
lookup);
else
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BilinearReverseDistortion:
{
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr,
" xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1],
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr,
" yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5],
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr,
"Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0],
(unsigned long) nterms);
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n yy =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr,"\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr,
" c%.20g = %+lf\n",(double) i,coeff[i]);
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1],
coeff[4]);
(void) FormatLocaleFile(stderr,
" yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]);
(void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1],coeff[7] );
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr,
"DePolar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n",
coeff[6],+coeff[4]);
(void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n",
coeff[7],+coeff[1]);
(void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n",
coeff[2]);
(void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n",
coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n",
coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1],
coeff[2] );
(void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
double
xc,
yc;
/*
          NOTE: This does the barrel distortion in pixel coords, not image
          coords.  The internal distortion must do it in image coordinates,
          so that is what the center coeff (8,9) is given in.
*/
xc=((double)image->columns-1.0)/2.0+image->page.x;
yc=((double)image->rows-1.0)/2.0+image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]-
0.5,coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr,
" ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2],
coeff[3]);
(void) FormatLocaleFile(stderr,
" jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6],
coeff[7]);
(void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/*
    A user-provided 'scale' expert option scales the output image size by
    the given factor, allowing super-sampling of the distorted image space.
    Any scaling factors must naturally be reduced by the same factor.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
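  /* Hedged super-sampling sketch (caller-side, not part of this function):
     render the distortion at twice the scale, then resize back down.
  */
#if 0
  (void) SetImageArtifact(image,"distort:scale","2");
  super=DistortImage(image,method,number_arguments,arguments,bestfit,
    exception);
  /* ... then resize 'super' back to 50% to complete the super-sampling */
#endif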
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
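/* For example, with output_scaling = 0.5 (a 2x super-sampled output),
   ScaleFilter(f,a,b,c,d) expands to
     ScaleResampleFilter(f, 0.5*a, 0.5*b, 0.5*c, 0.5*d)
   so the EWA derivative vectors shrink to match the enlarged pixel grid. */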
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
        validity; /* how mathematically valid the mapping is */
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel color to assign to distorted image */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
ssize_t
i;
Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
            /* Affine partial derivatives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
            /* Bilinear partial derivatives for the scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
            /* Forward mapping needs the reversed polynomial equations,
             * which unfortunately require a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
            /* Handle special degenerate (non-quadratic) case,
             * currently without horizon anti-aliasing */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
               FUTURE: Scaling factors or Derivatives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
            /* Arc distortion partial scaling vectors
               are derived by mapping the perpendicular unit vectors
               dR and dA*R*2PI rather than trying to map dx and dy.
               The result is a very simple orthogonally aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
          { /* 2D Cartesian to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
            /* Polar scaling vectors are based on mapping dR and dA vectors.
               This results in very simple orthogonal scaling vectors.
            */
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
          { /* 2D Polar to Cartesian */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
            /* derivatives are useless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
            /* derivatives... (see personal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
          { /* 3D Tangential Plane to Cylinder */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
            /* is pixel valid - horizon of an infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
          { /* Lens Barrel Distortion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
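          /* Worked note on the barrel mapping above (illustrative only):
             with r = hypot(ii,jj) the radial gain is
               f(r) = A*r^3 + B*r^2 + C*r + D            (fx, fy above)
             and g(r) = f'(r)/r = (3*A*r^2 + 2*B*r + C)/r  (gx, gy above),
             so the EWA vector term gx*d.x*d.x + fx is simply the partial
             derivative d(x*f(r))/dx = f(r) + x^2*f'(r)/r.
          */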
case ShepardsDistortion:
          { /* Shepards Method, or Inverse Distance Weighting, for
               displacement around the destination image control points.
               The input arguments are the coefficients of the function.
               This is more of a 'displacement' function than an
               absolute distortion function.
               Note: we cannot determine derivatives using Shepards method,
               so only a point sample interpolation can be used.
            */
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
            s.x += d.x; /* make it a relative displacement */
s.y += d.y;
break;
}
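          /* Worked form of the Shepards weighting above (illustrative):
             with power factor p = coeff[0], control point k gets weight
               w_k = 1/max(1,(d_k^2)^p)
             where d_k^2 is the squared distance to that point, and the
             lookup is d plus the weighted mean displacement
               sum(w_k*(src_k-dst_k))/sum(w_k).
          */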
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DistortImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
  /* Arc does not return an offset unless 'bestfit' is in effect,
     and the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
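/*
  Hedged end-to-end sketch (illustrative, not part of this file): calling
  DistortImage with four control-point pairs, each given as
  src_x,src_y,dst_x,dst_y; 'source' and 'exception' are assumed to exist.
*/
#if 0
{
  double args[16] = {
      0.0,  0.0,  30.0, 10.0,  /* top-left, pulled right and down */
    100.0,  0.0,  90.0,  0.0,  /* top-right */
      0.0,100.0,   0.0,100.0,  /* bottom-left, unmoved */
    100.0,100.0, 100.0, 90.0   /* bottom-right */
  };
  Image *warped=DistortImage(source,PerspectiveDistortion,16,args,
    MagickTrue,exception);
}
#endif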
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners.  Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*distort_image,
*rotate_image;
double
angle;
PointInfo
shear;
size_t
rotations;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=fmod(degrees,360.0);
while (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
return(IntegralRotateImage(image,rotations,exception));
distort_image=CloneImage(image,0,0,MagickTrue,exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
exception);
rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
°rees,MagickTrue,exception);
distort_image=DestroyImage(distort_image);
return(rotate_image);
}
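/* Worked example of the angle normalization above: degrees = 270 gives
   fmod(270,360) = 270; the loop subtracts 90 three times (rotations = 3),
   leaving angle = 0, so the shears vanish and the fast IntegralRotateImage
   path is taken.  Any residual angle instead routes the full 'degrees'
   through a ScaleRotateTranslate distortion. */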
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
const SparseColorMethod method,const size_t number_arguments,
const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;
Image
*sparse_image;
size_t
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Determine number of color values needed per control point */
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
number_colors++;
/*
    Convert input arguments into mapping coefficients; in this case
we are mapping (distorting) colors, rather than coordinates.
*/
{ DistortMethod
distort_method;
distort_method=(DistortMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
      Note some distort methods may fall back to other, simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse color method. This also ensures
correct two and one color Barycentric handling.
*/
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
  /* Generate a new image for the interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy is: if the storage class could change, then
   * clone the image.
*/
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel to assign to distorted image */
ssize_t
i;
Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(sparse_image,&pixel);
for (i=0; i < (ssize_t) image->columns; i++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
{ /* Inverse (Squared) Distance weights average (IDW) */
size_t
k;
double
denominator;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=0.0;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=0.0;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=0.0;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=0.0;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
weight = pow(weight,coeff[0]); /* inverse of power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red += arguments[x++]*weight;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green += arguments[x++]*weight;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue += arguments[x++]*weight;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black += arguments[x++]*weight;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha += arguments[x++]*weight;
denominator += weight;
}
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red/=denominator;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green/=denominator;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue/=denominator;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black/=denominator;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha/=denominator;
break;
}
case ManhattanColorInterpolate:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
fabs((double)i-arguments[ k ])
+ fabs((double)j-arguments[k+1]);
if ( distance < minimum ) {
ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
case VoronoiColorInterpolate:
default:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
*/
for (k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
}
/* set the color directly back into the source image */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
SetPixelViaPixelInfo(sparse_image,&pixel,q);
q+=GetPixelChannels(sparse_image);
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SparseColorTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
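/*
  Hedged usage sketch (illustrative, not part of this file): SparseColor
  arguments come in groups of x, y, then one normalized value per active
  channel (number_colors of them).  For an RGB image (three active
  channels) and two control points, nearest-color (Voronoi) interpolation
  looks like this; 'source' and 'exception' are assumed to exist.
*/
#if 0
{
  double args[10] = {
    10.0, 10.0,  1.0, 0.0, 0.0,  /* point (10,10) is red  */
    90.0, 90.0,  0.0, 0.0, 1.0   /* point (90,90) is blue */
  };
  Image *gradient=SparseColorImage(source,VoronoiColorInterpolate,10,args,
    exception);
}
#endif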
|
GB_binop__isle_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__isle_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__isle_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_fp64)
// A*D function (colscale): GB (_AxD__isle_fp64)
// D*A function (rowscale): GB (_DxB__isle_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_fp64)
// C=scalar+B GB (_bind1st__isle_fp64)
// C=scalar+B' GB (_bind1st_tran__isle_fp64)
// C=A+scalar GB (_bind2nd__isle_fp64)
// C=A'+scalar GB (_bind2nd_tran__isle_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_FP64 || GxB_NO_ISLE_FP64)
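// A worked note on the operator above: ISLE is the "is less than or equal"
// op, whose result has the operand type, so for FP64 cij is the double
// value 1.0 when aij <= bij and 0.0 otherwise (unlike GrB_LE_FP64, whose
// result type is boolean).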
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isle_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isle_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
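// Note: GB_FLIPPED only controls which argument order the template compiles,
// fmult(x,y) versus fmult(y,x); whether the flipped branch is needed at all
// is decided by GB_BINOP_FLIP above.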
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isle_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
se2ramp.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
static PyObject *se2ramp(PyObject *self, PyObject *args, PyObject *keywds);
static PyObject *se2ramp(PyObject *self, PyObject *args, PyObject *keywds)
{
PyObject *etc;
PyArrayObject *x,*y,*rampparams;
double goal,r0,r1,r4,r5,pm0,pm1;
int i;
npy_intp dims[1];
// etc = PyList_New(0);
static char *kwlist[] = {"rampparams","x","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
{
return NULL;
}
goal = IND(rampparams,0);
r0 = IND(rampparams,1);
r1 = IND(rampparams,2);
pm0 = IND(rampparams,3);
r4 = IND(rampparams,4);
r5 = IND(rampparams,5);
pm1 = IND(rampparams,6);
dims[0] = x->dimensions[0];
y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
if(y == NULL)
{
return NULL;
}
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(y,i) = goal + pm0*exp(-r0*IND(x,i) + r1) + pm1*exp(-r4*IND(x,i) + r5);
}
return PyArray_Return(y);
}
static char module_docstring[]="\
This function evaluates a double-exponential ramp model.\n\
\n\
Parameters\n\
----------\n\
rampparams: Array of model parameters [goal, r0, r1, pm0, r4, r5, pm1]\n\
goal: asymptotic level as x -> inf\n\
r0,r4: exponential rates\n\
r1,r5: exponential offsets\n\
pm0,pm1: sign/amplitude of each exponential term\n\
x: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
Array of y values: y = goal + pm0*exp(-r0*x + r1) + pm1*exp(-r4*x + r5)\n\
\n\
Revisions\n\
---------\n\
2010-07-30 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2010-12-24 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to C\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";
static PyMethodDef module_methods[] = {
{"se2ramp",(PyCFunction)se2ramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_se2ramp(void)
#else
initse2ramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"se2ramp", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("se2ramp", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
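/*
Example usage from Python once the extension is built (the build step
itself is assumed; module and function names follow this file):

    import numpy as np
    import se2ramp
    x = np.linspace(0.0, 1.0, 100)
    rampparams = np.array([1.0, 20.0, 0.0, -1.0, 5.0, 0.0, -1.0])
    y = se2ramp.se2ramp(rampparams, x)
*/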
|
convolution_1x1_pack8_fp16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_fp16_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
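// that is, for each block of 8 output channels and 8 input channels,
// 64 fp16 weights are stored as 8 runs of 8: run p holds column p of the
// 8x8 block, with the 8 output channels interleaved innermost (see the
// g00[0..7] stores below)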
Mat weight_data_r2 = kernel.reshape(1, num_input, num_output);
weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)2 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
unsigned short* g00 = weight_data_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
g00[0] = float32_to_float16(k00[0]);
g00[1] = float32_to_float16(k10[0]);
g00[2] = float32_to_float16(k20[0]);
g00[3] = float32_to_float16(k30[0]);
g00[4] = float32_to_float16(k40[0]);
g00[5] = float32_to_float16(k50[0]);
g00[6] = float32_to_float16(k60[0]);
g00[7] = float32_to_float16(k70[0]);
g00 += 8;
g00[0] = float32_to_float16(k01[0]);
g00[1] = float32_to_float16(k11[0]);
g00[2] = float32_to_float16(k21[0]);
g00[3] = float32_to_float16(k31[0]);
g00[4] = float32_to_float16(k41[0]);
g00[5] = float32_to_float16(k51[0]);
g00[6] = float32_to_float16(k61[0]);
g00[7] = float32_to_float16(k71[0]);
g00 += 8;
g00[0] = float32_to_float16(k02[0]);
g00[1] = float32_to_float16(k12[0]);
g00[2] = float32_to_float16(k22[0]);
g00[3] = float32_to_float16(k32[0]);
g00[4] = float32_to_float16(k42[0]);
g00[5] = float32_to_float16(k52[0]);
g00[6] = float32_to_float16(k62[0]);
g00[7] = float32_to_float16(k72[0]);
g00 += 8;
g00[0] = float32_to_float16(k03[0]);
g00[1] = float32_to_float16(k13[0]);
g00[2] = float32_to_float16(k23[0]);
g00[3] = float32_to_float16(k33[0]);
g00[4] = float32_to_float16(k43[0]);
g00[5] = float32_to_float16(k53[0]);
g00[6] = float32_to_float16(k63[0]);
g00[7] = float32_to_float16(k73[0]);
g00 += 8;
g00[0] = float32_to_float16(k04[0]);
g00[1] = float32_to_float16(k14[0]);
g00[2] = float32_to_float16(k24[0]);
g00[3] = float32_to_float16(k34[0]);
g00[4] = float32_to_float16(k44[0]);
g00[5] = float32_to_float16(k54[0]);
g00[6] = float32_to_float16(k64[0]);
g00[7] = float32_to_float16(k74[0]);
g00 += 8;
g00[0] = float32_to_float16(k05[0]);
g00[1] = float32_to_float16(k15[0]);
g00[2] = float32_to_float16(k25[0]);
g00[3] = float32_to_float16(k35[0]);
g00[4] = float32_to_float16(k45[0]);
g00[5] = float32_to_float16(k55[0]);
g00[6] = float32_to_float16(k65[0]);
g00[7] = float32_to_float16(k75[0]);
g00 += 8;
g00[0] = float32_to_float16(k06[0]);
g00[1] = float32_to_float16(k16[0]);
g00[2] = float32_to_float16(k26[0]);
g00[3] = float32_to_float16(k36[0]);
g00[4] = float32_to_float16(k46[0]);
g00[5] = float32_to_float16(k56[0]);
g00[6] = float32_to_float16(k66[0]);
g00[7] = float32_to_float16(k76[0]);
g00 += 8;
g00[0] = float32_to_float16(k07[0]);
g00[1] = float32_to_float16(k17[0]);
g00[2] = float32_to_float16(k27[0]);
g00[3] = float32_to_float16(k37[0]);
g00[4] = float32_to_float16(k47[0]);
g00[5] = float32_to_float16(k57[0]);
g00[6] = float32_to_float16(k67[0]);
g00[7] = float32_to_float16(k77[0]);
g00 += 8;
}
}
}
static void conv1x1s1_sgemm_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
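// pixels are tiled in groups of 12, then 8, 4, 2 and 1; a tile that
// starts at pixel i is stored in tmp channel
//   i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2,
// the same expression used by the consumer loops below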
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
__m256 _r8 = _mm256_loadu_ps(img0 + 64);
__m256 _r9 = _mm256_loadu_ps(img0 + 72);
__m256 _r10 = _mm256_loadu_ps(img0 + 80);
__m256 _r11 = _mm256_loadu_ps(img0 + 88);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
_mm256_storeu_ps(tmpptr + 64, _r8);
_mm256_storeu_ps(tmpptr + 72, _r9);
_mm256_storeu_ps(tmpptr + 80, _r10);
_mm256_storeu_ps(tmpptr + 88, _r11);
tmpptr += 96;
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
tmpptr += 64;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
tmpptr += 32;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
tmpptr += 16;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
_mm256_storeu_ps(tmpptr, _r0);
tmpptr += 8;
img0 += bottom_blob.cstep * 8;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float* outptr = out;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
__m256 _sum8 = _bias0;
__m256 _sum9 = _bias0;
__m256 _sum10 = _bias0;
__m256 _sum11 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_comp_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_comp_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w7, _val77, _sum7);
__m256 _val80 = _mm256_broadcast_ss(tmpptr + 64);
__m256 _val81 = _mm256_broadcast_ss(tmpptr + 65);
__m256 _val82 = _mm256_broadcast_ss(tmpptr + 66);
__m256 _val83 = _mm256_broadcast_ss(tmpptr + 67);
__m256 _val84 = _mm256_broadcast_ss(tmpptr + 68);
__m256 _val85 = _mm256_broadcast_ss(tmpptr + 69);
__m256 _val86 = _mm256_broadcast_ss(tmpptr + 70);
__m256 _val87 = _mm256_broadcast_ss(tmpptr + 71);
__m256 _val90 = _mm256_broadcast_ss(tmpptr + 72);
__m256 _val91 = _mm256_broadcast_ss(tmpptr + 73);
__m256 _val92 = _mm256_broadcast_ss(tmpptr + 74);
__m256 _val93 = _mm256_broadcast_ss(tmpptr + 75);
__m256 _val94 = _mm256_broadcast_ss(tmpptr + 76);
__m256 _val95 = _mm256_broadcast_ss(tmpptr + 77);
__m256 _val96 = _mm256_broadcast_ss(tmpptr + 78);
__m256 _val97 = _mm256_broadcast_ss(tmpptr + 79);
_sum8 = _mm256_comp_fmadd_ps(_w0, _val80, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w1, _val81, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w2, _val82, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w3, _val83, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w4, _val84, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w5, _val85, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w6, _val86, _sum8);
_sum8 = _mm256_comp_fmadd_ps(_w7, _val87, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_w0, _val90, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w1, _val91, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w2, _val92, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w3, _val93, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w4, _val94, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w5, _val95, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w6, _val96, _sum9);
_sum9 = _mm256_comp_fmadd_ps(_w7, _val97, _sum9);
__m256 _val100 = _mm256_broadcast_ss(tmpptr + 80);
__m256 _val101 = _mm256_broadcast_ss(tmpptr + 81);
__m256 _val102 = _mm256_broadcast_ss(tmpptr + 82);
__m256 _val103 = _mm256_broadcast_ss(tmpptr + 83);
__m256 _val104 = _mm256_broadcast_ss(tmpptr + 84);
__m256 _val105 = _mm256_broadcast_ss(tmpptr + 85);
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86);
__m256 _val107 = _mm256_broadcast_ss(tmpptr + 87);
__m256 _val110 = _mm256_broadcast_ss(tmpptr + 88);
__m256 _val111 = _mm256_broadcast_ss(tmpptr + 89);
__m256 _val112 = _mm256_broadcast_ss(tmpptr + 90);
__m256 _val113 = _mm256_broadcast_ss(tmpptr + 91);
__m256 _val114 = _mm256_broadcast_ss(tmpptr + 92);
__m256 _val115 = _mm256_broadcast_ss(tmpptr + 93);
__m256 _val116 = _mm256_broadcast_ss(tmpptr + 94);
__m256 _val117 = _mm256_broadcast_ss(tmpptr + 95);
_sum10 = _mm256_comp_fmadd_ps(_w0, _val100, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w1, _val101, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w2, _val102, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w3, _val103, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w4, _val104, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w5, _val105, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w6, _val106, _sum10);
_sum10 = _mm256_comp_fmadd_ps(_w7, _val107, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_w0, _val110, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w1, _val111, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w2, _val112, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w3, _val113, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w4, _val114, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w5, _val115, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w6, _val116, _sum11);
_sum11 = _mm256_comp_fmadd_ps(_w7, _val117, _sum11);
tmpptr += 96;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
_mm256_storeu_ps(outptr + 64, _sum8);
_mm256_storeu_ps(outptr + 72, _sum9);
_mm256_storeu_ps(outptr + 80, _sum10);
_mm256_storeu_ps(outptr + 88, _sum11);
outptr += 96;
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_comp_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_comp_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_w7, _val77, _sum7);
tmpptr += 64;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
outptr += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_comp_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_w7, _val37, _sum3);
tmpptr += 32;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
outptr += 32;
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
_sum0 = _mm256_comp_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_w7, _val17, _sum1);
tmpptr += 16;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
outptr += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
__m256 _sum = _bias0;
const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _w0 = loadfp16(kptr);
__m256 _w1 = loadfp16(kptr + 8);
__m256 _w2 = loadfp16(kptr + 16);
__m256 _w3 = loadfp16(kptr + 24);
__m256 _w4 = loadfp16(kptr + 32);
__m256 _w5 = loadfp16(kptr + 40);
__m256 _w6 = loadfp16(kptr + 48);
__m256 _w7 = loadfp16(kptr + 56);
_sum = _mm256_comp_fmadd_ps(_w0, _val0, _sum);
_sum = _mm256_comp_fmadd_ps(_w1, _val1, _sum);
_sum = _mm256_comp_fmadd_ps(_w2, _val2, _sum);
_sum = _mm256_comp_fmadd_ps(_w3, _val3, _sum);
_sum = _mm256_comp_fmadd_ps(_w4, _val4, _sum);
_sum = _mm256_comp_fmadd_ps(_w5, _val5, _sum);
_sum = _mm256_comp_fmadd_ps(_w6, _val6, _sum);
_sum = _mm256_comp_fmadd_ps(_w7, _val7, _sum);
tmpptr += 8;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
static void conv1x1s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 8;
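// after reading outw stride-2 samples from a row, tailstep jumps over the
// rest of that row plus the entire next row (stride 2 vertically),
// measured in floats with elempack 8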
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _v = _mm256_loadu_ps(r0);
_mm256_storeu_ps(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_fp16_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GxB_Desc_get.c | //------------------------------------------------------------------------------
// GxB_Desc_get: get a field in a descriptor
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Desc_get // get a parameter from a descriptor
(
GrB_Descriptor desc, // descriptor to query; NULL is ok
GrB_Desc_Field field, // parameter to query
... // return value of the descriptor
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_Desc_get (desc, field, &value)") ;
GB_RETURN_IF_FAULTY (desc) ;
//--------------------------------------------------------------------------
// get the parameter
//--------------------------------------------------------------------------
va_list ap ;
switch (field)
{
case GrB_OUTP :
{
va_start (ap, field) ;
GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (value) ;
(*value) = (desc == NULL) ? GxB_DEFAULT : desc->out ;
}
break ;
case GrB_MASK :
{
va_start (ap, field) ;
GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (value) ;
(*value) = (desc == NULL) ? GxB_DEFAULT : desc->mask ;
}
break ;
case GrB_INP0 :
{
va_start (ap, field) ;
GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (value) ;
(*value) = (desc == NULL) ? GxB_DEFAULT : desc->in0 ;
}
break ;
case GrB_INP1 :
{
va_start (ap, field) ;
GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (value) ;
(*value) = (desc == NULL) ? GxB_DEFAULT : desc->in1 ;
}
break ;
case GxB_DESCRIPTOR_NTHREADS : // same as GxB_NTHREADS
{
va_start (ap, field) ;
int *nthreads = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (nthreads) ;
int nth = (desc == NULL) ? GxB_DEFAULT : desc->nthreads_max ;
(*nthreads) = nth ;
}
break ;
case GxB_DESCRIPTOR_CHUNK : // same as GxB_CHUNK
{
va_start (ap, field) ;
double *chunk = va_arg (ap, double *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (chunk) ;
(*chunk) = (desc == NULL) ? GxB_DEFAULT : desc->chunk ;
}
break ;
case GxB_AxB_METHOD :
{
va_start (ap, field) ;
GrB_Desc_Value *value = va_arg (ap, GrB_Desc_Value *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (value) ;
(*value) = (desc == NULL) ? GxB_DEFAULT : desc->axb ;
}
break ;
case GxB_SORT :
{
va_start (ap, field) ;
int *do_sort = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (do_sort) ;
int s = (desc == NULL) ? GxB_DEFAULT : desc->do_sort ;
(*do_sort) = s ;
}
break ;
case GxB_COMPRESSION :
{
va_start (ap, field) ;
int *compression = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (compression) ;
int s = (desc == NULL) ? GxB_DEFAULT : desc->compression ;
(*compression) = s ;
}
break ;
case GxB_IMPORT :
{
va_start (ap, field) ;
int *method = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (method) ;
int s = (desc == NULL) ? GxB_DEFAULT : desc->import ;
if (s != GxB_DEFAULT) s = GxB_SECURE_IMPORT ;
(*method) = s ;
}
break ;
default :
return (GrB_INVALID_VALUE) ;
}
#pragma omp flush
return (GrB_SUCCESS) ;
}
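// Example usage (a sketch; all symbols are declared above):
//
//      GrB_Desc_Value method ;
//      GrB_Info info = GxB_Desc_get (desc, GxB_AxB_METHOD, &method) ;
//      // method is GxB_DEFAULT when desc is NULL or the field is unset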
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods take a 24-bit image and reduce
% the number of colors so that it can be displayed on a raster device
% with fewer bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome image and quantizes
% it down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until it represents,
% at most, the number of colors desired in the output image. Assignment
% defines the output image's color map and sets each pixel's color by
% reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node
% to allow representing each possible input color in a leaf. This
% becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i), a geometric series totaling (8^(k+1)-8)/7 + 1.
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
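/*
  A minimal sketch of the reduction loop described above, using
  hypothetical helper names (the actual implementation is
  ReduceImageColors(), declared below):

      cube->pruning_threshold=0.0;
      while (NumberOfColors(cube) > cube->maximum_colors)
        {
          PruneNodesWithErrorAtMost(cube,cube->pruning_threshold);
          cube->pruning_threshold=cube->next_threshold;
        }
*/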
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define ErrorRelativeWeight PerceptibleReciprocal(16)
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
double
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
double
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
double
diffusion,
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither_method=image_info->dither == MagickFalse ?
NoDitherMethod : RiemersmaDitherMethod;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
double
alpha;
if ((cube_info->associate_alpha == MagickFalse) ||
(GetPixelAlpha(image,pixel) == OpaqueAlpha))
{
alpha_pixel->red=(double) GetPixelRed(image,pixel);
alpha_pixel->green=(double) GetPixelGreen(image,pixel);
alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
return;
}
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
alpha_pixel->red=alpha*GetPixelRed(image,pixel);
alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
double
alpha;
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->alpha == OpaqueAlpha))
{
alpha_pixel->red=(double) pixel->red;
alpha_pixel->green=(double) pixel->green;
alpha_pixel->blue=(double) pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
return;
}
alpha=(double) (QuantumScale*pixel->alpha);
alpha_pixel->red=alpha*pixel->red;
alpha_pixel->green=alpha*pixel->green;
alpha_pixel->blue=alpha*pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
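/*
Form the child index from bit 'index' of each channel: red supplies bit 0,
green bit 1, blue bit 2, and, when alpha is associated, alpha bit 3.
*/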
id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
return(id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
exception);
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
if (SetImageColormap(image,cube_info,exception) == MagickFalse)
return(MagickFalse);
/*
Create a reduced color image.
*/
if (cube_info->quantize_info->dither_method != NoDitherMethod)
(void) DitherImage(image,cube_info,exception);
else
{
CacheView
*image_view;
MagickBooleanType
status;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
Quantum
*magick_restrict q;
ssize_t
count,
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
const NodeInfo
*node_info;
ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
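/*
Coalesce a run of identical pixels so the tree descent and closest-color
search execute once per run.
*/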
for (count=1; (x+count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(
image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(
image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(
image->colormap[index].blue),q);
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(
image->colormap[index].alpha),q);
}
q+=GetPixelChannels(image);
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image,exception);
if ((cube_info->quantize_info->number_colors == 2) &&
(IsGrayColorspace(cube_info->quantize_info->colorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
QuantumRange;
if (image->colors > 1)
{
intensity=0.0;
if (GetPixelInfoLuma(image->colormap+0) >
GetPixelInfoLuma(image->colormap+1))
intensity=(double) QuantumRange;
}
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image,exception);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace,exception);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
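% (For k = 8, sum(i=1,8,8^i) = 8*(8^8 - 1)/7 = 19,173,960 nodes below the
% root.)
%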
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
MagickFalse;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
double
bisect;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if (cube_info->quantize_info->colorspace != image->colorspace)
{
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace,exception);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace,
exception);
}
midpoint.red=(double) QuantumRange/2.0;
midpoint.green=(double) QuantumRange/2.0;
midpoint.blue=(double) QuantumRange/2.0;
midpoint.alpha=(double) QuantumRange/2.0;
error.alpha=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
for (y++; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if (cube_info->quantize_info->colorspace != image->colorspace)
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL, a new one.
%
% o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
double
alpha,
beta,
distance,
pixel;
DoublePixelPacket
*magick_restrict q;
PixelInfo
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*p->alpha);
beta=(MagickRealType) (QuantumScale*q->alpha);
}
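/*
Accumulate the squared distance one channel at a time, bailing out early
once it exceeds the best distance found so far.
*/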
pixel=alpha*p->red-beta*q->red;
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->green-beta*q->green;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->blue-beta*q->blue;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=p->alpha-q->alpha;
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
ExceptionInfo *exception)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
double
alpha;
PixelInfo
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(double) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
q->red=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.blue);
q->alpha=(double) OpaqueAlpha;
}
else
{
double
opacity;
opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
q->alpha=(double) ClampToQuantum(opacity);
if (q->alpha == OpaqueAlpha)
{
q->red=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.blue);
}
else
{
double
gamma;
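/*
The color sums were accumulated with premultiplied alpha; gamma (1/alpha)
un-premultiplies them to recover the straight color.
*/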
gamma=(double) (QuantumScale*q->alpha);
gamma=PerceptibleReciprocal(gamma);
q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.blue);
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered, otherwise MagickFalse.
%
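% In the Floyd-Steinberg case the quantization error at each pixel is
% distributed to its unvisited neighbors with the classic weights; the
% serpentine scan flips the "ahead" direction on odd rows:
%
% . * 7/16
% 3/16 5/16 1/16
%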
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelTLS(DoublePixelPacket **pixels)
{
ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelTLS(const size_t count)
{
DoublePixelPacket
**pixels;
size_t
number_threads;
ssize_t
i;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
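/*
Each thread gets 2*count entries: one row of diffused error terms for the
current scanline and one for the previous scanline.
*/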
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelTLS(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
ssize_t
offset;
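/*
Pack the top (8-CacheShift) bits of each channel into a single index; the
cache thus spans 1UL << (4*(8-CacheShift)) entries (see GetCubeInfo()).
*/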
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
DoublePixelPacket
**pixels;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelTLS(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
Quantum
*magick_restrict q;
size_t
index;
ssize_t
x,
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
current=pixels[id]+(y & 0x01)*image->columns;
previous=pixels[id]+((y+1) & 0x01)*image->columns;
v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
ssize_t
i;
ssize_t
u;
u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
if (x > 0)
{
pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=cube_info->diffusion*previous[u+v].red/16;
pixel.green+=cube_info->diffusion*previous[u+v].green/16;
pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16;
}
pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16;
if (x > 0)
{
pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16;
}
}
pixel.red=(double) ClampPixel(pixel.red);
pixel.green=(double) ClampPixel(pixel.green);
pixel.blue=(double) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.alpha=(double) ClampPixel(pixel.alpha);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
NodeInfo
*node_info;
size_t
node_id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
node_id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[node_id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[node_id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
q+u*GetPixelChannels(image));
SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
q+u*GetPixelChannels(image));
SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
q+u*GetPixelChannels(image));
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
q+u*GetPixelChannels(image));
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].alpha=pixel.alpha-color.alpha;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelTLS(pixels);
return(status);
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
CubeInfo
*p;
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
Quantum
*magick_restrict q;
ssize_t
i;
/*
Distribute error.
*/
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
AssociateAlphaPixel(image,cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].red;
pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].green;
pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].alpha;
}
pixel.red=(double) ClampPixel(pixel.red);
pixel.green=(double) ClampPixel(pixel.green);
pixel.blue=(double) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.alpha=(double) ClampPixel(pixel.alpha);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
NodeInfo
*node_info;
size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) p->cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
if (cube_info->associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
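/*
Riemersma() traces a Hilbert curve over the image: each recursion level
splits the curve into four rotated sub-curves joined by single steps (the
RiemersmaDither() calls), so quantization error diffuses along the curve
rather than in raster order.
*/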
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
CubeInfo *cube_info,const size_t level,const unsigned int direction,
ExceptionInfo *exception)
{
MagickBooleanType
status;
status=MagickTrue;
if (level == 1)
switch (direction)
{
case WestGravity:
{
status=RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
break;
}
case EastGravity:
{
status=RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
break;
}
case NorthGravity:
{
status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
break;
}
case SouthGravity:
{
status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
break;
}
case EastGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
break;
}
case NorthGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
break;
}
case SouthGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
break;
}
default:
break;
}
return(status);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
size_t
extent,
level;
artifact=GetImageArtifact(image,"dither:diffusion-amount");
if (artifact != (const char *) NULL)
cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info,exception));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
extent=MagickMax(image->columns,image->rows);
level=(size_t) log2((double) extent);
if (((size_t) 1UL << level) < extent)
level++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
status=MagickTrue;
if (level > 0)
status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
weight;
size_t
length;
ssize_t
i;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
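/*
weights[i] decays geometrically from 1.0 at i=0 down to ErrorRelativeWeight
(1/16) at i=ErrorQueueLength-1.
*/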
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]=PerceptibleReciprocal(weight);
weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
}
cube_info->diffusion=1.0;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a new node in the
% color cube tree.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level of the tree at which the node resides.
%
% o parent: the parent node.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) memset(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in the image.
%
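% In terms of the per-channel differences e, with area = 3*columns*rows
% (a summary of the computation below):
%
% mean_error_per_pixel = sum(|e|) / area
% normalized_mean_error = sum(e^2) / (QuantumRange^2 * area)
% normalized_maximum_error = max(|e|) / QuantumRange
%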
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
alpha,
area,
beta,
distance,
maximum_error,
mean_error,
mean_error_per_pixel;
ssize_t
index,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
(void) memset(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(ssize_t) GetPixelIndex(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
beta=(double) (QuantumScale*image->colormap[index].alpha);
}
distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
mean_error/area;
image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
(void) memset(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
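% The clustering loop follows the textbook k-means pattern (a sketch, not
% the literal code):
%
% repeat up to max_iterations times:
% assign each pixel to the colormap entry with the least KmeansMetric()
% recompute each colormap entry as the mean of its assigned pixels
% stop once the relative change in total distortion falls below tolerance
%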
*/
typedef struct _KmeansInfo
{
double
red,
green,
blue,
alpha,
black,
count,
distortion;
} KmeansInfo;
static KmeansInfo **DestroyKmeansTLS(KmeansInfo **kmeans_info)
{
ssize_t
i;
assert(kmeans_info != (KmeansInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (kmeans_info[i] != (KmeansInfo *) NULL)
kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]);
kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info);
return(kmeans_info);
}
static KmeansInfo **AcquireKmeansTLS(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansTLS(kmeans_info));
}
return(kmeans_info);
}
static inline double KmeansMetric(const Image *magick_restrict image,
const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
double
gamma,
metric,
pixel;
gamma=1.0;
metric=0.0;
if ((image->alpha_trait != UndefinedPixelTrait) ||
(q->alpha_trait != UndefinedPixelTrait))
{
      pixel=QuantumScale*(GetPixelAlpha(image,p)-(q->alpha_trait !=
        UndefinedPixelTrait ? q->alpha : OpaqueAlpha));
metric+=pixel*pixel;
if (image->alpha_trait != UndefinedPixelTrait)
gamma*=QuantumScale*GetPixelAlpha(image,p);
if (q->alpha_trait != UndefinedPixelTrait)
gamma*=QuantumScale*q->alpha;
}
if (image->colorspace == CMYKColorspace)
{
pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
metric+=gamma*pixel*pixel;
gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
gamma*=QuantumScale*(QuantumRange-q->black);
}
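  /*
    Weight the alpha and black contributions so that together they balance
    the three color channel terms accumulated below.
  */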
metric*=3.0;
pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
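  /*
    In hue-compatible colorspaces the red slot carries hue, a circular
    quantity; fold differences greater than a half rotation.
  */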
if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
{
if (fabs((double) pixel) > 0.5)
pixel-=0.5;
pixel*=2.0;
}
metric+=gamma*pixel*pixel;
pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
metric+=gamma*pixel*pixel;
pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
metric+=gamma*pixel*pixel;
return(metric);
}
MagickExport MagickBooleanType KmeansImage(Image *image,
const size_t number_colors,const size_t max_iterations,const double tolerance,
ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))
CacheView
*image_view;
const char
*colors;
double
previous_tolerance;
KmeansInfo
**kmeans_pixels;
MagickBooleanType
verbose,
status;
ssize_t
n;
size_t
number_threads;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
colors=GetImageArtifact(image,"kmeans:seed-colors");
if (colors == (const char *) NULL)
{
CubeInfo
*cube_info;
QuantizeInfo
*quantize_info;
size_t
depth;
/*
Seed clusters from color quantization.
*/
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->colorspace=image->colorspace;
quantize_info->number_colors=number_colors;
quantize_info->dither_method=NoDitherMethod;
n=number_colors;
for (depth=1; n != 0; depth++)
n>>=2;
cube_info=GetCubeInfo(quantize_info,depth,number_colors);
if (cube_info == (CubeInfo *) NULL)
{
quantize_info=DestroyQuantizeInfo(quantize_info);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=ClassifyImageColors(cube_info,image,exception);
if (status != MagickFalse)
{
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
status=SetImageColormap(image,cube_info,exception);
}
DestroyCubeInfo(cube_info);
quantize_info=DestroyQuantizeInfo(quantize_info);
if (status == MagickFalse)
return(status);
}
else
{
char
color[MagickPathExtent];
const char
*p;
/*
Seed clusters from color list (e.g. red;green;blue).
*/
status=AcquireImageColormap(image,number_colors,exception);
if (status == MagickFalse)
return(status);
for (n=0, p=colors; n < (ssize_t) image->colors; n++)
{
const char
*q;
for (q=p; *q != '\0'; q++)
if (*q == ';')
break;
(void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
MagickPathExtent));
(void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
exception);
if (*q == '\0')
{
n++;
break;
}
p=q+1;
}
if (n < (ssize_t) image->colors)
{
RandomInfo
*random_info;
/*
Seed clusters from random values.
*/
random_info=AcquireRandomInfo();
for ( ; n < (ssize_t) image->colors; n++)
{
(void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
exception);
image->colormap[n].red=RandomColorComponent(random_info);
image->colormap[n].green=RandomColorComponent(random_info);
image->colormap[n].blue=RandomColorComponent(random_info);
if (image->alpha_trait != UndefinedPixelTrait)
image->colormap[n].alpha=RandomColorComponent(random_info);
if (image->colorspace == CMYKColorspace)
image->colormap[n].black=RandomColorComponent(random_info);
}
random_info=DestroyRandomInfo(random_info);
}
}
/*
Iterative refinement.
*/
kmeans_pixels=AcquireKmeansTLS(number_colors);
if (kmeans_pixels == (KmeansInfo **) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
previous_tolerance=0.0;
verbose=IsStringTrue(GetImageArtifact(image,"debug"));
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
image_view=AcquireAuthenticCacheView(image,exception);
for (n=0; n < (ssize_t) max_iterations; n++)
{
double
distortion;
ssize_t
j,
y;
for (j=0; j < (ssize_t) number_threads; j++)
(void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
min_distance;
ssize_t
i,
k;
/*
          Assign each pixel to the cluster whose mean has the least squared
          color distance.
*/
k=0;
min_distance=KmeansMetric(image,q,image->colormap+0);
for (i=1; i < (ssize_t) image->colors; i++)
{
double
distance;
if (min_distance <= MagickEpsilon)
break;
distance=KmeansMetric(image,q,image->colormap+i);
if (distance < min_distance)
{
min_distance=distance;
k=i;
}
}
kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q);
kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q);
kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q);
if (image->alpha_trait != UndefinedPixelTrait)
kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q);
if (image->colorspace == CMYKColorspace)
kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q);
kmeans_pixels[id][k].count++;
kmeans_pixels[id][k].distortion+=min_distance;
SetPixelIndex(image,(Quantum) k,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
if (status == MagickFalse)
break;
/*
Reduce sums to [0] entry.
*/
for (j=1; j < (ssize_t) number_threads; j++)
{
ssize_t
k;
for (k=0; k < (ssize_t) image->colors; k++)
{
kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red;
kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green;
kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue;
if (image->alpha_trait != UndefinedPixelTrait)
kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha;
if (image->colorspace == CMYKColorspace)
kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black;
kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count;
kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion;
}
}
/*
Calculate the new means (centroids) of the pixels in the new clusters.
*/
distortion=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
double
gamma;
gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count);
image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red;
image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green;
image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue;
if (image->alpha_trait != UndefinedPixelTrait)
image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha;
if (image->colorspace == CMYKColorspace)
image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black;
distortion+=kmeans_pixels[0][j].distortion;
}
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
GetMagickPrecision(),distortion,GetMagickPrecision(),
fabs(distortion-previous_tolerance));
if (fabs(distortion-previous_tolerance) <= tolerance)
break;
previous_tolerance=distortion;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
max_iterations);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
kmeans_pixels=DestroyKmeansTLS(kmeans_pixels);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
(void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
max_iterations-1,max_iterations);
if (status == MagickFalse)
return(status);
return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
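%  For example, to posterize to two levels per channel with Floyd-Steinberg
%  dithering (a usage sketch):
%
%      status=PosterizeImage(image,2,FloydSteinbergDitherMethod,exception);
%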
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
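  /*
    Worked example, assuming QuantumRange is 65535: with levels == 3, an
    input of 40000 yields MagickRound(40000/65535*2) == 1, and
    QuantumRange*1/2 clamps to 32768, the middle of the three output levels.
  */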
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double)
PosterizePixel(image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double)
PosterizePixel(image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double)
PosterizePixel(image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double)
PosterizePixel(image->colormap[i].alpha);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither_method=dither_method;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
%  The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
if (cube_info->nodes > cube_info->maximum_colors)
{
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.alpha+=node_info->total_color.alpha;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
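%  For example, to reduce an image to at most 256 colors (a usage sketch):
%
%      QuantizeInfo *quantize_info;
%
%      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
%      quantize_info->number_colors=256;
%      status=QuantizeImage(quantize_info,image,exception);
%      quantize_info=DestroyQuantizeInfo(quantize_info);
%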
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
ImageType
type;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
type=IdentifyImageGray(image,exception);
if (IsGrayImageType(type) != MagickFalse)
(void) SetGrayscaleImage(image,exception);
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
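      /*
        For example, maximum_colors == 256 shifts 256 -> 64 -> 16 -> 4 ->
        1 -> 0, leaving depth == 6, i.e. Log4(256)+2.
      */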
if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
depth--;
if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
depth--;
if (IsGrayImageType(type) != MagickFalse)
depth=MaxTreeDepth;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
status=AssignImageColors(image,cube_info,exception);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
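%  For example, to build a single colormap shared across every frame of an
%  image sequence (a usage sketch):
%
%      status=QuantizeImages(quantize_info,images,exception);
%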
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
size_t
depth,
maximum_colors,
number_images;
ssize_t
i;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images,exception);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither_method != NoDitherMethod)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info,exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeErrorFlatten() traverses the color cube and flattens the
%  quantization error into a 1D array that the caller subsequently sorts.
%  This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
%    o node_info: a pointer to the current node in the color cube tree.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
size_t
n,
number_children;
ssize_t
i;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
%  beginning of reduction, n2 = 0 for all nodes except the leaves of the
%  tree, which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
double
*p,
*q;
p=(double *) error_p;
q=(double *) error_q;
if (*p > *q)
return(1);
if (fabs(*q-*p) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
double
*quantize_error;
/*
        Enable rapid reduction of the number of unique colors: sort the
        per-node quantization errors and choose an initial pruning threshold
        that prunes all but roughly 110% of the target color count in the
        first pass.
*/
quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (double *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(double),
QuantizeErrorCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(double *) RelinquishMagickMemory(quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
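%  For example, to recolor an image with the palette of a previously read
%  reference image (a usage sketch):
%
%      status=RemapImage(quantize_info,image,remap_image,exception);
%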
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,exception);
if (status != MagickFalse)
{
/*
        Assign the closest reference colors to the image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info,exception);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
%  The format of the RemapImages method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images,exception);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,exception);
if (status != MagickFalse)
{
/*
        Assign the closest reference colors to each image in the sequence.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info,exception);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
double
intensity;
PixelInfo
*color_1,
*color_2;
color_1=(PixelInfo *) x;
color_2=(PixelInfo *) y;
intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
GetPixelInfoIntensity((const Image *) NULL,color_2);
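  /*
    Clamp before the cast below: the intensity difference is a double and
    can exceed the range of int.
  */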
if (intensity < (double) INT_MIN)
intensity=(double) INT_MIN;
if (intensity > (double) INT_MAX)
intensity=(double) INT_MAX;
return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
*colormap;
size_t
extent;
ssize_t
*colormap_index,
i,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace,exception);
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(image,q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
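            /*
              Re-test under the critical section: another thread may have
              registered this intensity while we waited.
            */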
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=(double)
GetPixelRed(image,q);
image->colormap[image->colors].green=(double)
GetPixelGreen(image,q);
image->colormap[image->colors].blue=(double)
GetPixelBlue(image,q);
image->colors++;
}
}
SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
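  /*
    The alpha member temporarily records each entry's original index so
    that pixel indexes can be remapped after the intensity sort.
  */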
qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
IntensityCompare);
colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
if (colormap == (PixelInfo *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].alpha]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
GetPixelIndex(image,q))],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
%  the image.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
size_t
number_colors;
number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
DefineImageColormap(image,cube_info,cube_info->root);
if (image->colors != number_colors)
{
image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
image->colors+1,sizeof(*image->colormap));
if (image->colormap == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
return(MagickTrue);
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
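%  A typical use is to trim an image to its significant content (a usage
%  sketch):
%
%      Image *crop_image;
%      RectangleInfo bounds;
%
%      bounds=GetImageBoundingBox(image,exception);
%      crop_image=CropImage(image,&bounds,exception);
%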
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickPixelPacket
target[3],
zero;
RectangleInfo
bounds;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
GetMagickPixelPacket(image,&target[0]);
image_view=AcquireCacheView(image);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
&target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
      &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
      &target[2]);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
RectangleInfo
bounding_box;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((x < bounding_box.x) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
p++;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
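%  For example, to find the effective depth of the red channel alone (a
%  usage sketch):
%
%      depth=GetImageChannelDepth(image,RedChannel,exception);
%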
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
const ChannelType channel,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
id;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=GetOpenMPMaximumThreads();
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (id=0; id < (ssize_t) number_threads; id++)
current_depth[id]=1;
if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
if (status == MagickFalse)
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickStatusType
status;
QuantumAny
range;
status=0;
range=GetQuantumRange(current_depth[id]);
if ((channel & RedChannel) != 0)
status|=image->colormap[i].red != ScaleAnyToQuantum(
ScaleQuantumToAny(image->colormap[i].red,range),range);
if ((channel & GreenChannel) != 0)
status|=image->colormap[i].green != ScaleAnyToQuantum(
ScaleQuantumToAny(image->colormap[i].green,range),range);
if ((channel & BlueChannel) != 0)
status|=image->colormap[i].blue != ScaleAnyToQuantum(
ScaleQuantumToAny(image->colormap[i].blue,range),range);
if (status == 0)
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (id=1; id < (ssize_t) number_threads; id++)
if (depth < current_depth[id])
depth=current_depth[id];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireCacheView(image);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if (QuantumRange <= MaxMap)
{
register ssize_t
i;
size_t
*depth_map;
/*
      Compute each pixel's depth with a precomputed depth map (an
      optimization available when QuantumRange <= MaxMap).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
if ((channel & RedChannel) != 0)
{
pixel=GetPixelRed(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & GreenChannel) != 0)
{
pixel=GetPixelGreen(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & BlueChannel) != 0)
{
pixel=GetPixelBlue(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
pixel=GetPixelOpacity(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel=GetPixelIndex(indexes+x);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (id=1; id < (ssize_t) number_threads; id++)
if (depth < current_depth[id])
depth=current_depth[id];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickStatusType
status;
QuantumAny
range;
status=0;
range=GetQuantumRange(current_depth[id]);
if ((channel & RedChannel) != 0)
status|=GetPixelRed(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelRed(p),range),range);
if ((channel & GreenChannel) != 0)
status|=GetPixelGreen(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelGreen(p),range),range);
if ((channel & BlueChannel) != 0)
status|=GetPixelBlue(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelBlue(p),range),range);
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
status|=GetPixelOpacity(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelOpacity(p),range),range);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
status|=GetPixelIndex(indexes+x) !=
ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelIndex(indexes+
x),range),range);
if (status == 0)
break;
current_depth[id]++;
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (id=1; id < (ssize_t) number_threads; id++)
if (depth < current_depth[id])
depth=current_depth[id];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
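%  For example, an image depth of 12 is reported as 16; with constrain set
%  and MAGICKCORE_QUANTUM_DEPTH equal to 8, the same image reports 8.
%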
*/
static inline double MagickMin(const double x,const double y)
{
if (x < y)
return(x);
return(y);
}
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double)
MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->matte == MagickFalse)
return(ColorSeparationType);
return(ColorSeparationMatteType);
}
if (IsMonochromeImage(image,exception) != MagickFalse)
return(BilevelType);
if (IsGrayImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(GrayscaleMatteType);
return(GrayscaleType);
}
if (IsPaletteImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(PaletteMatteType);
return(PaletteType);
}
if (image->matte != MagickFalse)
return(TrueColorMatteType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(MagickTrue);
if (IsRGBColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=BilevelType;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
type=GrayscaleType;
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == UndefinedType)
return(MagickFalse);
((Image *) image)->type=type;
if ((type == GrayscaleType) && (image->matte != MagickFalse))
((Image *) image)->type=GrayscaleMatteType;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register ssize_t
x;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if (IsRGBColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=BilevelType;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsMonochromePixel(p) == MagickFalse)
{
type=UndefinedType;
break;
}
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == UndefinedType)
return(MagickFalse);
((Image *) image)->type=type;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
return(MagickTrue);
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
break;
p++;
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of one or more channels of the image;
% SetImageDepth() sets the depth of all channels.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth)
{
return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
const ChannelType channel,const size_t depth)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=MAGICKCORE_QUANTUM_DEPTH;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
image->colormap[i].red,range),range);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
image->colormap[i].green,range),range);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
image->colormap[i].blue,range),range);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
image->colormap[i].opacity,range),range);
}
status=SyncImage(image);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if (QuantumRange <= MaxMap)
{
Quantum
*depth_map;
register ssize_t
i;
/*
Scale pixels to the desired depth (optimized with a depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelRed(q),
range),range));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelGreen(q),
range),range));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelBlue(q),
range),range));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelOpacity(q),range),range));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
|
is.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
#define STACK_SIZE (8 * 1024 * 1024)
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
typedef int INT_TYPE;
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[SIZE_OF_BUFFERS],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE];
#ifdef USE_BUCKETS
INT_TYPE bucket_size[NUM_BUCKETS],
bucket_ptrs[NUM_BUCKETS];
#endif
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427,17148,23627,62548,4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0,18,346,64917,65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773,934767,875723,898999,404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249,11698,1039987,1043896,1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377,662041,5336171,3642833,4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104,17523,123928,8288932,8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869,812306,5102857,18232239,26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937,10244,59149,33135281,99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927,72999161,74326391,129606274,21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147,882988,266290,133997595,133525895};
/***********************/
/* function prototypes */
/***********************/
double is_randlc( double *X, double *A );
void full_verify( void );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
double is_randlc( double *X, double *A )
{
static int KS=0;
static double R23, R46, T23, T46;
double T1, T2, T3, T4;
double A1;
double A2;
double X1;
double X2;
double Z;
int i, j;
if (KS == 0)
{
R23 = 1.0;
R46 = 1.0;
T23 = 1.0;
T46 = 1.0;
for (i=1; i<=23; i++)
{
R23 = 0.50 * R23;
T23 = 2.0 * T23;
}
for (i=1; i<=46; i++)
{
R46 = 0.50 * R46;
T46 = 2.0 * T46;
}
KS = 1;
}
/* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */
T1 = R23 * *A;
j = T1;
A1 = j;
A2 = *A - T23 * A1;
/* Break X into two parts such that X = 2^23 * X1 + X2, compute
Z = A1 * X2 + A2 * X1 (mod 2^23), and then
X = 2^23 * Z + A2 * X2 (mod 2^46). */
T1 = R23 * *X;
j = T1;
X1 = j;
X2 = *X - T23 * X1;
T1 = A1 * X2 + A2 * X1;
j = R23 * T1;
T2 = j;
Z = T1 - T23 * T2;
T3 = T23 * Z + A2 * X2;
j = R46 * T3;
T4 = j;
*X = T3 - T46 * T4;
return(R46 * *X);
}
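/* Hedged usage sketch (not part of the benchmark; the guard macro and
variable names below are illustrative): each call advances the seed in
place, so repeated calls walk the same pseudorandom sequence. */
#ifdef IS_RANDLC_DEMO
static void is_randlc_demo( void )
{
double seed = 314159265.00; /* same seed create_seq() is given in main */
double mult = 1220703125.00; /* same multiplier as in main */
printf( "%f\n", is_randlc( &seed, &mult ) ); /* first draw in (0, 1) */
printf( "%f\n", is_randlc( &seed, &mult ) ); /* continues the sequence */
}
#endif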
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
void create_seq( double seed, double a )
{
double x;
int i, j, k;
k = MAX_KEY/4;
for (i=0; i<NUM_KEYS; i++)
{
x = is_randlc(&seed, &a);
x += is_randlc(&seed, &a);
x += is_randlc(&seed, &a);
x += is_randlc(&seed, &a);
key_array[i] = k*x;
}
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
void full_verify()
{
INT_TYPE i, j;
/* Now, finally, sort the keys: */
for( i=0; i<NUM_KEYS; i++ )
key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];
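/* Illustration: key_buff_ptr_global[v] enters this loop holding the number
of keys less than or equal to v; the pre-decrement turns it into the
0-based destination index for the next occurrence of value v, so equal
keys fill their slot range from the top down. */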
/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
j = 0;
for( i=1; i<NUM_KEYS; i++ )
if( key_array[i-1] > key_array[i] )
j++;
if( j != 0 )
{
printf( "Full_verify: number of keys out of sort: %d\n",
j );
}
else
passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
void rank( int iteration )
{
INT_TYPE i, j, k;
INT_TYPE l, m;
INT_TYPE shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
INT_TYPE key;
INT_TYPE min_key_val, max_key_val;
INT_TYPE prv_buff1[MAX_KEY];
#pragma omp master
{
key_array[iteration] = iteration;
key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;
/* Determine where the partial verify test keys are, load into */
/* top of array bucket_size */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
partial_verify_vals[i] = key_array[test_index_array[i]];
/* Clear the work array */
for( i=0; i<MAX_KEY; i++ )
key_buff1[i] = 0;
}
#pragma omp barrier
for (i=0; i<MAX_KEY; i++)
prv_buff1[i] = 0;
/* Copy keys into work array; keys in key_array will be reused each iter. */
#pragma omp for nowait
for( i=0; i<NUM_KEYS; i++ ) {
key_buff2[i] = key_array[i];
/* Ranking of all keys occurs in this section: */
/* In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population */
prv_buff1[key_buff2[i]]++; /* Now they have individual key */
}
/* population */
for( i=0; i<MAX_KEY-1; i++ )
prv_buff1[i+1] += prv_buff1[i];
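/* Worked example (illustrative): for keys {1, 0, 2, 2} the counts are
{1, 1, 2} for the values 0, 1, 2; after the running sum above, prv_buff1
holds {1, 2, 4}, i.e. the number of keys less than or equal to each value. */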
#pragma omp critical
{
for( i=0; i<MAX_KEY; i++ )
key_buff1[i] += prv_buff1[i];
}
/* To obtain ranks of each key, successively add the individual key
population, not forgetting to add m, the total of lesser keys,
to the first key population */
#pragma omp barrier
#pragma omp master
{
/* This is the partial verify test section */
/* Observe that test_rank_array vals are */
/* shifted differently for different cases */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
{
k = partial_verify_vals[i]; /* test vals were put here */
if( 0 <= k && k <= NUM_KEYS-1 )
switch( CLASS )
{
case 'S':
if( i <= 2 )
{
if( key_buff1[k-1] != test_rank_array[i]+iteration )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
else
{
if( key_buff1[k-1] != test_rank_array[i]-iteration )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
break;
case 'W':
if( i < 2 )
{
if( key_buff1[k-1] !=
test_rank_array[i]+(iteration-2) )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
else
{
if( key_buff1[k-1] != test_rank_array[i]-iteration )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
break;
case 'A':
if( i <= 2 )
{
if( key_buff1[k-1] !=
test_rank_array[i]+(iteration-1) )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
else
{
if( key_buff1[k-1] !=
test_rank_array[i]-(iteration-1) )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
break;
case 'B':
if( i == 1 || i == 2 || i == 4 )
{
if( key_buff1[k-1] != test_rank_array[i]+iteration )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
else
{
if( key_buff1[k-1] != test_rank_array[i]-iteration )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
break;
case 'C':
if( i <= 2 )
{
if( key_buff1[k-1] != test_rank_array[i]+iteration )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
else
{
if( key_buff1[k-1] != test_rank_array[i]-iteration )
{
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, i );
}
else
passed_verification++;
}
break;
}
}
/* Make copies of rank info for use by full_verify: these variables
in rank are local; making them global slows down the code, probably
since they cannot be made register by compiler */
if( iteration == MAX_ITERATIONS )
key_buff_ptr_global = key_buff1;
} /* end master */
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
static int realmain(void *cargv)
{
int nthreads_req = (int)(long)cargv; /* requested thread count passed through the void* */
int i, iteration;
int nthreads = 1;
double timecounter;
omp_set_num_threads(nthreads_req);
/* Initialize the verification arrays if a valid class */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
switch( CLASS )
{
case 'S':
test_index_array[i] = S_test_index_array[i];
test_rank_array[i] = S_test_rank_array[i];
break;
case 'A':
test_index_array[i] = A_test_index_array[i];
test_rank_array[i] = A_test_rank_array[i];
break;
case 'W':
test_index_array[i] = W_test_index_array[i];
test_rank_array[i] = W_test_rank_array[i];
break;
case 'B':
test_index_array[i] = B_test_index_array[i];
test_rank_array[i] = B_test_rank_array[i];
break;
case 'C':
test_index_array[i] = C_test_index_array[i];
test_rank_array[i] = C_test_rank_array[i];
break;
};
/* Printout initial NPB info */
printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - IS Benchmark\n\n" );
printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
printf( " Iterations: %d\n", MAX_ITERATIONS );
/* Initialize timer */
timer_clear( 0 );
/* Generate random number sequence and subsequent keys on all procs */
create_seq( 314159265.00, /* Random number gen seed */
1220703125.00 ); /* Random number gen mult */
/* Do one iteration for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables */
#pragma omp parallel
rank( 1 );
/* Start verification counter */
passed_verification = 0;
if( CLASS != 'S' ) printf( "\n iteration\n" );
/* Start timer */
timer_start( 0 );
/* This is the main iteration */
#pragma omp parallel private(iteration)
for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
{
//#pragma omp master
//if( CLASS != 'S' ) printf( " %d\n", iteration );
rank( iteration );
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
}
/* End of timing, obtain maximum time of all processors */
timer_stop( 0 );
timecounter = timer_read( 0 );
/* This tests that keys are in sequence: sorting of last ranked key seq
occurs here, but is an untimed operation */
full_verify();
/* The final printout */
if( passed_verification != 5*MAX_ITERATIONS + 1 ) {
passed_verification = 0;
}
#ifdef BOMP
//backend_create_time(argv);
#endif
printf("Computetime %d %f\n", argv, timecounter);
printf("client done\n");
/* c_print_results( "IS", */
/* CLASS, */
/* TOTAL_KEYS, */
/* 0, */
/* 0, */
/* MAX_ITERATIONS, */
/* nthreads, */
/* timecounter, */
/* ((double) (MAX_ITERATIONS*TOTAL_KEYS)) */
/* /timecounter/1000000., */
/* "keys ranked", */
/* passed_verification, */
/* NPBVERSION, */
/* COMPILETIME, */
/* CC, */
/* CLINK, */
/* C_LIB, */
/* C_INC, */
/* CFLAGS, */
/* CLINKFLAGS, */
/* "randlc"); */
/**************************/
return 0;
} /* E N D P R O G R A M */
/**************************/
#define STACK_SIZE (8 * 1024 * 1024)
int main(int argc, char** argv)
{
if (argc != 2) { /* Print usage */
printf("Usage: %s <Number of threads>\n", argv[0]);
exit(-1);
}
#ifdef BOMP
bomp_bomp_init(atoi(argv[1]));
#endif /* BOMP */
realmain((void*)((long)atoi(argv[1])));
return 0;
}
|
mpm_search_element_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Bodhinanda Chandra
//
#ifndef KRATOS_MPM_SEARCH_ELEMENT_UTILITY
#define KRATOS_MPM_SEARCH_ELEMENT_UTILITY
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "utilities/binbased_fast_point_locator.h"
#include "particle_mechanics_application_variables.h"
namespace Kratos
{
namespace MPMSearchElementUtility
{
typedef std::size_t IndexType;
typedef std::size_t SizeType;
/**
* @brief Search element connectivity for each particle
* @details A search is performed to determine in which grid element each material point falls.
* If one or more material points fall in a grid element, that element is
* set to active and its connectivity is associated with the material point
* element.
* STEPS:
* 1) All the elements are set to INACTIVE
* 2) A search is performed and the grid elements that contain at least one MP are set to ACTIVE
*
*/
template<std::size_t TDim>
void SearchElement(ModelPart& rBackgroundGridModelPart, ModelPart& rMPMModelPart, const std::size_t MaxNumberOfResults,
const double Tolerance)
{
// Reset elements to inactive
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(rBackgroundGridModelPart.Elements().size()); ++i){
auto element_itr = rBackgroundGridModelPart.Elements().begin() + i;
auto& rGeom = element_itr->GetGeometry();
element_itr->Reset(ACTIVE);
for (IndexType j=0; j < rGeom.PointsNumber(); ++j)
rGeom[j].Reset(ACTIVE);
}
// Search background grid and make element active
Vector N;
const int max_result = 1000;
#pragma omp parallel
{
BinBasedFastPointLocator<TDim> SearchStructure(rBackgroundGridModelPart);
SearchStructure.UpdateSearchDatabase();
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_result);
// Element search and assign background grid
#pragma omp for
for(int i = 0; i < static_cast<int>(rMPMModelPart.Elements().size()); ++i){
auto element_itr = rMPMModelPart.Elements().begin() + i;
const array_1d<double,3>& xg = element_itr->GetValue(MP_COORD);
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelem;
// FindPointOnMesh finds the background element in which a given point falls and the corresponding shape functions
bool is_found = SearchStructure.FindPointOnMesh(xg, N, pelem, result_begin, MaxNumberOfResults, Tolerance);
if (is_found == true) {
pelem->Set(ACTIVE);
element_itr->GetGeometry() = pelem->GetGeometry();
auto& rGeom = element_itr->GetGeometry();
for (IndexType j=0; j < rGeom.PointsNumber(); ++j)
rGeom[j].Set(ACTIVE);
}
else{
KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point: " << element_itr->Id()
<< " is failed. Geometry is cleared." << std::endl;
element_itr->GetGeometry().clear();
element_itr->Reset(ACTIVE);
element_itr->Set(TO_ERASE);
}
}
// Condition search and assign background grid
#pragma omp for
for(int i = 0; i < static_cast<int>(rMPMModelPart.Conditions().size()); ++i){
auto condition_itr = rMPMModelPart.Conditions().begin() + i;
if (condition_itr->Has(MPC_COORD)){
const array_1d<double,3>& xg = condition_itr->GetValue(MPC_COORD);
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelem;
// FindPointOnMesh finds the background element in which a given point falls and the corresponding shape functions
bool is_found = SearchStructure.FindPointOnMesh(xg, N, pelem, result_begin, MaxNumberOfResults, Tolerance);
if (is_found == true) {
pelem->Set(ACTIVE);
condition_itr->GetGeometry() = pelem->GetGeometry();
auto& rGeom = condition_itr->GetGeometry();
for (IndexType j=0; j < rGeom.PointsNumber(); ++j)
rGeom[j].Set(ACTIVE);
}
else{
KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point Condition: " << condition_itr->Id()
<< " is failed. Geometry is cleared." << std::endl;
condition_itr->GetGeometry().clear();
condition_itr->Reset(ACTIVE);
condition_itr->Set(TO_ERASE);
}
}
}
}
}
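// Hedged usage sketch (illustrative; the model part names and values below
// are assumptions, not part of this header):
// MPMSearchElementUtility::SearchElement<2>(r_background_grid, r_mpm_model_part, 1000, 1.0e-5);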
} // end namespace MPMSearchElementUtility
} // end namespace Kratos
#endif // KRATOS_MPM_SEARCH_ELEMENT_UTILITY
|
pt_to_pt_multiPingpong.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Contains the point-to-point multi-pingpong mixed mode */
/* OpenMP/MPI benchmarks. */
/* This includes: -masteronly multiPingpong */
/* -funnelled multiPingpong */
/* -multiple multiPingpong */
/*-----------------------------------------------------------*/
#include "pt_to_pt_multiPingpong.h"
/*-----------------------------------------------------------*/
/* multiPingPong */
/* */
/* Driver subroutine for the multi-pingpong benchmark. */
/*-----------------------------------------------------------*/
int multiPingPong(int benchmarkType){
int dataSizeIter;
int pongWorldRank;
char pongProcName[MPI_MAX_PROCESSOR_NAME];
int balance;
pingNode = 0;
pongNode = 1;
/* Check if there's a balance in num of MPI processes
on pingNode and pongNode. */
balance = crossCommBalance(pingNode, pongNode);
/* If not balanced.. */
if (balance == FALSE){
/* ..master prints error */
if (myMPIRank == 0){
printBalanceError();
}
/* ..and all process exit function. */
return 1;
}
/* Exchange MPI_COMM_WORLD ranks for processes in same crossComm */
exchangeWorldRanks(pingNode, pongNode, &pongWorldRank);
/* Processes on pongNode send processor name to pingNode procs. */
sendProcName(pingNode, pongNode, pongProcName);
/* Print comm world ranks & processor name of processes
* taking part in multi-pingpong benchmark.
*/
printMultiProcInfo(pingNode, pongWorldRank, pongProcName);
/* Barrier to ensure that all procs have completed
* printMultiProcInfo before printing column headings.
*/
MPI_Barrier(comm);
/* Master process then prints report column headings */
if (myMPIRank == 0){
printBenchHeader();
}
/* Initialise repsToDo to defaultReps at start of benchmark */
repsToDo = defaultReps;
dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */
/* Loop over data sizes */
while (dataSizeIter <= maxDataSize){
/* set sizeofBuffer */
sizeofBuffer = dataSizeIter * numThreads;
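/* Each OpenMP thread owns a contiguous dataSize-element slice of the
* message, so every buffer holds dataSize * numThreads ints in total.
*/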
/* Allocate space for the main data arrays */
allocateMultiPingpongData(sizeofBuffer);
/* warm-up */
if (benchmarkType == MASTERONLY){
/* Masteronly warm-up */
masteronlyMultiPingpong(warmUpIters, dataSizeIter);
}
else if (benchmarkType == FUNNELLED){
/* Funnelled warm-up sweep */
funnelledMultiPingpong(warmUpIters, dataSizeIter);
}
else if (benchmarkType == MULTIPLE){
/* Multiple pingpong warm-up */
multipleMultiPingpong(warmUpIters, dataSizeIter);
}
/* Verification test for multi-pingpong */
testMultiPingpong(sizeofBuffer, dataSizeIter);
/* Initialise benchmark */
benchComplete = FALSE;
/* Keep executing benchmark until target time is reached */
while (benchComplete != TRUE){
/* MPI_Barrier to synchronise processes.
Then start the timer. */
MPI_Barrier(comm);
startTime = MPI_Wtime();
if (benchmarkType == MASTERONLY){
/* Execute masteronly multipingpong repsToDo times */
masteronlyMultiPingpong(repsToDo, dataSizeIter);
}
else if (benchmarkType == FUNNELLED){
/* Execute funnelled multipingpong */
funnelledMultiPingpong(repsToDo, dataSizeIter);
}
else if (benchmarkType == MULTIPLE){
multipleMultiPingpong(repsToDo, dataSizeIter);
}
/* Stop the timer..MPI_Barrier to synchronise processes
* for more accurate timing.
*/
MPI_Barrier(comm);
finishTime = MPI_Wtime();
totalTime = finishTime - startTime;
/* Call repTimeCheck to check if target time is reached. */
if (myMPIRank==0){
benchComplete = repTimeCheck(totalTime, repsToDo);
}
/* Ensure all procs have the same value of benchComplete */
/* and repsToDo */
MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
} /* End of loop to check if benchComplete is true */
/* Master process sets benchmark results */
if (myMPIRank == 0){
setReportParams(dataSizeIter, repsToDo, totalTime);
printReport();
}
/* Free the allocated space for the main data arrays */
freeMultiPingpongData();
/* Update dataSize before next iteration */
dataSizeIter = dataSizeIter * 2;
} /* end loop over data sizes */
return 0;
}
/*-----------------------------------------------------------*/
/* masteronlyMultiPingpong */
/* */
/* All MPI processes in crossComm = pingNode sends a single */
/* fixed length message to the neighbouring process in */
/* crossComm = pongNode. */
/* The neighbouring processes then sends the message back */
/* to the first process. */
/*-----------------------------------------------------------*/
int masteronlyMultiPingpong(int totalReps, int dataSize){
int repIter, i;
for (repIter = 1; repIter <= totalReps; repIter++){
/* Threads under each MPI process with
* crossCommRank = pingNode write to pingSendBuf
* array with a PARALLEL FOR directive.
*/
if (crossCommRank == pingNode){
#pragma omp parallel for default(none) \
private(i) \
shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Each process with crossCommRank = pingNode sends
* buffer to MPI process with rank = pongNode in crossComm.
*/
MPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongNode, TAG, crossComm);
/* The processes then wait for a message from pong process
* and each thread reads its part of the received buffer.
*/
MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongNode, \
TAG, crossComm, &status);
#pragma omp parallel for default(none) \
private(i) \
shared(pongRecvBuf,finalRecvBuf,dataSize,sizeofBuffer) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pongRecvBuf[i];
}
}
else if (crossCommRank == pongNode){
/* Each process with crossCommRank = pongNode receives
* the message from the pingNode processes.
*/
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingNode,\
TAG, crossComm, &status);
/* Each thread copies its part of the received buffer
* to pongSendBuf.
*/
#pragma omp parallel for default(none) \
private(i) \
shared(pongSendBuf,pingRecvBuf,dataSize,sizeofBuffer) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pongSendBuf[i] = pingRecvBuf[i];
}
/* The processes now send pongSendBuf to processes
* with crossCommRank = pingNode.
*/
MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingNode, \
TAG, crossComm);
}
} /* End repetitions loop */
return 0;
}
/*-----------------------------------------------------------*/
/* funnelledMultiPingpong */
/* */
/* All MPI processes in crossComm = pingNode sends a single */
/* fixed length message to the neighbouring process in */
/* crossComm = pongNode. */
/* The neighbouring processes then sends the message back */
/* to the first process. */
/* All communication takes place within the OpenMP parallel */
/* region for this benchmark. */
/*-----------------------------------------------------------*/
int funnelledMultiPingpong(int totalReps, int dataSize){
int repIter, i;
/* Open the parallel region for threads */
#pragma omp parallel default(none) \
private(i,repIter) \
shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \
shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \
shared(dataSize,globalIDarray,crossComm,status) \
shared(totalReps,myMPIRank,crossCommRank)
{
/* loop totalRep times */
for (repIter = 1; repIter <= totalReps; repIter++){
/* All threads under each MPI process with
* crossCommRank = pingNode write to pingSendBuf
* array using a parallel for directive.
*/
if (crossCommRank == pingNode){
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier at end of omp for takes care of synchronisation */
/* Master thread under each pingNode process sends
* buffer to corresponding MPI process in pongNode
* using crossComm.
*/
#pragma omp master
{
MPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongNode, TAG, crossComm);
/* Master thread then waits for a message from the pong process. */
MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongNode, TAG, \
crossComm, &status);
}
/* Barrier needed to wait for master thread to complete MPI_Recv */
#pragma omp barrier
/* Each thread then reads its part of the received buffer. */
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pongRecvBuf[i];
}
}
else if (crossCommRank == pongNode){
/* Master thread under each pongNode process receives
* the message from the pingNode processes.
*/
#pragma omp master
{
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingNode,\
TAG, crossComm, &status);
}
/* Barrier needed to wait on master thread */
#pragma omp barrier
/* Each thread reads its part of the received buffer. */
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pongSendBuf[i] = pingRecvBuf[i];
}
/* Implicit barrier at end of omp for */
/* Master threads send their pongSendBuf to processes
* with crossCommRank = pingNode.
*/
#pragma omp master
{
MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingNode, TAG, crossComm);
}
}
} /* End of repetitions loop. */
} /* End of parallel region */
return 0;
}
/*-----------------------------------------------------------*/
/* multipleMultiPingpong */
/* */
/* Multiple threads take place in the communication and */
/* computation. */
/* Each thread of all MPI processes in crossComm = pingNode */
/* sends a portion of the message to the neighbouring */
/* process in crossComm = pongNode. */
/* Each thread of the neighbouring processes then sends */
/* the message back to the first process. */
/*-----------------------------------------------------------*/
int multipleMultiPingpong(int totalReps, int dataSize){
int repIter, i;
int lBound;
/* Open parallel region for threads */
#pragma omp parallel default(none) \
private(i,repIter,status,lBound) \
shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \
shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \
shared(dataSize,globalIDarray,crossComm) \
shared(totalReps,myMPIRank,crossCommRank)
{
for (repIter=1; repIter<=totalReps; repIter++){ /* loop totalRep times */
if (crossCommRank == pingNode){
/* Calculate lower bound of data array for the thread */
lBound = (myThreadID * dataSize);
/* All threads write to its part of the pingBuf
* array using a parallel for directive.
*/
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier at end of for not needed for multiple */
/* Each thread under ping process sends dataSize items
* to pongNode process in crossComm.
* myThreadID is used as tag to ensure data goes to
* correct place in buffer.
*/
MPI_Send(&pingSendBuf[lBound], dataSize, MPI_INT, pongNode, \
myThreadID, crossComm);
/* Thread then waits for a message from pongNode. */
MPI_Recv(&pongRecvBuf[lBound], dataSize, MPI_INT, pongNode, \
myThreadID, crossComm, &status);
/* Each thread reads its part of the received buffer. */
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pongRecvBuf[i];
}
}
else if (crossCommRank == pongNode){
/* Calculate lower bound of data array for the thread */
lBound = (myThreadID * dataSize);
/* Each thread under pongRank receives a message from
* the ping process.
*/
MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, pingNode, \
myThreadID, crossComm, &status);
/* Each thread now copies its part of the received buffer
* to pongSendBuf.
*/
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pongSendBuf[i] = pingRecvBuf[i];
}
/* Each thread now sends pongSendBuf to ping process. */
MPI_Send(&pongSendBuf[lBound], dataSize, MPI_INT, pingNode, \
myThreadID, crossComm);
}
} /* End repetitions loop */
} /* End parallel region */
return 0;
}
/*-----------------------------------------------------------*/
/* allocateMultiPingpongData */
/* */
/* Allocates space for the main data arrays. */
/* Size of each array is specified by subroutine argument. */
/*-----------------------------------------------------------*/
int allocateMultiPingpongData(int sizeofBuffer){
if (crossCommRank == pingNode){
/* allocate space for arrays that MPI processes
* with crossCommRank = pingRank will use.
*/
pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
pongRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
}
else if (crossCommRank == pongNode){
/* allocate space for arrays that MPI processes
* with crossCommRank = pongNode will use.
*/
pingRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
pongSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
}
return 0;
}
/*-----------------------------------------------------------*/
/* freeMultiPingpongData */
/* */
/* Deallocates the storage space for the main data arrays. */
/*-----------------------------------------------------------*/
int freeMultiPingpongData(){
if (crossCommRank == pingNode){
free(pingSendBuf);
free(pongRecvBuf);
free(finalRecvBuf);
}
else if (crossCommRank == pongNode){
free(pingRecvBuf);
free(pongSendBuf);
}
return 0;
}
/*-----------------------------------------------------------*/
/* testMultiPingpong */
/* */
/* Verifies that the multi-pingpong benchmark worked */
/* correctly. */
/*-----------------------------------------------------------*/
int testMultiPingpong(int sizeofBuffer, int dataSize){
int i;
int testFlag, localTestFlag;
/* Initialise localTestFlag to true */
localTestFlag = TRUE;
/* All processes with crossCommRank = pingNode check
* if multi-pingpong worked ok.
*/
if (crossCommRank == pingNode){
/* allocate space for testBuf */
testBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
/* Construct testBuf array with correct values.
* These are the values that should be in finalRecvBuf.
*/
#pragma omp parallel for default(none) \
private(i) \
shared(testBuf,dataSize,sizeofBuffer,globalIDarray)\
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
testBuf[i] = globalIDarray[myThreadID];
}
/* Compare each element of testBuf and finalRecvBuf */
for (i=0; i<sizeofBuffer; i++){
if (testBuf[i] != finalRecvBuf[i]){
localTestFlag = FALSE;
}
}
/* Free space for testBuf */
free(testBuf);
}
/* Reduce localTestFlag to master */
MPI_Reduce(&localTestFlag, &testFlag, 1, MPI_INT,MPI_LAND, 0, comm);
/* Master then sets testOutcome using reduceFlag */
if (myMPIRank == 0){
setTestOutcome(testFlag);
}
return 0;
}
|
DRACC_OMP_018_Counter_wrong_lock_simd_yes.c | /*
Concurrent access on a counter with the wrong lock, by utilising OpenMP lock routines and simd. Atomicity violation.
Two locks are used so that the additions and the subtractions each cannot be interrupted by themselves on other teams.
They can, however, interrupt each other, leading to a wrong result. Intra and Inter Region.
*/
#include <omp.h>
#include <stdio.h>
#include <stdbool.h>
#define N 100
#define C 512
#pragma omp declare target
omp_lock_t addlock;
omp_lock_t sublock;
#pragma omp end declare target
int countervar[C];
int init(){
for(int i=0; i<C; i++){
countervar[i]=0;
}
return 0;
}
int count(){
#pragma omp target map(tofrom:countervar[0:C]) device(0)
#pragma omp teams
{
omp_init_lock(&addlock);
omp_init_lock(&sublock);
#pragma omp distribute parallel for
for(int j=0; j<N; j++){
omp_set_lock(&addlock);
#pragma omp simd
for(int i=0; i<C; i++){
countervar[i]++;
}
omp_unset_lock(&addlock);
omp_set_lock(&sublock);
#pragma omp simd
for(int i=0; i<C; i++){
countervar[i]-=2;
}
omp_unset_lock(&sublock);
}
omp_destroy_lock(&addlock);
omp_destroy_lock(&sublock);
}
return 0;
}
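/*
Hedged sketch (not part of the benchmark; DRACC_SHOW_FIX and count_fixed are
illustrative names): making each individual update atomic removes the
read-modify-write tearing, and since the +1 and -2 updates commute, the final
value is -N regardless of interleaving.
*/
#ifdef DRACC_SHOW_FIX
int count_fixed(){
#pragma omp target map(tofrom:countervar[0:C]) device(0)
#pragma omp teams distribute parallel for
for(int j=0; j<N; j++){
for(int i=0; i<C; i++){
#pragma omp atomic update
countervar[i]++;
#pragma omp atomic update
countervar[i]-=2;
}
}
return 0;
}
#endif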
int check(){
bool test = false;
for(int i=0; i<C; i++){
if(countervar[i]!=-N){
test = true;
}
}
printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
return 0;
}
int main(){
init();
count();
check();
return 0;
} |
multiple.c | #include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
int main(int argc, char *argv[])
{
int provided, rank, ntasks;
int tid, nthreads, msg, i;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
/* Check that the MPI implementation supports MPI_THREAD_MULTIPLE */
if (provided < MPI_THREAD_MULTIPLE) {
printf("MPI does not support MPI_THREAD_MULTIPLE\n");
MPI_Abort(MPI_COMM_WORLD, -1);
return 0;
}
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
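/* Note: the tag matching below assumes all ranks run the same number of
OpenMP threads, so thread t in rank 0 pairs with thread t in every
receiving rank. */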
#pragma omp parallel private(msg, tid, nthreads, i)
{
nthreads = omp_get_num_threads();
tid = omp_get_thread_num();
if (rank == 0) {
#pragma omp single
{
printf("%i threads in master rank\n", nthreads);
}
for (i = 1; i < ntasks; i++)
MPI_Send(&tid, 1, MPI_INTEGER, i, tid, MPI_COMM_WORLD);
} else {
MPI_Recv(&msg, 1, MPI_INTEGER, 0, tid, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
printf("Rank %i thread %i received %i\n", rank, tid, msg);
}
}
MPI_Finalize();
return 0;
}
|
GB_concat_sparse_template.c | //------------------------------------------------------------------------------
// GB_concat_sparse_template: concatenate a tile into a sparse matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// The tile A is hypersparse, sparse, or full, not bitmap. If C is iso, then
// so is A, and the values are not copied here.
{
//--------------------------------------------------------------------------
// get C and the tile A
//--------------------------------------------------------------------------
#ifndef GB_ISO_CONCAT
const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ;
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
#endif
//--------------------------------------------------------------------------
// copy the tile A into C
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(static)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
int64_t j = GBH (Ah, k) ;
const int64_t pC_start = W [j] ;
//------------------------------------------------------------------
// find the part of the kth vector A(:,j) for this task
//------------------------------------------------------------------
int64_t pA_start, pA_end ;
// as done by GB_get_pA, but also get p0 = Ap [k]
const int64_t p0 = GBP (Ap, k, avlen) ;
const int64_t p1 = GBP (Ap, k+1, avlen) ;
if (k == kfirst)
{
// First vector for task tid; may only be partially owned.
pA_start = pstart_Aslice [tid] ;
pA_end = GB_IMIN (p1, pstart_Aslice [tid+1]) ;
}
else if (k == klast)
{
// Last vector for task tid; may only be partially owned.
pA_start = p0 ;
pA_end = pstart_Aslice [tid+1] ;
}
else
{
// task tid entirely owns this vector A(:,k).
pA_start = p0 ;
pA_end = p1 ;
}
//------------------------------------------------------------------
// append A(:,j) onto C(:,j)
//------------------------------------------------------------------
GB_PRAGMA_SIMD
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = GBI (Ai, pA, avlen) ; // i = Ai [pA]
int64_t pC = pC_start + pA - p0 ;
Ci [pC] = cistart + i ;
// Cx [pC] = Ax [pA] ;
GB_COPY (pC, pA, A_iso) ;
}
}
}
done = true ;
}
#undef GB_CTYPE
#undef GB_ISO_CONCAT
|
pireduction-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// Classic PI calculation using reduction
#define num_steps 2000000000
int main(int argc, char** argv)
{
double pi, sum = 0.0;
double interval_width = 1.0 / (double)num_steps;
int i;
#pragma omp parallel for reduction(+:sum)
for (i = 0; i < num_steps; i++) {
double x = (i + 0.5) * interval_width;
sum += 4.0 / (1.0 + x * x);
}
pi = sum * interval_width;
return 0;
}
|
mxv_omp_mpi.c | #include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define min(x, y) ((x)<(y)?(x):(y))
/**
Program to multiply a matrix times a vector using both
mpi to distribute the computation among nodes and omp
to distribute the computation among threads.
*/
int main(int argc, char* argv[])
{
int nrows, ncols;
double *aa, *b, *c;
double *buffer, ans;
int myid, master, numprocs;
double starttime, endtime;
MPI_Status status;
int i, j, numsent, sender;
int anstype, row;
srand(time(0));
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
if (argc > 1) {
nrows = atoi(argv[1]);
ncols = nrows;
aa = (double*)malloc(sizeof(double) * nrows * ncols);
b = (double*)malloc(sizeof(double) * ncols);
c = (double*)malloc(sizeof(double) * nrows);
buffer = (double*)malloc(sizeof(double) * ncols);
master = 0;
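/* Protocol sketch: the master tags each row message with (row index + 1);
a worker echoes that tag back with its dot product, and a message with
tag 0 tells the worker to stop. */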
if (myid == master) {
// Master Code goes here
for (i = 0; i < nrows; i++) {
for (j = 0; j < ncols; j++) {
aa[i*ncols + j] = (double)rand()/RAND_MAX;
}
}
starttime = MPI_Wtime();
numsent = 0;
MPI_Bcast(b, ncols, MPI_DOUBLE, master, MPI_COMM_WORLD);
for (i = 0; i < min(numprocs-1, nrows); i++) {
for (j = 0; j < ncols; j++) {
buffer[j] = aa[i * ncols + j];
}
MPI_Send(buffer, ncols, MPI_DOUBLE, i+1, i+1, MPI_COMM_WORLD);
numsent++;
}
for (i = 0; i < nrows; i++) {
MPI_Recv(&ans, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
MPI_COMM_WORLD, &status);
sender = status.MPI_SOURCE;
anstype = status.MPI_TAG;
c[anstype-1] = ans;
if (numsent < nrows) {
for (j = 0; j < ncols; j++) {
buffer[j] = aa[numsent*ncols + j];
}
MPI_Send(buffer, ncols, MPI_DOUBLE, sender, numsent+1,
MPI_COMM_WORLD);
numsent++;
} else {
MPI_Send(MPI_BOTTOM, 0, MPI_DOUBLE, sender, 0, MPI_COMM_WORLD);
}
}
endtime = MPI_Wtime();
printf("%f\n",(endtime - starttime));
} else {
// Slave Code goes here
MPI_Bcast(b, ncols, MPI_DOUBLE, master, MPI_COMM_WORLD);
if (myid <= nrows) {
while(1) {
MPI_Recv(buffer, ncols, MPI_DOUBLE, master, MPI_ANY_TAG,
MPI_COMM_WORLD, &status);
if (status.MPI_TAG == 0){
break;
}
row = status.MPI_TAG;
ans = 0.0;
#pragma omp parallel for reduction(+:ans)
for (j = 0; j < ncols; j++) {
ans += buffer[j] * b[j];
}
MPI_Send(&ans, 1, MPI_DOUBLE, master, row, MPI_COMM_WORLD);
}
}
}
} else {
fprintf(stderr, "Usage matrix_times_vector <size>\n");
}
MPI_Finalize();
return 0;
}
|
exercicio02.c | #include <stdio.h>
#include <stdlib.h>
#include "omp.h"
static long num_steps = 100000000;
float* cria_vet(int tamanho);
int main() {
double pi, sum = 0.0, tempo_inicial, tempo_final, step;
int i;
step = 1.0 / (double)num_steps;
int nthreads = 4;
omp_set_num_threads(nthreads);
float *vet = cria_vet(nthreads);
tempo_inicial = omp_get_wtime();
#pragma omp parallel
{
int id = omp_get_thread_num();
double local_sum = 0.0, x;
int i;
printf("threads %d\n", id);
for (i = id; i < num_steps; i = i + nthreads) {
x = (i + 0.5) * step;
local_sum = local_sum + 4.0 / (1.0 + x * x);
}
vet[id] = local_sum;
}
for(i = 0; i < nthreads; i++) {
sum = sum + vet[i];
}
pi = step * sum;
tempo_final = omp_get_wtime();
printf("\n Pi = %lf", pi);
printf("\n Tempo gasto: %lf", tempo_final - tempo_inicial);
free(vet);
return 0;
}
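/*
Hedged sketch (illustrative; EXERCICIO02_REDUCTION_DEMO and pi_reduction are
assumed names, not part of the original exercise): the same midpoint-rule
integral with an OpenMP reduction, which avoids the shared partial-sum array
and its false-sharing cost.
*/
#ifdef EXERCICIO02_REDUCTION_DEMO
double pi_reduction(void) {
double sum = 0.0, step = 1.0 / (double)num_steps;
long i;
#pragma omp parallel for reduction(+:sum)
for (i = 0; i < num_steps; i++) {
double x = (i + 0.5) * step;
sum += 4.0 / (1.0 + x * x);
}
return step * sum;
}
#endif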
float* cria_vet(int tamanho) {
int i;
float *vet = (float *)calloc(tamanho, sizeof(float));
for(i=0; i < tamanho; i++) {
vet[i] = 0.0;
}
return vet;
}
|
stepper_parallel.c | #include "stepper_parallel.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#include <omp.h>
#include <stdio.h>
//ldoc on
/**
* ## Implementation
*
* ### Structure allocation
*/
central2d_t* central2d_init(float grid_width,
float grid_height,
int nx,
int ny,
int nfield,
flux_t flux,
speed_t speed,
float cfl)
{
/* Description: This function is essentially a constructor for a central2d
structure. The parameters that it accepts are used to set up a central2d
structure. This function returns a pointer to that structure.
What are the arguments?
grid_width - the length of the grid (of canonical cells) in the x direction.
grid_height - the length of the grid (of canonical cells) in the y direction.
nx - number of cells in the x direction.
ny - number of cells in the y direction
nfield - The number of quantities that we keep track of in each cell. Equivalently,
this is the number of components in FU, GU, or U for a given cell (3 in our
case), i.e. the number of sub-arrays in FU, GU, and U.
flux - a pointer to a function to update F and G using U (see shallow2d.c)
speed - pointer to a function that will calculate the maximum wave speed in
the x and y directions (see shallow2d.c)
cfl - dictates the allowed time step. */
// We extend to a four cell buffer to avoid BC comm on odd time steps
int ng = 4;
// allocate central2d object.
central2d_t* sim = (central2d_t*) malloc(sizeof(central2d_t));
// populate some of its members.
sim->nx = nx;
sim->ny = ny;
sim->ng = ng;
sim->nfield = nfield;
sim->dx = grid_width/nx;
sim->dy = grid_height/ny;
sim->flux = flux;
sim->speed = speed;
sim->cfl = cfl;
// Calculate nx_all, ny_all... use these to calculate N (the size of
// U, U_half, FU, or GU)
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int ncells = nx_all*ny_all;
int N = nfield*ncells;
// Here we allocate a single large block of memory. We split this block
// into five pieces. The first four go to U, U_half, FU and GU, respectively
// Each of these pieces have size N.
// The last piece, which has size 6*nx_all, goes to scratch (which is
// essentially used to hold "scratch" calculations that are used in other
// calculations but which we don't need to keep track of).
sim->U = (float*) malloc((4*N + 6*nx_all)* sizeof(float));
sim->U_half = sim->U + N;
sim->FU = sim->U + 2*N;
sim->GU = sim->U + 3*N;
sim->scratch = sim->U + 4*N;
return sim;
} // central2d_t* central2d_init(float grid_width,...
void central2d_free(central2d_t* sim)
{
/* Description: This function is a destructor for central2d structures.
What is the argument?
sim - a pointer to the central2d object that you want to destroy. */
free(sim->U);
free(sim);
} // void central2d_free(central2d_t* sim)
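/* Example (an illustrative sketch, not part of the original source): assuming
   a flux/speed pair like the ones in shallow2d.c (the names below are
   assumed), a simulation would be created and torn down roughly like this:

       central2d_t* sim = central2d_init(2.0f, 2.0f, 200, 200, 3,
                                         shallow2d_flux, shallow2d_speed,
                                         0.45f);
       // ... advance the simulation, write frames, etc. ...
       central2d_free(sim);
*/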
int central2d_offset( central2d_t* sim,
int k,
int ix,
int iy)
{
/* Description: this function is used to access different elements of U.
U is an nfield by nx_all by ny_all array. We think of U as a sequence of
nfield (three, in our case) nx_all by ny_all subarrays, each of which is
stored in ROW MAJOR order.
The first subarray (the first nx_all*ny_all elements of U) store the value of
"h" for each cell in the grid (including ghost cells)
The second subarray (the next nx_all*ny_all elements of U) store the value of
"hu" for each cell in the grid (including ghost cells).
The third subarray (the final nx_all*ny_all elements of U) store the value of
"hv" for each cell in the grid (including ghost cells),
What are the arguments?
sim - a pointer to the central2d structure whose U array you want to access.
k - the field (component) number you want to access. For us,
k = 0 means "h" component
k = 1 means "hu" component
k = 2 means "hv" component
ix, iy - the x, y indices of a canonical cell (within the canonical grid) that
we want to access. ix should be in the set {0, 1... nx-1} and iy should be
in the set {0, 1... ny-1} */
// How does this work?
//
// Suppose that we want to find the hu component of a particular cell in the
// grid. We know that U + nx_all*ny_all is the first index of the subarray
// of U which stores the "hu" component of every cell in the grid (including
// ghost cells). The first and last ng rows and columns of the sub array
// correspond to ghost cells. Thus, the row of the sub array corresponding
// to the iyth “canonical” row is iy + ng. Likewise, the column of the sub
// array corresponding to the ixth “canonical” column is ng + ix. Therefore,
// the location (in memory, within the kth sub array of U) of the value of the
// "hu" component of the (ix, iy) canonical cell is (ng + iy)*nx_all + (ng + ix).
// (remember, each sub array is in ROW MAJOR order)
//
// Combining this with the fact that the first entry of the kth sub array of
// U is at k*nx_all*ny_all, we can conclude that the desired quantity is at
// the following location:
//
// U + k*nx_all*ny_all + (ng+iy)*nx_all + (ng+ix).
//
// Which is exactly what this function returns.
// fetch nx, ny, ng, use them to calculate nx_all and ny_all
int nx = sim->nx, ny = sim->ny, ng = sim->ng;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
// Return the desired quantity (See "How does this work?" above)
return (k*ny_all + (ng + iy))*nx_all + (ng + ix);
} // int central2d_offset( central2d_t* sim,..
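// Example (illustrative, an editorial addition): the water height h at
// canonical cell (ix, iy) of a simulation sim can be read as
//     float h = sim->U[central2d_offset(sim, 0, ix, iy)];
// k = 1 and k = 2 fetch the hu and hv components in the same way.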
/**
* ### Boundary conditions
*
* In finite volume methods, boundary conditions are typically applied by
* setting appropriate values in ghost cells. For our framework, we will
* apply periodic boundary conditions; that is, waves that exit one side
* of the domain will enter from the other side.
*
* We apply the conditions by assuming that the cells with coordinates
* `nghost <= ix <= nx+nghost` and `nghost <= iy <= ny+nghost` are
* "canonical", and setting the values for all other cells `(ix,iy)`
* to the corresponding canonical values `(ix+p*nx,iy+q*ny)` for some
* integers `p` and `q`.
*/
static inline
void copy_subgrid(float* restrict dst,
const float* restrict src,
int nx,
int ny,
int stride_dst,
int stride_src)
{
/* Description: This copies an nx by ny part of src to dst.
Both src and dst are assumed to be in ROW MAJOR order.
What are the arguments?
dst - the destination of the copy
src - the source of the copy
nx - the number of cells in the x direction to be copied.
ny - the number of cells in the y direction to be copied.
stride_dst - the distance (in memory) between successive elements of a column
of dst. Equivalently, the size (in memory) of a row of dst.
stride_src - the distance (in memory) between successive elements of a column
of src. Equivalently, the size (in memory) of a row of src. */
for (int iy = 0; iy < ny; ++iy) {
for (int ix = 0; ix < nx; ++ix) {
dst[ix + iy*stride_dst] = src[ix + iy*stride_src];
} // for (int ix = 0; ix < nx; ++ix) {
} // for (int iy = 0; iy < ny; ++iy) {
} // void copy_subgrid(float* restrict dst,...
void central2d_periodic(float* restrict U_global,
int nx,
int ny,
int ng,
int nfield)
{
/* Description: This function applies periodic boundary conditions to U.
U is an nfield by nx_all by ny_all array. We think of U as a sequence of
nfield sub-arrays, each of size nx_all by ny_all. Each sub array is stored
in ROW MAJOR order.
What are the arguments?
U - the U array of a central2d structure
nx - number of canonical cells in the x direction of the grid
ny - the number of canonical cells in the y direction of the grid
ng - number of layers of ghost cells (the first and last ng rows of U are
ghost cells. Likewise, the first and last ng columns of U are ghost cells).
nfield - the number of fields/components/subarrays in U.
*/
// Stride and number per field.
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int s = nx_all;
int field_stride = nx_all*ny_all;
/* Offsets of left, right, top, and bottom data blocks and ghost blocks
Increasing the row index increases the y value. Increasing the column index
increases the x value. Thus, the top of the grid corresponds to the last row.
Likewise, the right of the grid corresponds to the last column.
l denotes the address of the bottom left (min x and y indices) of the cells
in U which will be copied into the left ghost cells of U.
lg denotes the address of the bottom left (min x and y indices) of the left
ghost cells in U.
r, b, t, rg, bg and tg are similar. If we think about it, the locations of
l, r, b, t, lg, rg, bg, and tg should make sense. (draw a picture, it helps!) */
int l = nx, lg = 0;
int r = ng, rg = nx + ng;
int t = ng*s, tg = (ny + ng)*s;
int b = ny*s, bg = 0;
// Copy data into ghost cells on each side
for (int k = 0; k < nfield; ++k) {
// Get the address of the kth subarray of U.
float* Uk_global = U_global + k*field_stride;
copy_subgrid(Uk_global + lg, Uk_global + l, ng, ny_all, s, s);
copy_subgrid(Uk_global + rg, Uk_global + r, ng, ny_all, s, s);
copy_subgrid(Uk_global + tg, Uk_global + t, nx_all, ng, s, s);
copy_subgrid(Uk_global + bg, Uk_global + b, nx_all, ng, s, s);
} // for (int k = 0; k < nfield; ++k) {
} // void central2d_periodic(float* restrict U,
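// Example (illustrative, an editorial addition): with nx = 8 and ng = 4 we
// have s = nx_all = 16, so the left ghost block (columns 0..3) is filled from
// columns 8..11. Ghost column 3, just left of the first canonical column 4,
// receives column 11, the rightmost canonical column -- exactly the periodic
// wrap-around described above.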
void central2d_local_BC(float* restrict U,
int nx,
int ny,
int ng,
int nfield,
float* restrict U_global,
int nx_global,
int ny_global,
int xlow_local,
int ylow_local)
{
/* Description: This function applies boundary conditions to U a piece of
the partition of the global U array.
U is an nfield by nx_all by ny_all array. We think of U as a sequence of
nfield sub-arrays, each of size nx_all by ny_all. Each sub array is stored
in ROW MAJOR order.
What are the arguments?
U - the U array of a central2d structure for a piece of the global grid.
nx - number of canonical cells in the x direction of the grid
ny - the number of canonical cells in the y direction of the grid
ng - number of layers of ghost cells (the first and last ng rows of U are
ghost cells. Likewise, the first and last ng columns of U are ghost cells).
nfield - the number of fields/components/subarrays in U.
U_global - the U array for the global grid.
nx_global - the number of canonical cells in the x direction of the global
grid.
ny_global - the number of canonical cells in the y direction of the global
grid.
xlow_local, ylow_local - these tell us how U fits into U_global. In
particular, the first column and row of canonical cells in U correspond to
canonical column xlow_local and canonical row ylow_local of the global U. */
// Stride and number per field for local U.
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int stride_local = nx_all;
int field_stride = nx_all*ny_all;
// Stride and number per field for global U.
int nx_global_all = nx_global + 2*ng;
int ny_global_all = ny_global + 2*ng;
int stride_global = nx_global_all;
int field_stride_global = nx_global_all*ny_global_all;
/* Offsets of left, right, top, and bottom data blocks and ghost blocks
Increasing the row index increases the y value. Increasing the column index
increases the x value. Thus, the top of the grid corresponds to the last row.
Likewise, the right of the grid corresponds to the last column.
l denotes the address of the bottom left (min x and y indices) of the cells
in U_global which will be copied into the left ghost cells of U.
lg denotes the address of the bottom left (min x and y indices) of the left
ghost cells in U.
r, b, t, rg, bg, and tg are similar.
To get these, we first need to find the address of the cell of U_global (for a
particular field/component/subarray) which corresponds to the bottom left
ghost cell of U.
To do this, let's think about which row and column this corresponds to. There
are nx_global_all columns in the global grid (with ghost cells). The first ng
of those columns hold ghost cells (which we used to apply BCs!). By
definition, xlow_local tells us which column (of canonical cells) in U_global
corresponds to the first column of canonical cells of U. Thus, the first
column of canonical cells in U corresponds to column xlow_local + ng in
U_global. The first column of ghost cells is ng columns to the left of that.
Thus, the column index in U_global corresponding to the leftmost column of
ghost cells in U is xlow_local + ng - ng = xlow_local.
Using analogous logic, we can conclude that the bottom row of ghost cells in U
corresponds to row ylow_local + ng - ng = ylow_local in U_global.
Since there are nx_global_all entries per row of U_global (which is stored in
ROW MAJOR order), the address in U_global corresponding to the bottom
left most ghost cell in U is xlow_local + ylow_local*nx_global_all. We call
this quantity offset. */
int offset = xlow_local + ylow_local*nx_global_all;
int l = offset;
int lg = 0;
int r = offset + (nx + ng);
int rg = nx + ng;
int t = offset + (ny + ng)*nx_global_all;
int tg = (ny + ng)*nx_all;
int b = offset;
int bg = 0;
for (int k = 0; k < nfield; ++k) {
// Get the address of the kth subarray of U and U_global.
float* Uk_local = U + k*field_stride;
float* Uk_global = U_global + k*field_stride_global;
copy_subgrid(Uk_local + lg, Uk_global + l, ng, ny_all, stride_local, stride_global);
copy_subgrid(Uk_local + rg, Uk_global + r, ng, ny_all, stride_local, stride_global);
copy_subgrid(Uk_local + tg, Uk_global + t, nx_all, ng, stride_local, stride_global);
copy_subgrid(Uk_local + bg, Uk_global + b, nx_all, ng, stride_local, stride_global);
} // for (int k = 0; k < nfield; ++k) {
} // void central2d_local_BC(float* restrict U,...
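// Example (illustrative, an editorial addition): with ng = 4 and a block
// whose first canonical column is global canonical column 96 (xlow_local =
// 96, so the block starts at global array column 100), offset selects global
// array column 96, and the block's left ghost columns are filled from global
// columns 96..99 -- the four canonical columns 92..95 sitting immediately
// left of the block.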
void central2d_local_to_global(float* restrict U,
int nx,
int ny,
int ng,
int nfield,
float* restrict U_global,
int nx_global,
int ny_global,
int xlow_local,
int ylow_local)
{
/* Description: This function is used to map the first and last ng rows of
canonical cells in U to their corresponding entries in U_global. These
elements are the only elements that the threads communicate to one another.
Thus, this function gets global U ready for the next time step.
What are the arguments?
U - the U array of a central2d structure for a piece of the global grid.
nx - number of canonical cells in the x direction of the grid
ny - the number of canonical cells in the y direction of the grid
ng - number of layers of ghost cells (the first and last ng rows of U are
ghost cells. Likewise, the first and last ng columns of U are ghost cells).
nfield - the number of fields/components/subarrays in U.
U_global - the U array for the global grid.
nx_global - the number of canonical cells in the x direction of the global
grid.
ny_global - the number of canonical cells in the y direction of the global
grid.
xlow_local, ylow_local - these tell us how U fits into U_global. In
particular, the first column and row of canonical cells in U correspond to
canonical column xlow_local and canonical row ylow_local of the global U. */
// Stride and number per field for local U.
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int stride_local = nx_all;
int field_stride = nx_all*ny_all;
// Stride and number per field for global U.
int nx_global_all = nx_global + 2*ng;
int ny_global_all = ny_global + 2*ng;
int stride_global = nx_global_all;
int field_stride_global = nx_global_all*ny_global_all;
/* Find the addresses of the top, bottom, left, and right most canonical
cells within U and U_global.
Increasing the row index increases the y value. Increasing the column index
increases the x value. Thus, the top of the grid corresponds to the last row.
Likewise, the right of the grid corresponds to the last column.
l_local denotes the address, within U, of the bottom left (min x and y
indices) canonical cell.
l_global denotes the address of the cell in U_global which corresponds to
that bottom left canonical cell of U.
r_local, r_global, b_local, b_global, t_local, and t_global are similar.
To get these, we first need to find the address of the cell of U_global (for a
particular field/component/subarray) which corresponds to the bottom left
canonical cell in the local U.
To do this, let's think about which row and column this corresponds to. There
are nx_global_all columns in the global grid (with ghost cells). The first ng
of those columns hold ghost cells (which we used to apply BCs!). By
definition, xlow_local tells us which column (of canonical cells) in U_global
corresponds to the first column of canonical cells of U. Thus, the first
column of canonical cells in U corresponds to column xlow_local + ng in
U_global.
Using analogous logic, we can conclude that the bottom row of canonical cells
in U corresponds to row ylow_local + ng in U_global.
Since there are nx_global_all entries per row of U_global (which is stored in
ROW MAJOR order), the address in U_global corresponding to the bottom
left most canonical cell in U is (xlow_local + ng) + (ylow_local + ng)*nx_global_all.
We call this quantity offset_global.
Similarly, we find the address within U of the bottom left most canonical
cell. This quantity, which we call offset_local, is equal to ng + ng*nx_all
(think about it). */
int offset_global = (xlow_local + ng) + (ylow_local + ng)*nx_global_all;
int offset_local = ng + ng*nx_all;
int l_local = offset_local;
int l_global = offset_global;
int r_local = offset_local + (nx - ng);
int r_global = offset_global + (nx - ng);
int t_local = offset_local + (ny - ng)*nx_all;
int t_global = offset_global + (ny - ng)*nx_global_all;
int b_local = offset_local;
int b_global = offset_global;
for (int k = 0; k < nfield; ++k) {
// Get the address of the kth subarray of U and U_global.
float* Uk_local = U + k*field_stride;
float* Uk_global = U_global + k*field_stride_global;
copy_subgrid(Uk_global + l_global, Uk_local + l_local, ng, ny, stride_global, stride_local);
copy_subgrid(Uk_global + r_global, Uk_local + r_local, ng, ny, stride_global, stride_local);
copy_subgrid(Uk_global + t_global, Uk_local + t_local, nx, ng, stride_global, stride_local);
copy_subgrid(Uk_global + b_global, Uk_local + b_local, nx, ng, stride_global, stride_local);
} // for (int k = 0; k < nfield; ++k) {
} // void central2d_local_to_global(float* restrict U,
void central2d_U_to_global_U(float* restrict U,
int nx,
int ny,
int ng,
int nfield,
float* restrict U_global,
int nx_global,
int ny_global,
int xlow_local,
int ylow_local)
{
/* Description: This function copies the canonical cells of U into U_global.
U is an nfield by nx_all by ny_all array. We think of U as a sequence of
nfield sub-arrays, each of size nx_all by ny_all. Each sub array is stored
in ROW MAJOR order.
What are the arguments?
U - the U array of a central2d structure for a piece of the global grid.
nx - number of canonical cells in the x direction of the grid
ny - the number of canonical cells in the y direction of the grid
ng - number of layers of ghost cells (the first and last ng rows of U are
ghost cells. Likewise, the first and last ng columns of U are ghost cells).
nfield - the number of fields/components/subarrays in U.
U_global - the U array for the global grid.
nx_global - the number of canonical cells in the x direction of the global
grid.
ny_global - the number of canonical cells in the y direction of the global
grid.
xlow_local, ylow_local - these tell us how U fits into U_global. In
particular, the first column and row of canonical cells in U correspond to
canonical column xlow_local and canonical row ylow_local of the global U. */
// Stride and number per field for local U.
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int stride_local = nx_all;
int field_stride = nx_all*ny_all;
// Stride and number per field for global U.
int nx_global_all = nx_global + 2*ng;
int ny_global_all = ny_global + 2*ng;
int stride_global = nx_global_all;
int field_stride_global = nx_global_all*ny_global_all;
/* Offsets of U in U_global.
We need to find the address of the cell of U_global (for a particular
field/component/subarray) which corresponds to the bottom left canonical
cell of U.
To do this, let's think about which row and column this corresponds to. There
are nx_global_all columns in the global grid (with ghost cells). The first ng
of those columns hold ghost cells (which we used to apply BCs!). By
definition, xlow_local tells us which column (of canonical cells) in U_global
corresponds to the first column of canonical cells of U. Thus, the first
column of canonical cells in U corresponds to column xlow_local + ng in
U_global.
Using analogous logic, we can conclude that the bottom row of canonical cells
in U corresponds to row ylow_local + ng in U_global.
Since there are nx_global_all entries per row of U_global (which is stored in
ROW MAJOR order), the address in U_global corresponding to the bottom
left most canonical cell in U is (xlow_local + ng) + (ylow_local + ng)*nx_global_all.
We call this quantity offset_global.
Similarly, we find the address within U of the bottom left most canonical
cell. This quantity, which we call offset_local, is equal to ng + ng*nx_all
(think about it). */
int offset_global = (xlow_local + ng) + (ylow_local + ng)*nx_global_all;
int offset_local = ng + ng*nx_all;
for (int k = 0; k < nfield; ++k) {
// Get the address of the kth subarray of U and U_global.
float* Uk_local = U + k*field_stride;
float* Uk_global = U_global + k*field_stride_global;
// Copy the canonical cells of U to their corresponding locations in U_global
copy_subgrid(Uk_global + offset_global, Uk_local + offset_local, nx, ny, stride_global, stride_local);
} // for (int k = 0; k < nfield; ++k) {
} // void central2d_U_to_global_U(float* restrict U,...
/**
* ### Derivatives with limiters
*
* In order to advance the time step, we also need to estimate
* derivatives of the fluxes and the solution values at each cell.
* In order to maintain stability, we apply a limiter here.
*
 * The minmod limiter *looks* like it should be expensive to compute,
* since superficially it seems to require a number of branches.
* We do something a little tricky, getting rid of the condition
* on the sign of the arguments using the `copysign` instruction.
* If the compiler does the "right" thing with `max` and `min`
* for floating point arguments (translating them to branch-free
* intrinsic operations), this implementation should be relatively fast.
*/
// Branch-free computation of minmod of two numbers times 2s
static inline
float xmin2s(float s, float a, float b) {
float sa = copysignf(s, a);
float sb = copysignf(s, b);
float abs_a = fabsf(a);
float abs_b = fabsf(b);
float min_abs = (abs_a < abs_b ? abs_a : abs_b);
return (sa+sb) * min_abs;
}
// Limited combined slope estimate
static inline
float limdiff(float um, float u0, float up) {
const float theta = 2.0;
const float quarter = 0.25;
float du1 = u0 - um; // Difference to left
float du2 = up - u0; // Difference to right
float duc = up - um; // Twice centered difference
return xmin2s( quarter, xmin2s(theta, du1, du2), duc );
}
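// Worked example (illustrative, an editorial addition): with um = 0, u0 = 1,
// up = 4 we get du1 = 1, du2 = 3, duc = 4. Then xmin2s(2, 1, 3) =
// (2+2)*min(1,3) = 4, and limdiff returns xmin2s(0.25, 4, 4) =
// (0.25+0.25)*4 = 2, i.e. half the centered difference, as expected for
// smooth data. If du1 and du2 had opposite signs, sa + sb would cancel to 0
// and the limited slope would be zero.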
// Compute limited derivs
static inline
void limited_deriv1(float* restrict du,
const float* restrict u,
int ncell)
{
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i - 1], u[i], u[i + 1]);
}
// Compute limited derivs across stride
static inline
void limited_derivk(float* restrict du,
const float* restrict u,
int ncell, int stride)
{
assert(stride > 0);
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i - stride], u[i], u[i + stride]);
}
/**
* ### Advancing a time step
*
* Take one step of the numerical scheme. This consists of two pieces:
 * a first-order predictor computed at a half time step, which is used
* to obtain new $F$ and $G$ values; and a corrector step that computes
* the solution at the full step. For full details, we refer to the
* [Jiang and Tadmor paper][jt].
*
 * The `central2d_step` function takes two key arguments: the `io` flag,
 * which is the time step modulo 2 (0 if even, 1 if odd); and the `dt`
 * argument, which actually determines the time step length. We need
* to know the even-vs-odd distinction because the Jiang-Tadmor
* scheme alternates between a primary grid (on even steps) and a
* staggered grid (on odd steps). This means that the data at $(i,j)$
* in an even step and the data at $(i,j)$ in an odd step represent
* values at different locations in space, offset by half a space step
* in each direction. Every other step, we shift things back by one
* mesh cell in each direction, essentially resetting to the primary
* indexing scheme.
*
* We're slightly tricky in the corrector in that we write
* $$
* v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j)-d(i,j))
* $$
* where $s(i,j)$ comprises the $u$ and $x$-derivative terms in the
* update formula, and $d(i,j)$ the $y$-derivative terms. This cuts
* the arithmetic cost a little (not that it's that big to start).
* It also makes it more obvious that we only need four rows worth
* of scratch space.
*/
// Predictor half-step
static
void central2d_predict(float* restrict U_half,
float* restrict scratch,
const float* restrict U,
const float* restrict FU,
const float* restrict GU,
float dtcdx2,
float dtcdy2,
int nx_all,
int ny_all,
int nfield)
{
/* Description: This function uses U, FU, and FG to calculate U_half (U on a
grid which is staggered by a half step with respect to the grid that U is on).
What are the arguments?
U_half, scratch, U, FU, GU - the addresses of the corresponding members of
a central2d structure. Each of U_half, U, FU, and GU is an nfield by nx_all
by ny_all array (a 3d array!)
dtcdx2 - (1/2)(dt/dx) (see central2d_step)
dtcdy2 - (1/2)(dt/dy) (see central2d_step)
nx_all - number of columns (x direction) of cells (including ghost cells).
ny_all - number of rows (y direction) of cells (including ghost cells).
nfield - the number of fields/subarrays/components in U, U_half, FU, GU. */
float* restrict FUx = scratch;
float* restrict GUy = scratch + nx_all;
for (int k = 0; k < nfield; ++k) {
for (int iy = 1; iy < ny_all - 1; ++iy) {
/* Why does this start at 1 and end at ny_all - 1? Because we need to calculate a
derivative! Remember, the first and last ng rows/columns of the grid are
“ghost cells”. The ghost cells exist so that we can implement the periodic
boundary conditions. We really only care about the canonical cells.
Suppose that we want to calculate the x derivative of some quantity at the
(i,j) cell. For this to work, there needs to be (i-1, j) and (i+1, j) cells.
Thus, i can NOT be 0 or nx_all - 1 (the largest index in the cell grid).
The same basic argument holds for y (we can’t calculate a y derivative if
y = 0 or y = ny_all - 1). Thus, we only calculate at the cells whose i,j
indicies are in {1,2.... nx_all-2} x {1,2.. ny_all-2}
This means that we don’t calculate the derivative on the first/last
row/column, but this is fine because those rows/columns are ghost cells! */
int offset = (k*ny_all + iy)*nx_all + 1;
/* What's offset?
Remember that FU, GU, U and U_half are nfield by nx_all by ny_all arrays.
For each k in {0,1,... nfield-1}, FU + k*nx_all*ny_all is the starting address
of the kth subarray of FU. This sub array is stored in ROW MAJOR order.
Thus, offset is the starting address (skipping the first ghost entry) of the
iyth row of the kth sub matrix of FU or GU or U or U_half */
limited_deriv1(FUx + 1, FU + offset, nx_all - 2);
limited_derivk(GUy + 1, GU + offset, nx_all - 2, nx_all);
for (int ix = 1; ix < nx_all - 1; ++ix) {
int offset = (k*ny_all + iy)*nx_all + ix;
/* What is this offset?
This is the address of the (ix, iy) entry of the kth subarray of FU/GU/
U/U_half (remember, there are ghost cells!) */
// Calculate the (ix, iy) component of the kth component of U_half!
U_half[offset] = U[offset] - dtcdx2*FUx[ix] - dtcdy2*GUy[ix];
} // for (int ix = 1; ix < nx_all - 1; ++ix) {
} // for (int iy = 1; iy < ny_all - 1; ++iy) {
} // for (int k = 0; k < nfield; ++k) {
} // void central2d_predict(float* restrict U_half,
// Corrector
static
void central2d_correct_sd(float* restrict s,
float* restrict d,
const float* restrict Uk_x,
const float* restrict Uk_y,
const float* restrict Uk,
const float* restrict FUk,
const float* restrict GUk,
float dtcdx2,
float dtcdy2,
int xlo,
int xhi)
{
/* Description: This function calculates s and d, which are used to
update U_half in central2d_correct, for a row of the grid. We calculate s
and d for each i in {xlo, xlo + 1,... xhi - 1}.
What are the arguments?
s, d - values which are used to calculate the "corrected" value of U_half.
Uk_x - the x derivative of the kth field/component/subarray of U.
Uk_y - the y derivative of the kth field/component/subarray of U.
Uk, FUk, GUk - the addresses of a row in the kth field/component/subarrays of
U, FU, and GU.
dtcdx2 - (1/2)(dt/dx) (see central2d_step)
dtcdy2 - (1/2)(dt/dy) (see central2d_step)
xhi, xlo - the upper and lower indices within a particular row of the cell
grid upon which we want to calculate s and d. */
for (int ix = xlo; ix < xhi; ++ix) {
s[ix] =
0.2500f * (Uk [ix] + Uk [ix + 1]) +
0.0625f * (Uk_x[ix] - Uk_x[ix + 1]) +
dtcdx2 * (FUk [ix] - FUk [ix + 1]);
} // for (int ix = xlo; ix < xhi; ++ix) {
for (int ix = xlo; ix < xhi; ++ix) {
d[ix] =
0.0625f * (Uk_y[ix] + Uk_y[ix + 1]) +
dtcdy2 * (GUk [ix] + GUk [ix + 1]);
} // for (int ix = xlo; ix < xhi; ++ix) {
} // void central2d_correct_sd(float* restrict s,
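// Note (editorial) on the constants in central2d_correct_sd: 0.2500f and
// 0.0625f are the 1/4 and 1/16 weights of the Jiang-Tadmor corrector; s
// collects the u and x-derivative terms and d the y-derivative terms,
// matching the decomposition
// v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j) - d(i,j)) described earlier.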
// Corrector
static
void central2d_correct(float* restrict U_half,
float* restrict scratch,
const float* restrict U,
const float* restrict FU,
const float* restrict GU,
float dtcdx2,
float dtcdy2,
int xlo,
int xhi,
int ylo,
int yhi,
int nx_all,
int ny_all,
int nfield)
{
/* Description: This function "corrects" U_half (U on a grid which is
staggered by half a step with respect to the grid that U is on). The values of
U_half from central2d_predict were used to
update FU and GU. We then use these values to "correct" or update the values
of U_half.
What are the arguments?
U_half, scratch, U, FU, GU - members (of the same name) of the central2d
structure.
dtcdx2 - (1/2)(dt/dx) (see central2d_step).
dtcdy2 - (1/2)(dt/dy) (see central2d_step).
xlo - the index of the first column of canonical cells
xhi - the index of the last columns of canonical cells
ylo - the index of the first row of canonical cells
yhi - the index of the last row of canonical cells.
nx_all - number of columns (x direction) of cells (including ghost cells).
ny_all - number of rows (y direction) of cells (including ghost cells).
nfield - the number of fields/subarrays/components in U, U_half, FU, GU. */
assert(0 <= xlo && xlo < xhi && xhi <= nx_all);
assert(0 <= ylo && ylo < yhi && yhi <= ny_all);
// these hold the derivatives of u in the x and y directions.
float* restrict U_x = scratch;
float* restrict U_y = scratch + nx_all;
float* restrict s0 = scratch + 2*nx_all;
float* restrict d0 = scratch + 3*nx_all;
float* restrict s1 = scratch + 4*nx_all;
float* restrict d1 = scratch + 5*nx_all;
for (int k = 0; k < nfield; ++k) {
// U_half_k/Uk/FUk/GUk are the starting addresses of the kth sub-arrays
// of U_half/U/FU/GU.
float* restrict U_half_k = U_half + k*ny_all*nx_all;
const float* restrict Uk = U + k*ny_all*nx_all;
const float* restrict FUk = FU + k*ny_all*nx_all;
const float* restrict GUk = GU + k*ny_all*nx_all;
// Calculate derivatives of U in the x and y directions, use them to find
// s and d.
limited_deriv1(U_x + 1, Uk + ylo*nx_all + 1, nx_all - 2);
limited_derivk(U_y + 1, Uk + ylo*nx_all + 1, nx_all - 2, nx_all);
central2d_correct_sd( s1,
d1,
U_x, // Uk_x
U_y, // Uk_y
Uk + ylo*nx_all, // Uk
FUk + ylo*nx_all, // FUk
GUk + ylo*nx_all, // GUk
dtcdx2,
dtcdy2,
xlo,
xhi);
for (int iy = ylo; iy < yhi; ++iy) {
// swap s0 and s1... swap d0 and d1.
float* tmp;
tmp = s0; s0 = s1; s1 = tmp;
tmp = d0; d0 = d1; d1 = tmp;
// calculate derivatives of U in the x and y directions, use these to
// calculate s and d.
limited_deriv1(U_x + 1, Uk + (iy + 1)*nx_all + 1, nx_all - 2);
limited_derivk(U_y + 1, Uk + (iy + 1)*nx_all + 1, nx_all - 2, nx_all);
central2d_correct_sd( s1,
d1,
U_x, // Uk_x
U_y, // Uk_y
Uk + (iy + 1)*nx_all, // Uk
FUk + (iy + 1)*nx_all, // FUk
GUk + (iy + 1)*nx_all, // GUk
dtcdx2,
dtcdy2,
xlo,
xhi);
// Update U_half.
for (int ix = xlo; ix < xhi; ++ix) {
U_half_k[ix + iy*nx_all] = (s1[ix] + s0[ix]) - (d1[ix] - d0[ix]);
} // for (int ix = xlo; ix < xhi; ++ix) {
} // for (int iy = ylo; iy < yhi; ++iy) {
} // for (int k = 0; k < nfield; ++k) {
} // void central2d_correct(float* restrict U_half,
static
void central2d_step(float* restrict U,
float* restrict U_half,
float* restrict scratch,
float* restrict FU,
float* restrict GU,
int io,
int nx,
int ny,
int ng,
int nfield,
flux_t flux,
speed_t speed,
float dt,
float dx,
float dy)
{
/* Description: This function completes a time step.
What are the arguments?
U_half, U, scratch, FU, GU - the members (of the same name) of the central2d
structure.
io - (time step) % 2
nx - number of canonical cells in the x direction.
ny - number of canonical cells in the y direction.
ng - number of layers of ghost cells.
nfield - number of fields/components/subarrays of FU/GU/U/U_half.
flux, speed - pointers to the flux and speed functions (see shallow2d.c)
dt - time step
dx, dy - dimensions of a cell. */
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
float dtcdx2 = 0.5*(dt/dx);
float dtcdy2 = 0.5*(dt/dy);
flux(FU, GU, U, nx_all*ny_all, nx_all*ny_all);
// Predictor step.
central2d_predict(U_half,
scratch,
U,
FU,
GU,
dtcdx2,
dtcdy2,
nx_all,
ny_all,
nfield);
// Calculate FU and GU using the values of U from the predictor step.
for (int iy = 1; iy < ny_all - 1; ++iy) {
int jj = iy*nx_all + 1;
flux(FU + jj, GU + jj, U_half + jj, nx_all - 2, nx_all*ny_all);
} // for (int iy = 1; iy < ny_all - 1; ++iy) {
// Corrector step.
central2d_correct(U_half + io*(nx_all + 1),
scratch,
U,
FU,
GU,
dtcdx2,
dtcdy2,
ng - io, // xlo
nx + ng - io, // xhi
ng - io, // ylo
ny + ng - io, // yhi
nx_all,
ny_all,
nfield);
} // void central2d_step(float* restrict U,...
/**
* ### Advance a fixed time
*
* The `run` method advances from time 0 (initial conditions) to time
* `tfinal`. Note that `run` can be called repeatedly; for example,
* we might want to advance for a period of time, write out a picture,
* advance more, and write another picture. In this sense, `tfinal`
* should be interpreted as an offset from the time represented by
* the simulator at the start of the call, rather than as an absolute time.
*
* We always take an even number of steps so that the solution
* at the end lives on the main grid instead of the staggered grid.
*/
static
int central2d_xrun(float* restrict U,
float* restrict U_half,
float* restrict scratch,
float* restrict FU,
float* restrict GU,
int nx,
int ny,
int ng,
int nfield,
flux_t flux,
speed_t speed,
float tfinal,
float dx,
float dy,
float cfl,
float* restrict U_global,
float* restrict shared_buffer,
int nx_global,
int ny_global,
const int xlow_local,
const int ylow_local)
{
/* Description: This function runs a simulation!
What are the arguments?
U, U_half, FU, GU - members (of the same name) of the central2d structure for
a piece of the partition of the global array.
nx - number of columns of canonical cells in U.
ny - number of rows of canonical cells in U.
ng - number of layers of ghost cells.
nfield - number of fields/components/subarrays of FU/GU/U/U_half. In our case,
this will be 3.
flux - pointer to a function which will update FU and GU using U (see
shallow2d.c)
speed - a function which will find the maximum wave velocity in the x and y
directions (see shallow2d.c).
tfinal - the final time we want to work towards.
dx, dy - dimensions of a cell.
cfl - used to determine the time step.
U_global - a pointer to the U array of the global sim.
shared_buffer - a pointer to a shared array. This array must have at least
one element. This is used to facilitate communication between the threads. In
particular, we will use this to calculate dt.
nx_global - number of columns of canonical cells in U_global.
ny_global - number of rows of canonical cells in U_global.
xlow_local - the columns in this thread's U array correspond to columns
xlow_local to xlow_local + nx of U_global.
ylow_local - the rows in this thread's U array correspond to rows
ylow_local to ylow_local + ny of U_global. */
// Set up for the main loop!
int nstep = 0;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
bool done = false;
float t = 0;
while (!done) {
float cxy[2] = {1.0e-15f, 1.0e-15f};
/* What's going on here ( ^ )?
We set the elements of cxy to small but non-zero values so that when we
calculate dt, we don’t divide by zero! */
/* Apply periodic boundary conditions to U_global. This ensures that the
ghost cells of U_global can easily be pulled into the ghost cells of each
processor's U. */
/* One way to improve this would be to eliminate the single directive and
instead put omp for worksharing directives inside central2d_periodic, so
that the different threads complete different parts of the BCs in parallel. */
#pragma omp single
{
central2d_periodic( U_global,
nx_global,
ny_global,
ng,
nfield);
} // #pragma omp single
/* Get boundary information for U from U_global */
central2d_local_BC(U,
nx,
ny,
ng,
nfield,
U_global,
nx_global,
ny_global,
xlow_local,
ylow_local);
/* Calculate maximum wave speed in the x and y directions, use this and cfl
to determine dt.
To do this, each thread calculates the maximum x and y wave speed for
its U and uses this to calculate dt for its data. The threads then one-by-
one store their values into the shared buffer.
Once every thread has written its data to the shared buffer, the buffer
contains the minimum dt, which each processor can then use. This is
essentially doing what an MPI reduce operation would do, but using openMP
to do it. */
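/* An alternative (an editorial note, not in the original design): OpenMP 3.1
and later support min reductions directly, so the single/critical/barrier
sequence below could in principle be replaced by computing each thread's
dt_local inside a region annotated with reduction(min: dt). The hand-rolled
version makes the communication explicit. */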
/* calculate dt using the data on our piece of the partition. */
speed(cxy, U, nx_all*ny_all, nx_all*ny_all);
float dt_local = cfl / fmaxf(cxy[0]/dx, cxy[1]/dy);
/* One thread initializes the shared buffer before the reduction (we need
to do this because the buffer may contain 0, which would wreak havoc on
the rest of the code; the single construct's implied barrier keeps the
other threads from racing ahead). */
#pragma omp single
{
shared_buffer[0] = dt_local;
} // #pragma omp single
// Now, each thread writes its data to shared_buffer one by one.
#pragma omp critical
{
shared_buffer[0] = fmin(dt_local, shared_buffer[0]);
} // #pragma omp critical
#pragma omp barrier
/* shared_buffer[0] now contains the minimum dt (across the threads), we
can read it in to move on. */
float dt = shared_buffer[0];
/* Check if we are ready to stop looping. This is how the loop eventually
stops and ensures that we always stop exactly at tfinal (think about it). */
if (t + 2*dt >= tfinal) {
dt = (tfinal - t)/2;
done = true;
} // if (t + 2*dt >= tfinal) {
// Compute an odd time step.
central2d_step( U,
U_half,
scratch,
FU,
GU,
0, // io
nx + 4, // nx
ny + 4, // ny
ng - 2, // ng
nfield,
flux,
speed,
dt,
dx,
dy);
// Compute an even time step. Note that the roles of U and U_half
// are swapped.
central2d_step( U_half,
U,
scratch,
FU,
GU,
1, // io
nx,
ny,
ng,
nfield,
flux,
speed,
dt,
dx,
dy);
t += 2*dt;
nstep += 2;
// copy boundary of U to corresponding entries of U global
central2d_local_to_global(U,
nx,
ny,
ng,
nfield,
U_global,
nx_global,
ny_global,
xlow_local,
ylow_local);
/* Wait until all threads are done copying their data into U. We need
to do this because at the start of the next time step, we're going to
apply periodic BCs, and that will only give the result we want if each
thread has written its cells into U_global. */
#pragma omp barrier
} // while (!done) {
/* Get global U ready for IO: Before we can print out the state of the system
(in the visualizer), we need each thread to write its local U to global U
(this is because the data must be written to file sequentially... this is
easiest to do if all of the data is in one place where one thread can write
it out to file). */
central2d_U_to_global_U(U,
nx,
ny,
ng,
nfield,
U_global, // U_global
nx_global, // nx_global
ny_global, // ny_global
xlow_local,
ylow_local);
// return the number of time steps.
return nstep;
} // int central2d_xrun(float* restrict U,
int central2d_run(central2d_t* sim_local,
central2d_t* sim,
const int xlow_local,
const int ylow_local,
float tfinal)
{
/* Description: This is a wrapper for central2d_xrun.
What are the arguments?
sim_local - a central2d structure which corresponds to a piece of the global
grid.
sim - a central2d structure which corresponds to the global grid (its U and
scratch arrays serve as U_global and the shared buffer in central2d_xrun).
xlow_local - the columns in sim_local's U array correspond to columns
xlow_local to xlow_local + sim_local->nx of sim's U.
ylow_local - the rows in sim_local's U array correspond to rows
ylow_local to ylow_local + sim_local->ny of sim's U.
tfinal - the final time in the simulation */
return central2d_xrun(sim_local->U,
sim_local->U_half,
sim_local->scratch,
sim_local->FU,
sim_local->GU,
sim_local->nx,
sim_local->ny,
sim_local->ng,
sim_local->nfield,
sim_local->flux,
sim_local->speed,
tfinal,
sim_local->dx,
sim_local->dy,
sim_local->cfl,
sim->U, // U_global
sim->scratch, // shared_buffer
sim->nx, // nx_global
sim->ny, // ny_global
xlow_local,
ylow_local);
} // int central2d_run(central2d_t* sim_local,...
|
GB_binop__band_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__band_uint8
// A.*B function (eWiseMult): GB_AemultB__band_uint8
// A*D function (colscale): GB_AxD__band_uint8
// D*A function (rowscale): GB_DxB__band_uint8
// C+=B function (dense accum): GB_Cdense_accumB__band_uint8
// C+=b function (dense accum): GB_Cdense_accumb__band_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_uint8
// C=scalar+B GB_bind1st__band_uint8
// C=scalar+B' GB_bind1st_tran__band_uint8
// C=A+scalar GB_bind2nd__band_uint8
// C=A'+scalar GB_bind2nd_tran__band_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x) & (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT8 || GxB_NO_BAND_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__band_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__band_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__band_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__band_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__band_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__band_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__band_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__band_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__band_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
GrB_Info GB_bind1st_tran__band_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
GrB_Info GB_bind2nd_tran__band_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
find_ellipse.ref.c | #include <sys/time.h>
#include <time.h>
#include <stdio.h>
#ifdef __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
#endif
static unsigned long long current_time_ns() {
#ifdef __MACH__
clock_serv_t cclock;
mach_timespec_t mts;
host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
clock_get_time(cclock, &mts);
mach_port_deallocate(mach_task_self(), cclock);
unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec;
return (unsigned long long)mts.tv_nsec + s;
#else
struct timespec t ={0,0};
clock_gettime(CLOCK_MONOTONIC, &t);
unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec;
return (((unsigned long long)t.tv_nsec)) + s;
#endif
}
#include "find_ellipse.h"
#include <sys/time.h>
// The number of sample points per ellipse
#define NPOINTS 150
// The expected radius (in pixels) of a cell
#define RADIUS 10
// The range of acceptable radii
#define MIN_RAD (RADIUS - 2)
#define MAX_RAD (RADIUS * 2)
// The number of different sample ellipses to try
#define NCIRCLES 7
#include "matrix2.h"
// Returns the current system time in microseconds
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the specified frame from the specified video file
// If cropped == true, the frame is cropped to pre-determined dimensions
// (hardcoded to the boundaries of the blood vessel in the test video)
// If scaled == true, all values are scaled to the range [0.0, 1.0]
MAT * get_frame(avi_t *cell_file, int frame_num, int cropped, int scaled) {
int dummy;
int width = AVI_video_width(cell_file);
int height = AVI_video_height(cell_file);
unsigned char *image_buf = (unsigned char *) malloc(width * height);
// There are 600 frames in this file (i.e. frame_num = 600 causes an error)
AVI_set_video_position(cell_file, frame_num);
//Read in the frame from the AVI
if(AVI_read_frame(cell_file, (char *)image_buf, &dummy) == -1) {
AVI_print_error("Error with AVI_read_frame");
exit(-1);
}
MAT * image_chopped;
if (cropped) {
// Crop and flip image so we deal only with the interior of the vein
image_chopped = chop_flip_image(image_buf, height, width, TOP, BOTTOM, 0, width - 1, scaled);
} else {
// Just flip the image
image_chopped = chop_flip_image(image_buf, height, width, 0, height - 1, 0, width - 1, scaled);
}
free(image_buf);
return image_chopped;
}
// Flips the specified image and crops it to the specified dimensions
MAT * chop_flip_image(unsigned char *image, int height, int width, int top, int bottom, int left, int right, int scaled) {
MAT * result = m_get(bottom - top + 1, right - left + 1);
int i, j;
if (scaled) {
double scale = 1.0 / 255.0;
for(i = 0; i <= (bottom - top); i++)
for(j = 0; j <= (right - left); j++)
//m_set_val(result, i, j, (double) image[((height - (i + top)) * width) + (j + left)] * scale);
m_set_val(result, i, j, (double) image[((height - 1 - (i + top)) * width) + (j + left)] * scale);
} else {
for(i = 0; i <= (bottom - top); i++)
for(j = 0; j <= (right - left); j++)
//m_set_val(result, i, j, (double) image[((height - (i + top)) * width) + (j + left)]);
m_set_val(result, i, j, (double) image[((height - 1 - (i + top)) * width) + (j + left)]);
}
return result;
}
// Given x- and y-gradients of a video frame, computes the GICOV
// score for each sample ellipse at every pixel in the frame
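// (Editorial note: the GICOV statistic for one stencil is
// mean(Grad)^2 / var(Grad); the code below stores mean / sqrt(var), the
// signed square root of that ratio, at each pixel for whichever stencil
// maximizes it.)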
MAT * ellipsematching(MAT * grad_x, MAT * grad_y) {
int i, n, k;
// Compute the sine and cosine of the angle to each point in each sample circle
// (which are the same across all sample circles)
double sin_angle[NPOINTS], cos_angle[NPOINTS], theta[NPOINTS];
for (n = 0; n < NPOINTS; n++) {
theta[n] = (double) n * 2.0 * PI / (double) NPOINTS;
sin_angle[n] = sin(theta[n]);
cos_angle[n] = cos(theta[n]);
}
// Compute the (x,y) pixel offsets of each sample point in each sample circle
int tX[NCIRCLES][NPOINTS], tY[NCIRCLES][NPOINTS];
for (k = 0; k < NCIRCLES; k++) {
double rad = (double) (MIN_RAD + 2 * k);
for (n = 0; n < NPOINTS; n++) {
tX[k][n] = (int) (cos(theta[n]) * rad);
tY[k][n] = (int) (sin(theta[n]) * rad);
}
}
int MaxR = MAX_RAD + 2;
// Allocate memory for the result matrix
int height = grad_x->m, width = grad_x->n;
MAT * gicov = m_get(height, width);
// Split the work among multiple threads, if OPEN is defined
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for
for (i = MaxR; i < width - MaxR; i++) {
double Grad[NPOINTS];
int j, k, n, x, y;
for (j = MaxR; j < height - MaxR; j++) {
// Initialize the maximal GICOV score to 0
double max_GICOV = 0;
// Iterate across each stencil
for (k = 0; k < NCIRCLES; k++) {
// Iterate across each sample point in the current stencil
for (n = 0; n < NPOINTS; n++) {
// Determine the x- and y-coordinates of the current sample point
y = j + tY[k][n];
x = i + tX[k][n];
// Compute the combined gradient value at the current sample point
Grad[n] = m_get_val(grad_x, y, x) * cos_angle[n] + m_get_val(grad_y, y, x) * sin_angle[n];
}
// Compute the mean gradient value across all sample points
double sum = 0.0;
for (n = 0; n < NPOINTS; n++) sum += Grad[n];
double mean = sum / (double)NPOINTS;
// Compute the variance of the gradient values
double var = 0.0;
for (n = 0; n < NPOINTS; n++) {
sum = Grad[n] - mean;
var += sum * sum;
}
var = var / (double) (NPOINTS - 1);
// Keep track of the maximal GICOV value seen so far
if (mean * mean / var > max_GICOV) {
m_set_val(gicov, j, i, mean / sqrt(var));
max_GICOV = mean * mean / var;
}
}
}
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma111_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
return gicov;
}
// Returns a circular structuring element of the specified radius
MAT * structuring_element(int radius) {
MAT * result = m_get(radius*2+1, radius*2+1);
int i, j;
for(i = 0; i < result->m; i++) {
for(j = 0; j < result->n; j++) {
if(sqrt((float)((i-radius)*(i-radius)+(j-radius)*(j-radius))) <= radius)
m_set_val(result, i, j, 1.0);
else
m_set_val(result, i, j, 0.0);
}
}
return result;
}
// Performs an image dilation on the specified matrix
// using the specified structuring element
MAT * dilate_f(MAT * img_in, MAT * strel) {
MAT * dilated = m_get(img_in->m, img_in->n);
// Find the center of the structuring element
int el_center_i = strel->m / 2, el_center_j = strel->n / 2, i;
// Split the work among multiple threads, if OPEN is defined
{ const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for
for (i = 0; i < img_in->m; i++) {
int j, el_i, el_j, x, y;
for (j = 0; j < img_in->n; j++) {
double max = 0.0, temp;
// Iterate across the structuring element
for (el_i = 0; el_i < strel->m; el_i++) {
for (el_j = 0; el_j < strel->n; el_j++) {
y = i - el_center_i + el_i;
x = j - el_center_j + el_j;
// Make sure we have not gone off the edge of the matrix
if (y >=0 && x >= 0 && y < img_in->m && x < img_in->n && m_get_val(strel, el_i, el_j) != 0) {
// Determine if this is maximal value seen so far
temp = m_get_val(img_in, y, x);
if (temp > max) max = temp;
}
}
}
// Store the maximum value found
m_set_val(dilated, i, j, max);
}
} ;
const unsigned long long parallel_for_end = current_time_ns();
printf("pragma186_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); }
return dilated;
}
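/*
 * Illustrative usage of the two routines above (variable names are
 * hypothetical, assuming the Meschach-style MAT API used throughout):
 *
 *   MAT * strel = structuring_element(4);
 *   MAT * dilated = dilate_f(img, strel);
 *   ...
 *   m_free(dilated); m_free(strel);
 */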
// M = number of sampling points in each segment
// N = number of segments of the curve
// Get the special TMatrix
MAT * TMatrix(unsigned int N, unsigned int M)
{
MAT * B = NULL, * LB = NULL, * B_TEMP = NULL, * B_TEMP_INV = NULL, * B_RET = NULL;
int * aindex, * bindex, * cindex, * dindex;
int i, j;
aindex = (int *)malloc(N*sizeof(int));
bindex = (int *)malloc(N*sizeof(int));
cindex = (int *)malloc(N*sizeof(int));
dindex = (int *)malloc(N*sizeof(int));
for(i = 1; i < N; i++)
aindex[i] = i-1;
aindex[0] = N-1;
for(i = 0; i < N; i++)
bindex[i] = i;
for(i = 0; i < N-1; i++)
cindex[i] = i+1;
cindex[N-1] = 0;
for(i = 0; i < N-2; i++)
dindex[i] = i+2;
dindex[N-2] = 0;
dindex[N-1] = 1;
B = m_get(N*M, N);
LB = m_get(M, N);
for(i = 0; i < N; i++)
{
m_zero(LB);
for(j = 0; j < M; j++)
{
double s = (double)j / (double)M;
double a, b, c, d;
a = (-1.0*s*s*s + 3.0*s*s - 3.0*s + 1.0) / 6.0;
b = (3.0*s*s*s - 6.0*s*s + 4.0) / 6.0;
c = (-3.0*s*s*s + 3.0*s*s + 3.0*s + 1.0) / 6.0;
d = s*s*s / 6.0;
m_set_val(LB, j, aindex[i], a);
m_set_val(LB, j, bindex[i], b);
m_set_val(LB, j, cindex[i], c);
m_set_val(LB, j, dindex[i], d);
}
int m, n;
for(m = i*M; m < (i+1)*M; m++)
for(n = 0; n < N; n++)
m_set_val(B, m, n, m_get_val(LB, m%M, n));
}
B_TEMP = mtrm_mlt(B, B, B_TEMP);
B_TEMP_INV = m_inverse(B_TEMP, B_TEMP_INV);
B_RET = mmtr_mlt(B_TEMP_INV, B, B_RET);
m_free(B);
m_free(LB);
m_free(B_TEMP);
m_free(B_TEMP_INV);
free(dindex);
free(cindex);
free(bindex);
free(aindex);
return B_RET;
}
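/*
 * Assuming the usual Meschach conventions mtrm_mlt(A, B, C) = A^T * B and
 * mmtr_mlt(A, B, C) = A * B^T, TMatrix returns the Moore-Penrose pseudo-inverse
 * (B^T B)^{-1} B^T of the (N*M x N) B-spline sampling matrix B, which maps the
 * M samples per segment back onto the N control points in the least-squares sense.
 */
// Resamples the 36 contour points so they are uniformly spaced along the contour's perimeter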
void uniformseg(VEC * cellx_row, VEC * celly_row, MAT * x, MAT * y)
{
double dx[36], dy[36], dist[36], dsum[36], perm = 0.0, uperm;
int i, j, index[36];
for(i = 1; i <= 36; i++)
{
dx[i%36] = v_get_val(cellx_row, i%36) - v_get_val(cellx_row, (i-1)%36);
dy[i%36] = v_get_val(celly_row, i%36) - v_get_val(celly_row, (i-1)%36);
dist[i%36] = sqrt(dx[i%36]*dx[i%36] + dy[i%36]*dy[i%36]);
perm+= dist[i%36];
}
uperm = perm / 36.0;
dsum[0] = dist[0];
for(i = 1; i < 36; i++)
dsum[i] = dsum[i-1]+dist[i];
for(i = 0; i < 36; i++)
{
double minimum=DBL_MAX, temp;
int min_index = 0;
for(j = 0; j < 36; j++)
{
temp = fabs(dsum[j]- (double)i*uperm);
if (temp < minimum)
{
minimum = temp;
min_index = j;
}
}
index[i] = min_index;
}
for(i = 0; i < 36; i++)
{
m_set_val(x, 0, i, v_get_val(cellx_row, index[i]));
m_set_val(y, 0, i, v_get_val(celly_row, index[i]));
}
}
//Get minimum element in a matrix
double m_min(MAT * m)
{
int i, j;
double minimum = DBL_MAX, temp;
for(i = 0; i < m->m; i++)
{
for(j = 0; j < m->n; j++)
{
temp = m_get_val(m, i, j);
if(temp < minimum)
minimum = temp;
}
}
return minimum;
}
//Get maximum element in a matrix
double m_max(MAT * m)
{
int i, j;
// -DBL_MAX is the most negative finite double; DBL_MIN (the smallest
// positive double) would be a wrong initial value for a maximum
double maximum = -DBL_MAX, temp;
for(i = 0; i < m->m; i++)
{
for(j = 0; j < m->n; j++)
{
temp = m_get_val(m, i, j);
if(temp > maximum)
maximum = temp;
}
}
return maximum;
}
VEC * getsampling(MAT * m, int ns)
{
int N = m->n > m->m ? m->n : m->m, M = ns;
int * aindex, * bindex, * cindex, * dindex;
int i, j;
VEC * retval = v_get(N*M);
aindex = (int *)malloc(N*sizeof(int));
bindex = (int *)malloc(N*sizeof(int));
cindex = (int *)malloc(N*sizeof(int));
dindex = (int *)malloc(N*sizeof(int));
for(i = 1; i < N; i++)
aindex[i] = i-1;
aindex[0] = N-1;
for(i = 0; i < N; i++)
bindex[i] = i;
for(i = 0; i < N-1; i++)
cindex[i] = i+1;
cindex[N-1] = 0;
for(i = 0; i < N-2; i++)
dindex[i] = i+2;
dindex[N-2] = 0;
dindex[N-1] = 1;
for(i = 0; i < N; i++)
{
for(j = 0; j < M; j++)
{
double s = (double)j / (double)M;
double a, b, c, d;
a = m_get_val(m, 0, aindex[i]) * (-1.0*s*s*s + 3.0*s*s - 3.0*s + 1.0);
b = m_get_val(m, 0, bindex[i]) * (3.0*s*s*s - 6.0*s*s + 4.0);
c = m_get_val(m, 0, cindex[i]) * (-3.0*s*s*s + 3.0*s*s + 3.0*s + 1.0);
d = m_get_val(m, 0, dindex[i]) * s*s*s;
v_set_val(retval, i*M+j,(a+b+c+d)/6.0);
}
}
free(dindex);
free(cindex);
free(bindex);
free(aindex);
return retval;
}
VEC * getfdriv(MAT * m, int ns)
{
int N = m->n > m->m ? m->n : m->m, M = ns;
int * aindex, * bindex, * cindex, * dindex;
int i, j;
VEC * retval = v_get(N*M);
aindex = (int *)malloc(N*sizeof(int));
bindex = (int *)malloc(N*sizeof(int));
cindex = (int *)malloc(N*sizeof(int));
dindex = (int *)malloc(N*sizeof(int));
for(i = 1; i < N; i++)
aindex[i] = i-1;
aindex[0] = N-1;
for(i = 0; i < N; i++)
bindex[i] = i;
for(i = 0; i < N-1; i++)
cindex[i] = i+1;
cindex[N-1] = 0;
for(i = 0; i < N-2; i++)
dindex[i] = i+2;
dindex[N-2] = 0;
dindex[N-1] = 1;
for(i = 0; i < N; i++)
{
for(j = 0; j < M; j++)
{
double s = (double)j / (double)M;
double a, b, c, d;
a = m_get_val(m, 0, aindex[i]) * (-3.0*s*s + 6.0*s - 3.0);
b = m_get_val(m, 0, bindex[i]) * (9.0*s*s - 12.0*s);
c = m_get_val(m, 0, cindex[i]) * (-9.0*s*s + 6.0*s + 3.0);
d = m_get_val(m, 0, dindex[i]) * (3.0 *s*s);
v_set_val(retval, i*M+j, (a+b+c+d)/6.0);
}
}
free(dindex);
free(cindex);
free(bindex);
free(aindex);
return retval;
}
//Performs bilinear interpolation, getting the values of m specified in the vectors X and Y
MAT * linear_interp2(MAT * m, VEC * X, VEC * Y)
{
// Note: assumes X and Y have the same length
MAT * retval = m_get(1, X->dim);
double x_coord, y_coord, new_val, a, b;
int l, k, i;
for(i = 0; i < X->dim; i++)
{
x_coord = v_get_val(X, i);
y_coord = v_get_val(Y, i);
l = (int)x_coord;
k = (int)y_coord;
a = x_coord - (double)l;
b = y_coord - (double)k;
//printf("xc: %f \t yc: %f \t i: %d \t l: %d \t k: %d \t a: %f \t b: %f\n", x_coord, y_coord, i, l, k, a, b);
new_val = (1.0-a)*(1.0-b)*m_get_val(m, k, l) +
a*(1.0-b)*m_get_val(m, k, l+1) +
(1.0-a)*b*m_get_val(m, k+1, l) +
a*b*m_get_val(m, k+1, l+1);
m_set_val(retval, 0, i, new_val);
}
return retval;
}
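// Worked example: for (x_coord, y_coord) = (2.25, 3.5) we get l = 2, k = 3,
// a = 0.25, b = 0.5, so the four corner weights are (1-a)(1-b) = 0.375,
// a(1-b) = 0.125, (1-a)b = 0.375 and ab = 0.125, which sum to 1.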
void splineenergyform01(MAT * Cx, MAT * Cy, MAT * Ix, MAT * Iy, int ns, double delta, double dt, int typeofcell)
{
VEC * X, * Y, * Xs, * Ys, * Nx, * Ny, * X1, * Y1, * X2, * Y2, * XY, * XX, * YY, * dCx, * dCy, * Ix1, * Ix2, *Iy1, *Iy2;
MAT * Ix1_mat, * Ix2_mat, * Iy1_mat, * Iy2_mat;
int i,j, N, * aindex, * bindex, * cindex, * dindex;
X = getsampling(Cx, ns);
Y = getsampling(Cy, ns);
Xs = getfdriv(Cx, ns);
Ys = getfdriv(Cy, ns);
Nx = v_get(Ys->dim);
for(i = 0; i < Nx->dim; i++)
v_set_val(Nx, i, v_get_val(Ys, i) / sqrt(v_get_val(Xs, i)*v_get_val(Xs, i) + v_get_val(Ys, i)*v_get_val(Ys, i)));
Ny = v_get(Xs->dim);
for(i = 0; i < Ny->dim; i++)
v_set_val(Ny, i, -1.0 * v_get_val(Xs, i) / sqrt(v_get_val(Xs, i)*v_get_val(Xs, i) + v_get_val(Ys, i)*v_get_val(Ys, i)));
X1 = v_get(Nx->dim);
for(i = 0; i < X1->dim; i++)
v_set_val(X1, i, v_get_val(X, i) + delta*v_get_val(Nx, i));
Y1 = v_get(Ny->dim);
for(i = 0; i < Y1->dim; i++)
v_set_val(Y1, i, v_get_val(Y, i) + delta*v_get_val(Ny, i));
X2 = v_get(Nx->dim);
for(i = 0; i < X2->dim; i++)
v_set_val(X2, i, v_get_val(X, i) - delta*v_get_val(Nx, i));
Y2 = v_get(Ny->dim);
for(i = 0; i < Y2->dim; i++)
v_set_val(Y2, i, v_get_val(Y, i) - delta*v_get_val(Ny, i)); // offset along -N, mirroring (X1, Y1)
Ix1_mat = linear_interp2(Ix, X1, Y1);
Iy1_mat = linear_interp2(Iy, X1, Y1);
Ix2_mat = linear_interp2(Ix, X2, Y2);
Iy2_mat = linear_interp2(Iy, X2, Y2);
Ix1 = v_get(Ix1_mat->n);
Iy1 = v_get(Iy1_mat->n);
Ix2 = v_get(Ix2_mat->n);
Iy2 = v_get(Iy2_mat->n);
Ix1 = get_row(Ix1_mat, 0, Ix1);
Iy1 = get_row(Iy1_mat, 0, Iy1);
Ix2 = get_row(Ix2_mat, 0, Ix2);
Iy2 = get_row(Iy2_mat, 0, Iy2);
N = Cx->m;
//VEC * retval = v_get(N*ns);
aindex = (int *)malloc(N*sizeof(int));
bindex = (int *)malloc(N*sizeof(int));
cindex = (int *)malloc(N*sizeof(int));
dindex = (int *)malloc(N*sizeof(int));
for(i = 1; i < N; i++)
aindex[i] = i-1;
aindex[0] = N-1;
for(i = 0; i < N; i++)
bindex[i] = i;
for(i = 0; i < N-1; i++)
cindex[i] = i+1;
cindex[N-1] = 0;
for(i = 0; i < N-2; i++)
dindex[i] = i+2;
dindex[N-2] = 0;
dindex[N-1] = 1;
XY = v_get(Xs->dim);
for(i = 0; i < Xs->dim; i++)
v_set_val(XY, i, v_get_val(Xs, i) * v_get_val(Ys, i));
XX = v_get(Xs->dim);
for(i = 0; i < Xs->dim; i++)
v_set_val(XX, i, v_get_val(Xs, i) * v_get_val(Xs, i));
YY = v_get(Ys->dim);
for(i = 0; i < Xs->dim; i++)
v_set_val(YY, i, v_get_val(Ys, i) * v_get_val(Ys, i));
dCx = v_get(Cx->m);
dCy = v_get(Cy->m);
//get control points for splines
for(i = 0; i < Cx->m; i++)
{
for(j = 0; j < ns; j++)
{
double s = (double)j / (double)ns;
double A1, A2, A3, A4, B1, B2, B3, B4, D, D_3, Tx1, Tx2, Tx3, Tx4, Ty1, Ty2, Ty3, Ty4;
int k;
A1 = (-1.0*(s-1.0)*(s-1.0)*(s-1.0)) / 6.0;
A2 = (3.0*s*s*s - 6.0*s*s + 4.0) / 6.0;
A3 = (-3.0*s*s*s + 3.0*s*s + 3.0*s + 1.0) / 6.0;
A4 = s*s*s / 6.0;
B1 = (-3.0*s*s + 6.0*s - 3.0) / 6.0;
B2 = (9.0*s*s - 12.0*s) / 6.0;
B3 = (-9.0*s*s + 6.0*s + 3.0) / 6.0;
B4 = 3.0*s*s / 6.0;
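// A1..A4 are the cubic B-spline blending functions (cf. TMatrix above) and
// B1..B4 are their derivatives with respect to s, both divided by 6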
k = i*ns+j;
D = sqrt(v_get_val(Xs, k)*v_get_val(Xs, k) + v_get_val(Ys, k)*v_get_val(Ys, k));
D_3 = D*D*D;
//1st control point
Tx1 = A1 - delta * v_get_val(XY, k) * B1 / D_3;
Tx2 = -1.0 * delta*(B1/D - v_get_val(XX, k)*B1/D_3);
Tx3 = A1 + delta * v_get_val(XY, k) * B1 / D_3;
Tx4 = delta*(B1/D - v_get_val(XX, k)*B1/D_3);
Ty1 = delta*(B1/D - v_get_val(YY, k)*B1/D_3);
Ty2 = A1 + delta * v_get_val(XY, k) * B1 / D_3;
Ty3 = -1.0 * delta*(B1/D - v_get_val(YY, k)*B1/D_3);
Ty4 = A1 - delta * v_get_val(XY, k) * B1 / D_3;
v_set_val(dCx, aindex[i], v_get_val(dCx, aindex[i]) + Tx1*v_get_val(Ix1, k) + Tx2*v_get_val(Iy1,k) - Tx3*v_get_val(Ix2, k) - Tx4*v_get_val(Iy2, k));
v_set_val(dCy, aindex[i], v_get_val(dCy, aindex[i]) + Ty1*v_get_val(Ix1, k) + Ty2*v_get_val(Iy1,k) - Ty3*v_get_val(Ix2, k) - Ty4*v_get_val(Iy2, k));
//2nd control point
Tx1 = A2 - delta * v_get_val(XY, k) * B2 / D_3;
Tx2 = -1.0 * delta*(B2/D - v_get_val(XX, k)*B2/D_3);
Tx3 = A2 + delta * v_get_val(XY, k) * B2 / D_3;
Tx4 = delta*(B2/D - v_get_val(XX, k)*B2/D_3);
Ty1 = delta*(B2/D - v_get_val(YY, k)*B2/D_3);
Ty2 = A2 + delta * v_get_val(XY, k) * B2 / D_3;
Ty3 = -1.0 * delta*(B2/D - v_get_val(YY, k)*B2/D_3);
Ty4 = A2 - delta * v_get_val(XY, k) * B2 / D_3;
v_set_val(dCx, bindex[i], v_get_val(dCx, bindex[i]) + Tx1*v_get_val(Ix1, k) + Tx2*v_get_val(Iy1,k) - Tx3*v_get_val(Ix2, k) - Tx4*v_get_val(Iy2, k));
v_set_val(dCy, bindex[i], v_get_val(dCy, bindex[i]) + Ty1*v_get_val(Ix1, k) + Ty2*v_get_val(Iy1,k) - Ty3*v_get_val(Ix2, k) - Ty4*v_get_val(Iy2, k));
//3rd control point
Tx1 = A3 - delta * v_get_val(XY, k) * B3 / D_3;
Tx2 = -1.0 * delta*(B3/D - v_get_val(XX, k)*B3/D_3);
Tx3 = A3 + delta * v_get_val(XY, k) * B3 / D_3;
Tx4 = delta*(B3/D - v_get_val(XX, k)*B3/D_3);
Ty1 = delta*(B3/D - v_get_val(YY, k)*B3/D_3);
Ty2 = A3 + delta * v_get_val(XY, k) * B3 / D_3;
Ty3 = -1.0 * delta*(B3/D - v_get_val(YY, k)*B3/D_3);
Ty4 = A3 - delta * v_get_val(XY, k) * B3 / D_3;
v_set_val(dCx, cindex[i], v_get_val(dCx, cindex[i]) + Tx1*v_get_val(Ix1, k) + Tx2*v_get_val(Iy1,k) - Tx3*v_get_val(Ix2, k) - Tx4*v_get_val(Iy2, k));
v_set_val(dCy, cindex[i], v_get_val(dCy, cindex[i]) + Ty1*v_get_val(Ix1, k) + Ty2*v_get_val(Iy1,k) - Ty3*v_get_val(Ix2, k) - Ty4*v_get_val(Iy2, k));
//4th control point
Tx1 = A4 - delta * v_get_val(XY, k) * B4 / D_3;
Tx2 = -1.0 * delta*(B4/D - v_get_val(XX, k)*B4/D_3);
Tx3 = A4 + delta * v_get_val(XY, k) * B4 / D_3;
Tx4 = delta*(B4/D - v_get_val(XX, k)*B4/D_3);
Ty1 = delta*(B4/D - v_get_val(YY, k)*B4/D_3);
Ty2 = A4 + delta * v_get_val(XY, k) * B4 / D_3;
Ty3 = -1.0 * delta*(B4/D - v_get_val(YY, k)*B4/D_3);
Ty4 = A4 - delta * v_get_val(XY, k) * B4 / D_3;
v_set_val(dCx, dindex[i], v_get_val(dCx, dindex[i]) + Tx1*v_get_val(Ix1, k) + Tx2*v_get_val(Iy1,k) - Tx3*v_get_val(Ix2, k) - Tx4*v_get_val(Iy2, k));
v_set_val(dCy, dindex[i], v_get_val(dCy, dindex[i]) + Ty1*v_get_val(Ix1, k) + Ty2*v_get_val(Iy1,k) - Ty3*v_get_val(Ix2, k) - Ty4*v_get_val(Iy2, k));
}
}
if(typeofcell==1)
{
for(i = 0; i < Cx->n; i++)
m_set_val(Cx, 0, i, m_get_val(Cx, 1, i) - dt*v_get_val(dCx, i));
for(i = 0; i < Cy->n; i++)
m_set_val(Cy, 0, i, m_get_val(Cy, 1, i) - dt*v_get_val(dCy, i));
}
else
{
for(i = 0; i < Cx->n; i++)
m_set_val(Cx, 0, i, m_get_val(Cx, 1, i) + dt*v_get_val(dCx, i));
for(i = 0; i < Cy->n; i++)
m_set_val(Cy, 0, i, m_get_val(Cy, 1, i) + dt*v_get_val(dCy, i));
}
v_free(dCy); v_free(dCx); v_free(YY); v_free(XX); v_free(XY);
free(dindex); free(cindex); free(bindex); free(aindex);
v_free(Iy2); v_free(Ix2); v_free(Iy1); v_free(Ix1);
m_free(Iy2_mat); m_free(Ix2_mat); m_free(Iy1_mat); m_free(Ix1_mat);
v_free(Y2); v_free(X2); v_free(Y1); v_free(X1); v_free(Ny); v_free(Nx); v_free(Ys); v_free(Xs); v_free(Y); v_free(X);
}
|
for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for foo
void test_no_clause() {
int i;
#pragma omp for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp for' must be a for loop}}
#pragma omp for
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp parallel
#pragma omp for linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
#pragma omp for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp for collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp for collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for collapse(2)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 {{private variable cannot be reduction}}
// expected-error@+1 {{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp for
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif //ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#endif
//Precision to use for calculations
#define fptype float
#define NUM_RUNS 100
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
OptionData *data;
fptype *prices;
int numOptions;
int * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numError = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
fptype CNDF ( fptype InputX )
{
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
OutputX = 1.0 - OutputX;
}
return OutputX;
}
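// The polynomial above is the classic Abramowitz & Stegun style approximation of
// the cumulative normal distribution: N(x) ~= 1 - n(x)*(a1*t + a2*t^2 + ... + a5*t^5)
// with t = 1/(1 + 0.2316419*x), where n(x) is the standard normal density; the
// negative-x branch uses the symmetry N(-x) = 1 - N(x).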
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
fptype strike, fptype rate, fptype volatility,
fptype time, int otype, float timet )
{
fptype OptionPrice;
// local private working variables for the calculation
fptype xStockPrice;
fptype xStrikePrice;
fptype xRiskFreeRate;
fptype xVolatility;
fptype xTime;
fptype xSqrtTime;
fptype logValues;
fptype xLogTerm;
fptype xD1;
fptype xD2;
fptype xPowerTerm;
fptype xDen;
fptype d1;
fptype d2;
fptype FutureValueX;
fptype NofXd1;
fptype NofXd2;
fptype NegNofXd1;
fptype NegNofXd2;
xStockPrice = sptprice;
xStrikePrice = strike;
xRiskFreeRate = rate;
xVolatility = volatility;
xTime = time;
xSqrtTime = sqrt(xTime);
logValues = log( sptprice / strike );
xLogTerm = logValues;
xPowerTerm = xVolatility * xVolatility;
xPowerTerm = xPowerTerm * 0.5;
xD1 = xRiskFreeRate + xPowerTerm;
xD1 = xD1 * xTime;
xD1 = xD1 + xLogTerm;
xDen = xVolatility * xSqrtTime;
xD1 = xD1 / xDen;
xD2 = xD1 - xDen;
d1 = xD1;
d2 = xD2;
NofXd1 = CNDF( d1 );
NofXd2 = CNDF( d2 );
FutureValueX = strike * ( exp( -(rate)*(time) ) );
if (otype == 0) {
OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
} else {
NegNofXd1 = (1.0 - NofXd1);
NegNofXd2 = (1.0 - NofXd2);
OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
}
return OptionPrice;
}
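// For reference, the closed-form prices computed above (no dividends):
//   d1 = (ln(S/K) + (r + v^2/2)*T) / (v*sqrt(T)),  d2 = d1 - v*sqrt(T)
//   call = S*N(d1) - K*exp(-r*T)*N(d2)
//   put  = K*exp(-r*T)*N(-d2) - S*N(-d1)
// where N() is the cumulative normal implemented by CNDF above.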
#ifdef ENABLE_TBB
struct mainWork {
mainWork() {}
mainWork(mainWork &w, tbb::split) {}
void operator()(const tbb::blocked_range<int> &range) const {
fptype price;
int begin = range.begin();
int end = range.end();
for (int i=begin; i!=end; i++) {
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
rate[i], volatility[i], otime[i],
otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
fptype priceDelta = data[i].DGrefval - price;
if( fabs(priceDelta) >= 1e-5 ){
fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
i, price, data[i].DGrefval, priceDelta);
numError ++;
}
#endif
}
}
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr) {
int j;
tbb::affinity_partitioner a;
mainWork doall;
for (j=0; j<NUM_RUNS; j++) {
tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
}
return 0;
}
#else // !ENABLE_TBB
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif
int i, j;
fptype price;
fptype priceDelta;
int tid = *(int *)tid_ptr;
int start = tid * (numOptions / nThreads);
int end = start + (numOptions / nThreads);
for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
rate[i], volatility[i], otime[i],
otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
priceDelta = data[i].DGrefval - price;
if( fabs(priceDelta) >= 1e-4 ){
printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
i, price, data[i].DGrefval, priceDelta);
numError ++;
}
#endif
}
}
return 0;
}
#endif //ENABLE_TBB
int main (int argc, char **argv)
{
FILE *file;
int i;
int loopnum;
fptype * buffer;
int * buffer2;
int rv;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
if (argc != 4)
{
printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
exit(1);
}
nThreads = atoi(argv[1]);
char *inputFile = argv[2];
char *outputFile = argv[3];
//Read input data from file
file = fopen(inputFile, "r");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", inputFile);
exit(1);
}
rv = fscanf(file, "%i", &numOptions);
if(rv != 1) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
if(nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if(nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
exit(1);
}
#endif
// alloc spaces for the option data
data = (OptionData*)malloc(numOptions*sizeof(OptionData));
prices = (fptype*)malloc(numOptions*sizeof(fptype));
for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
{
rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval);
if(rv != 9) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", inputFile);
exit(1);
}
#ifdef ENABLE_THREADS
MAIN_INITENV(,8000000,nThreads);
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
buffer2 = (int *) malloc(numOptions * sizeof(int) + PAD);
otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
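// The ((ptr + PAD) & ~(LINESIZE - 1)) trick above skips PAD bytes and then rounds
// down to a 64-byte boundary, so each array starts on its own cache line while the
// result stays inside the allocation.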
for (i=0; i<numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
HANDLE *threads;
int *nums;
threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE));
nums = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
tids[i]=i;
CREATE_WITH_ARG(bs_thread, &tids[i]);
}
WAIT_FOR_END(nThreads);
free(tids);
#endif //WIN32
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid=0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else //ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid=0;
bs_thread(&tid);
#else //ENABLE_TBB
//serial version
int tid=0;
bs_thread(&tid);
#endif //ENABLE_TBB
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
//Write prices to output file
file = fopen(outputFile, "w");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", outputFile);
exit(1);
}
rv = fprintf(file, "%i\n", numOptions);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
for(i=0; i<numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", outputFile);
exit(1);
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
free(data);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
pr91401-2.c | #pragma omp declare target
void f0 (void);
void
f1 (void)
{
int i;
#pragma omp distribute dist_schedule(static) dist_schedule(static) /* { dg-warning "too many 'dist_schedule' clauses" } */
for (i = 0; i < 8; ++i)
f0 ();
#pragma omp distribute dist_schedule(static,2) dist_schedule(static,4) /* { dg-warning "too many 'dist_schedule' clauses" } */
for (i = 0; i < 8; ++i)
f0 ();
}
#pragma omp end declare target
|
conv3x3s1_winograd64_pack4_neon_AoA.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s1_winograd64_pack4_neon_AoA(const Mat& bottom_blob, Mat& top_blob, const Mat& _bias, const Option& opt,
int outch, int inch, int outh, int outw)
{
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const float* bias = _bias;
Mat top_blob_tm = bottom_blob;
Mat top_blob_bordered = top_blob;
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
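// Winograd F(6x6, 3x3): each 8x8 transformed tile yields one 6x6 output block,
// so the tile grid is (outw/6) x (outh/6)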
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
float32x4_t _bias0 = bias ? vld1q_f32( (const float*)bias + p * 4) : vdupq_n_f32(0.f);
float tmp[6][8][4];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm/8 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
const float* output0_tm_4 = output0_tm_0 + tiles * 16;
const float* output0_tm_5 = output0_tm_0 + tiles * 20;
const float* output0_tm_6 = output0_tm_0 + tiles * 24;
const float* output0_tm_7 = output0_tm_0 + tiles * 28;
float* output0 = out0.row(i * 6) + (j * 6) * 4;
// TODO neon optimize
for (int m=0; m<8; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);
float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
for (int m=0; m<6; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1q_f32(output0, _out00);
vst1q_f32(output0 + 8, _out02);
vst1q_f32(output0 + 16, _out04);
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
vst1q_f32(output0 + 4, _out01);
vst1q_f32(output0 + 12, _out03);
vst1q_f32(output0 + 20, _out05);
// output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 4;
}
}
}
}
}
}
}
|
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of ordering operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
Shape<dst_dim> target_shape) {
CHECK_EQ(src.CheckContiguous(), true);
return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
}  // namespace mshadow
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum
struct TopKParam : public dmlc::Parameter<TopKParam> {
dmlc::optional<int> axis;
int k;
int ret_typ;
bool is_ascend;
int dtype;
DMLC_DECLARE_PARAMETER(TopKParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose the top k indices."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(k).set_default(1)
.describe("Number of top elements to select;"
" it should always be smaller than or equal to the number of elements"
" in the given axis. A global sort is performed if k < 1.");
DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
.add_enum("value", topk_enum::kReturnValue)
.add_enum("indices", topk_enum::kReturnIndices)
.add_enum("mask", topk_enum::kReturnMask)
.add_enum("both", topk_enum::kReturnBoth)
.describe("The return type.\n"
" \"value\" means to return the top k values,"
" \"indices\" means to return the indices of the top k values,"
" \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
" \"both\" means to return a list of both values and indices of top k elements.");
DMLC_DECLARE_FIELD(is_ascend).set_default(false)
.describe("Whether to choose k largest or k smallest elements."
" Top K largest elements will be chosen if set to false.");
DMLC_DECLARE_FIELD(dtype)
// TODO(srivrohi): remove support for real data type in mxnet-2.0
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("int64", mshadow::kInt64)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices when ret_typ is \"indices\" or \"both\". "
"An error will be raised if the selected data type cannot precisely represent the "
"indices.");
}
};
struct SortParam : public dmlc::Parameter<SortParam> {
dmlc::optional<int> axis;
bool is_ascend;
DMLC_DECLARE_PARAMETER(SortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
dmlc::optional<int> axis;
bool is_ascend;
int dtype;
DMLC_DECLARE_PARAMETER(ArgSortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
DMLC_DECLARE_FIELD(dtype)
// TODO(srivrohi): remove support for real data type in mxnet-2.0
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("int64", mshadow::kInt64)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or"
" \"both\". An error will be raised if the selected data type cannot precisely "
"represent the indices.");
}
};
inline void ParseTopKParam(const TShape& src_shape,
const TopKParam& param,
TShape *target_shape,
size_t *batch_size,
index_t *element_num,
int *axis,
index_t *k,
bool *do_transpose,
bool *is_ascend) {
*do_transpose = false;
*k = param.k;
*is_ascend = param.is_ascend;
// get batch_size, axis and element_num
if (!static_cast<bool>(param.axis)) { // No axis given
*axis = 0;
*batch_size = 1;
*element_num = src_shape.Size();
} else {
*axis = param.axis.value();
if (*axis < 0) {
*axis += src_shape.ndim();
}
CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
<< "Invalid axis! axis should be between 0 and "
<< src_shape.ndim() << ", found axis=" << *axis;
if (src_shape[*axis] != 0) {
*batch_size = src_shape.Size() / src_shape[*axis];
}
*element_num = src_shape[*axis];
if (*axis != src_shape.ndim() - 1) {
*do_transpose = true;
}
}
// get k
if (param.k <= 0) {
*k = *element_num;
}
// get target_shape
if (!static_cast<bool>(param.axis)) {
if (param.ret_typ != topk_enum::kReturnMask) {
*target_shape = mshadow::Shape1(*k);
} else {
*target_shape = src_shape;
}
} else {
*target_shape = src_shape;
if (param.ret_typ != topk_enum::kReturnMask) {
(*target_shape)[*axis] = *k;
}
}
CHECK(*k >= 0 && *k <= *element_num) << "k must be between 0 and "
<< *element_num << ", got k = " << *k;
}
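// Example: for src_shape = (2, 3, 4), axis = 1, k = 2 and ret_typ != "mask", this
// yields batch_size = 8, element_num = 3, do_transpose = true (the axis is not the
// innermost one) and target_shape = (2, 2, 4).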
using namespace mshadow;
struct fill_ind_to_one {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, const index_t* indices, DType* out) {
out[indices[i]] = static_cast<DType>(1);
}
};
struct fill_ind {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, const index_t* indices, const DType* val,
int req, DType* out) {
KERNEL_ASSIGN(out[indices[i]], req, val[i]);
}
};
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
const Tensor<cpu, 1, index_t>& ind,
const Tensor<cpu, 1, char>& work,
index_t K, index_t N, bool is_ascend,
Stream<cpu> *s) {
// Use full sort when K is relatively large.
const bool full_sort(K*8 > N);
// Batch size.
const index_t M(work.size(0)/(sizeof(DType)*N));
const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < M; ++i) {
// Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
DType *vals = reinterpret_cast<DType*>(work.dptr_);
DType *sorted_vals = dat.dptr_+i*N;
index_t *indices = ind.dptr_+i*N;
if (is_ascend) {
if (full_sort) {
std::sort(indices, indices+N,
[&](const index_t& i1, const index_t& i2){
return vals[i1] < vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const index_t& i1, const index_t& i2){
return vals[i1] < vals[i2]; });
}
} else {
if (full_sort) {
std::sort(indices, indices+N,
[&](const index_t& i1, const index_t& i2){
return vals[i1] > vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const index_t& i1, const index_t& i2){
return vals[i1] > vals[i2]; });
}
}
for (index_t j = 0; j < K; ++j) {
sorted_vals[j] = vals[indices[j]];
}
}
}
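// The K*8 > N heuristic above trades the O(N log N) full std::sort against the
// O(N log K) std::partial_sort; once K is within a small constant factor of N,
// the full sort tends to be the faster of the two.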
#ifdef __CUDACC__
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, index_t ind1, DType val2, index_t ind2,
bool is_ascend) {
// Negative indices denote undefined values which are considered arbitrary small resp. large.
return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}
template<typename DType>
MSHADOW_XINLINE void MergeTopK(index_t K, DType *val1, index_t *ind1, DType *val2, index_t *ind2,
bool is_ascend) {
// In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
// [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
index_t i1(K-1), i2(K-1);
for (index_t i = 0; i < K; ++i) {
if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
--i2;
} else {
--i1;
}
}
// Now merge the lists from back to front.
for (index_t i = K; i--;) {
if (i2 < 0 || (i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend))) {
val1[i] = val1[i1];
ind1[i] = ind1[i1];
--i1;
} else {
val1[i] = val2[i2];
ind1[i] = ind2[i2];
--i2;
}
}
}
template<typename DType>
__global__ void PartialSortSmallK(index_t K, index_t N, DType *val, index_t *ind, bool is_ascend) {
// Buffer for pairwise reduction.
extern __shared__ index_t buff[];
// Start of buffer sections associated with this thread.
const index_t offset(threadIdx.x*K);
index_t *ind_buff = &buff[offset];
DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
// Initialize top-K values for this thread.
for (index_t i = 0; i < K; ++i) {
ind_buff[i] = -1;
}
// Range of values this thread cares about. Each thread block processes
// a different batch item (i.e. a different set of ind/val where we
// have to select the top-K elements). All threads within the same
// block work on the same batch item.
const index_t first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
// Select top-K from this range and store it sorted in the buffer.
// We assume a small K, so linear insertion is o.k.
for (index_t i = first; i < last; i += blockDim.x) {
DType cur_val(val[i]);
index_t cur_ind(ind[i]);
for (index_t j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j],
ind_buff[j], is_ascend); ) {
if (j+1 < K) {
val_buff[j+1] = val_buff[j];
ind_buff[j+1] = ind_buff[j];
}
val_buff[j] = cur_val;
ind_buff[j] = cur_ind;
}
}
// Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
// necessary a power of two, therefore the additional checks for last_s.
for (index_t s = (blockDim.x+1)/2, last_s = blockDim.x;
last_s > 1; last_s = s, s = (s+1)/2) {
__syncthreads();
if (threadIdx.x < s && threadIdx.x+s < last_s) {
MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
}
}
// Final updates on master thread.
if (threadIdx.x == 0) {
for (index_t i = 0; i < K; ++i) {
ind[blockIdx.x*N+i] = ind_buff[i];
val[blockIdx.x*N+i] = val_buff[i];
}
}
}
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
const Tensor<gpu, 1, index_t>& ind,
const Tensor<gpu, 1, char>& work,
index_t K, index_t N, bool is_ascend,
Stream<gpu> *s) {
// Use full sort for all but very small K for which we
// can do a partial sort entirely within shared memory.
const bool full_sort(K > 5);
// Batch size.
const index_t M(dat.size(0)/N);
if (full_sort) {
// Divide workspace into two parts. The first one is needed to store batch ids.
size_t alignment = std::max(sizeof(DType), sizeof(index_t));
size_t id_size = PadBytes(sizeof(index_t) * ind.size(0), alignment);
Tensor<gpu, 1, index_t> batch_id(reinterpret_cast<index_t*>(work.dptr_),
Shape1(ind.size(0)), s);
Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
if (M > 1) {
// Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
}
} else {
const int nthreads(mshadow::cuda::kBaseThreadNum);
// Shared memory must hold nthreads*K index_t entries followed by nthreads*K DType
// entries (see PartialSortSmallK), hence sizeof(index_t) rather than sizeof(int).
PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(index_t)+sizeof(DType)),
mshadow::Stream<gpu>::GetStream(s)>>>
(K, N, dat.dptr_, ind.dptr_, is_ascend);
}
}
#endif
/*!
* \brief Implementation of the TopK operation
*
*
* \param ctx the running context
* \param resource temporary resource handler
* \param src the Source blob
* \param ret the destination blobs
* \param param the topk parameters
* \tparam xpu the device type.
* \tparam DType type of the output value/mask.
* \tparam IDType type of the output indices.
*/
template<typename xpu, typename DType, typename IDType>
void TopKImpl(const RunContext &ctx,
const Resource &resource,
const std::vector<OpReqType>& req,
const TBlob& src,
const std::vector<TBlob>& ret,
const TopKParam& param) {
using namespace mshadow;
using namespace mshadow::expr;
// 0. If input shape is 0-shape, directly return
if (src.Size() == 0) return;
// 1. Parse and initialize information
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 1, char> workspace;
Tensor<xpu, 1, char> temp_workspace;
Tensor<xpu, 1, DType> sorted_dat;
Tensor<xpu, 1, index_t> indices, sel_indices;
size_t batch_size = 0; // number of batches
index_t element_num = 0; // number of elements along the chosen axis in each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
size_t alignment = std::max(sizeof(DType), sizeof(index_t));
mxnet::TShape target_shape;
ParseTopKParam(src.shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>())
<< "'index_t' does not have a sufficient precision to represent "
<< "the indices of the input array. The total element_num is "
<< element_num << ", but the selected index_t can only represent "
<< mxnet::common::MaxIntegerValue<index_t>() << " elements";
Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
// Temp space needed by the full sorts.
size_t temp_size = std::max(
mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()),
mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size()));
temp_size = std::max(temp_size,
mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size()));
// Additional temp space for gpu full sorts for batch ids.
temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment);
// Temp space for cpu sorts.
temp_size = std::max(temp_size, sizeof(DType) * src.Size());
size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
+ PadBytes(sizeof(index_t) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment);
}
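// Workspace layout (each segment padded to `alignment`):
//   [ sorted_dat (DType x src.Size()) | indices (index_t x src.Size())
//     | sel_indices (index_t x batch_size*k, kReturnMask only) | temp_size ]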
workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
char* workspace_curr_ptr = workspace.dptr_;
sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
Shape1(src.Size()), s); // contains the sorted data
workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
Shape1(src.Size()), s); // indices in the original matrix
workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
Shape1(batch_size * k), s);
workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment);
CHECK_EQ(sel_indices.CheckContiguous(), true);
}
if (std::is_same<xpu, cpu>::value) {
Tensor<xpu, 1, DType> flattened_data;
if (do_transpose) {
flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
Shape1(src.Size()), s);
workspace_curr_ptr += sizeof(DType) * src.Size();
flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
CHECK_EQ(flattened_data.CheckContiguous(), true);
} else {
flattened_data = src.FlatTo1D<xpu, DType>(s);
}
// `temp_workspace` stores the flattened data
temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
Shape1(sizeof(DType)*src.Size()), s);
CHECK_EQ(temp_workspace.CheckContiguous(), true);
} else {
if (do_transpose) {
sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
} else {
sorted_dat = reshape(dat, Shape1(src.Size()));
}
CHECK_EQ(sorted_dat.CheckContiguous(), true);
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space
workspace_curr_ptr += temp_size;
}
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, index_t{0}, index_t{1},
kWriteTo, indices.dptr_);
CHECK_EQ(indices.CheckContiguous(), true);
// 2. Perform in-place batch sort.
// After sorting, each batch in `sorted_dat` is sorted in the requested order
// up to the k-th element, and `indices` contains the corresponding positions in `sorted_dat`.
// `temp_workspace` stores the flattened source data on the CPU device and serves as
// a temporary buffer on the GPU device.
TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
// 3. Assign results to the ret blob.
// When returning indices, only the required elements are updated (via modulo) instead of
// all elements, to avoid redundant computation.
// Casting `ret_indices` from int to real_t can introduce conversion error when element_num
// is large enough.
if (param.ret_typ == topk_enum::kReturnMask) {
Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
ret_mask = scalar<DType>(0);
sel_indices = reshape(slice<1>(
inplace_reshape(indices,
Shape2(batch_size,
element_num)), 0, k),
Shape1(batch_size * k));
if (do_transpose) {
mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
CHECK_EQ(sel_indices.CheckContiguous(), true);
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
}
if (req[0] == kNullOp) {
return;
} else if (req[0] == kWriteTo) {
mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
sel_indices.dptr_, ret_mask.dptr_);
} else {
LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
}
} else if (param.ret_typ == topk_enum::kReturnIndices) {
if (do_transpose) {
Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k),
Shape3(0, 2, 1)), element_num)));
} else {
Tensor<xpu, 2, IDType> ret_indices =
ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
element_num)));
}
} else {
if (do_transpose) {
Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(ret_value, req[0], transpose(
slice<2>(inplace_reshape(sorted_dat,
Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
0, k), Shape3(0, 2, 1)));
ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k), Shape3(0, 2, 1)), element_num)));
} else {
Tensor<xpu, 2, DType> ret_value =
ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
Tensor<xpu, 2, IDType> ret_indices =
ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_value, req[0],
slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
}
}
}
template<typename xpu, typename DType>
size_t TopKWorkspaceSize(const TBlob& src,
const TopKParam& param,
size_t *temp_size_ptr) {
using namespace mshadow;
using namespace mshadow::expr;
size_t batch_size = 0;
size_t temp_size;
index_t element_num = 0;  // size of each batch, i.e. the number of elements along the sorted axis
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
size_t alignment = std::max(sizeof(DType), sizeof(index_t));
mxnet::TShape target_shape;
ParseTopKParam(src.shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
// Temp space needed by the full sorts.
temp_size = std::max(
mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()),
mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size()));
temp_size = std::max(temp_size,
mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size()));
// Additional temp space for gpu full sorts for batch ids.
temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment);
// Temp space for cpu sorts.
temp_size = std::max(temp_size, sizeof(DType) * src.Size());
*temp_size_ptr = temp_size;
size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
+ PadBytes(sizeof(index_t) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment);
}
return workspace_size;
}
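// A minimal usage sketch pairing TopKWorkspaceSize with TopKImplwithWorkspace
// (illustrative only; the allocator call is hypothetical):
//
//   size_t temp_size = 0;
//   size_t nbytes = TopKWorkspaceSize<xpu, DType>(src, param, &temp_size);
//   char* ws = AllocateWorkspace(nbytes);  // hypothetical helper
//   TopKImplwithWorkspace<xpu, DType, IDType>(ctx, req, src, ret, param,
//                                             ws, temp_size, s);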
template<typename xpu, typename DType, typename IDType>
void TopKImplwithWorkspace(const RunContext &ctx,
const std::vector<OpReqType>& req,
const TBlob& src,
const std::vector<TBlob>& ret,
const TopKParam& param,
char* workspace_curr_ptr,
const size_t &temp_size,
Stream<xpu>* s) {
using namespace mshadow;
using namespace mshadow::expr;
// 0. If input shape is 0-shape, directly return
if (src.Size() == 0) return;
// 1. Parse and initialize information
Tensor<xpu, 1, char> workspace;
Tensor<xpu, 1, char> temp_workspace;
Tensor<xpu, 1, DType> sorted_dat;
Tensor<xpu, 1, index_t> indices, sel_indices;
size_t batch_size = 0;
index_t element_num = 0;  // size of each batch, i.e. the number of elements along the sorted axis
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
size_t alignment = std::max(sizeof(DType), sizeof(index_t));
mxnet::TShape target_shape;
ParseTopKParam(src.shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>())
<< "'index_t' does not have a sufficient precision to represent "
<< "the indices of the input array. The total element_num is "
<< element_num << ", but the selected index_t can only represent "
<< mxnet::common::MaxIntegerValue<index_t>() << " elements";
Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
Shape1(src.Size()), s); // contains the sorted data
workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
Shape1(src.Size()), s); // indices in the original matrix
workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
Shape1(batch_size * k), s);
workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment);
CHECK_EQ(sel_indices.CheckContiguous(), true);
}
if (std::is_same<xpu, cpu>::value) {
Tensor<xpu, 1, DType> flattened_data;
if (do_transpose) {
flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
Shape1(src.Size()), s);
workspace_curr_ptr += sizeof(DType) * src.Size();
flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
CHECK_EQ(flattened_data.CheckContiguous(), true);
} else {
flattened_data = src.FlatTo1D<xpu, DType>(s);
}
// `temp_workspace` stores the flattened data
temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
Shape1(sizeof(DType)*src.Size()), s);
CHECK_EQ(temp_workspace.CheckContiguous(), true);
} else {
if (do_transpose) {
sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
} else {
sorted_dat = reshape(dat, Shape1(src.Size()));
}
CHECK_EQ(sorted_dat.CheckContiguous(), true);
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space
workspace_curr_ptr += temp_size;
}
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, index_t{0}, index_t{1},
kWriteTo, indices.dptr_);
CHECK_EQ(indices.CheckContiguous(), true);
// 2. Perform in-place batch sort.
// After sorting, each batch in `sorted_dat` is sorted in the requested order
// up to the k-th element, and `indices` contains the corresponding positions in `sorted_dat`.
// `temp_workspace` stores the flattened source data on the CPU device and serves as
// a temporary buffer on the GPU device.
TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
// 3. Assign results to the ret blob.
// When returning indices, only the required elements are updated (via modulo) instead of
// all elements, to avoid redundant computation.
// Casting `ret_indices` from int to real_t can introduce conversion error when element_num
// is large enough.
if (param.ret_typ == topk_enum::kReturnMask) {
Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
ret_mask = scalar<DType>(0);
sel_indices = reshape(slice<1>(
inplace_reshape(indices,
Shape2(batch_size,
element_num)), 0, k),
Shape1(batch_size * k));
if (do_transpose) {
mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
CHECK_EQ(sel_indices.CheckContiguous(), true);
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
}
if (req[0] == kNullOp) {
return;
} else if (req[0] == kWriteTo) {
mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
sel_indices.dptr_, ret_mask.dptr_);
} else {
LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
}
} else if (param.ret_typ == topk_enum::kReturnIndices) {
if (do_transpose) {
Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k),
Shape3(0, 2, 1)), element_num)));
} else {
Tensor<xpu, 2, IDType> ret_indices =
ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
element_num)));
}
} else {
if (do_transpose) {
Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(ret_value, req[0], transpose(
slice<2>(inplace_reshape(sorted_dat,
Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
0, k), Shape3(0, 2, 1)));
ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k), Shape3(0, 2, 1)), element_num)));
} else {
Tensor<xpu, 2, DType> ret_value =
ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
Tensor<xpu, 2, IDType> ret_indices =
ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_value, req[0],
slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
}
}
}
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
})
});
} else {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
TopKImpl<xpu, DType, index_t>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
});
}
}
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
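// k == 0 is interpreted by ParseTopKParam as "all elements", giving a full
// sort along the axis.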
topk_param.ret_typ = topk_enum::kReturnValue;
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
TopKImpl<xpu, DType, index_t>(ctx.run_ctx, ctx.requested[0], req, inputs[0],
outputs, topk_param);
});
}
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.dtype = param.dtype;
topk_param.ret_typ = topk_enum::kReturnIndices;
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
TopKImpl<xpu, DType, IDType>(ctx.run_ctx,
ctx.requested[0], req, inputs[0], outputs, topk_param);
});
});
}
template<typename xpu, typename DType, typename IDType>
void TopKBackwardImpl(const OpContext &ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const TopKParam& param) {
CHECK_NE(req[0], kWriteInplace);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
size_t batch_size = 0;
index_t element_num = 0;  // size of each batch, i.e. the number of elements along the sorted axis
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
mxnet::TShape target_shape;
ParseTopKParam(outputs[0].shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
<< "'IDType' does not have a sufficient precision to represent "
<< "the indices of the input array. The total element_num is " << element_num
<< ", but the selected index_t can only represent "
<< mxnet::common::MaxIntegerValue<IDType>() << " elements";
Tensor<xpu, 1, index_t> workspace =
ctx.requested[0].get_space_typed<xpu, 1, index_t>(Shape1(batch_size * k + batch_size), s);
Tensor<xpu, 1, index_t> sel_indices =
Tensor<xpu, 1, index_t>(workspace.dptr_, Shape1(batch_size * k), s);
Tensor<xpu, 1, index_t> batch_shift =
Tensor<xpu, 1, index_t>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
Tensor<xpu, 2, DType> out_grad =
inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
Tensor<xpu, 2, DType> in_grad =
outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, index_t{0}, element_num, kWriteTo,
batch_shift.dptr_);
if (do_transpose) {
Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
sel_indices = reshape(transpose(
broadcast_to(inplace_reshape(batch_shift,
Shape3(src_shape[0], src_shape[2], 1)),
mxnet::TShape(Shape3(src_shape[0], src_shape[2], k))),
Shape3(0, 2, 1)),
Shape1(batch_size * k));
sel_indices += tcast<index_t>(indices);
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
} else {
Tensor<xpu, 2, IDType> indices =
inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
sel_indices = reshape(tcast<index_t>(indices) +
broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
mxnet::TShape(Shape2(batch_size, k))),
Shape1(batch_size * k));
}
CHECK_EQ(sel_indices.CheckContiguous(), true);
if (kWriteTo == req[0] || kAddTo == req[0]) {
if (kWriteTo == req[0]) {
in_grad = scalar<DType>(0);
}
mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k,
sel_indices.dptr_,
out_grad.dptr_,
req[0],
in_grad.dptr_);
} else {
LOG(FATAL) << "Not Implemented!";
}
}
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnBoth) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param);
});
});
} else if (param.ret_typ == topk_enum::kReturnValue) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
TopKBackwardImpl<xpu, DType, index_t>(ctx, inputs, req, outputs, param);
});
} else {
LOG(FATAL) << "Not Implemented";
}
}
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
return static_cast<uint32_t>(1);
} else {
return static_cast<uint32_t>(2);
}
}
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnBoth) {
return static_cast<uint32_t>(2);
} else {
return static_cast<uint32_t>(1);
}
}
inline bool TopKType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
size_t in_size = in_attrs->size();
size_t out_size = out_attrs->size();
CHECK_EQ(in_size, 1);
CHECK(out_size == 1 || out_size == 2);
// out_attr[0] -> stores value
// out_attr[1] -> stores indices
if (out_size > 1) {
if (param.ret_typ == topk_enum::kReturnValue) {
#if MXNET_USE_INT64_TENSOR_SIZE == 1
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
<< "Failed to set the type of ret_indices.";
} else {
CHECK(type_assign(&(*out_attrs)[1], param.dtype))
<< "Failed to set the type of ret_indices.";
}
}
if (param.ret_typ == topk_enum::kReturnIndices) {
CHECK(type_assign(&(*out_attrs)[0], param.dtype))
<< "Failed to set the type of ret_indices.";
} else {
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
return true;
}
inline bool TopKShapeImpl(const TopKParam& param,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
CHECK_EQ(out_attrs->size(), 1U);
} else {
CHECK_EQ(out_attrs->size(), 2U);
}
mxnet::TShape& in_shape = (*in_attrs)[0];
size_t batch_size = 0;
index_t element_num = 0;  // size of each batch, i.e. the number of elements along the sorted axis
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
mxnet::TShape target_shape;
ParseTopKParam(in_shape, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
}
return true;
}
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
return TopKShapeImpl(param, in_attrs, out_attrs);
}
inline bool SortType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
int data_type = -1;
size_t in_size = in_attrs->size();
size_t out_size = out_attrs->size();
CHECK_EQ(in_size, 1);
CHECK_EQ(out_size, 2);
#if MXNET_USE_INT64_TENSOR_SIZE == 1
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
<< "Failed to set the type of ret_indices";
CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
<< (*in_attrs)[0];
CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
<< (*out_attrs)[0];
CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
<< (*in_attrs)[0];
CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
<< (*out_attrs)[0];
if (data_type == -1) return false;
return true;
}
inline bool SortShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnValue;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
CHECK(type_assign(&(*out_attrs)[0], param.dtype))
<< "Failed to set the type of ret_indices.";
return true;
}
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnIndices;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
subCycleStrongCubatureVolumeHex3D.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C" void FUNC(subCycleStrongCubatureVolumeHex3D)(const int & Nelements,
const int * __restrict__ elementList,
const dfloat * __restrict__ cubD,
const dfloat * __restrict__ cubInterpT,
const int & offset,
const int & cubatureOffset,
const int & NSOffset,
const dfloat * __restrict__ invLumpedMassMatrix,
const dfloat * __restrict__ BdivW,
const dfloat & c0,
const dfloat & c1,
const dfloat & c2,
const dfloat * __restrict__ conv,
const dfloat * __restrict__ S,
dfloat * __restrict__ NU) {
// (phi, U.grad Ud)
dfloat r_c[3] = {c0, c1, c2};
dfloat s_cubD[p_cubNq][p_cubNq];
dfloat s_cubInterpT[p_Nq][p_cubNq];
dfloat s_U[p_cubNq][p_cubNq];
dfloat s_Ud[p_cubNq][p_cubNq];
dfloat s_Ud1[p_Nq][p_cubNq];
dfloat r_U2[p_cubNq][p_cubNq][p_cubNq];
dfloat r_Ud[p_cubNq][p_cubNq][p_cubNq];
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
const int id = i + j * p_cubNq;
if (id < p_Nq * p_cubNq) {
s_cubInterpT[j][i] = cubInterpT[id];
}
s_cubD[j][i] = cubD[id];
}
}
#ifdef __NEKRS__OMP__
#pragma omp parallel for private(s_U, s_Ud, s_Ud1, r_U2, r_Ud)
#endif
for (int e = 0; e < Nelements; ++e) {
const int element = elementList[e];
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
r_Ud[j][i][k] = 0;
}
}
}
#pragma unroll
for (int c = 0; c < p_Nq; ++c) {
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int a = 0; a < p_Nq; ++a) {
const int id = element * p_Np + c * p_Nq * p_Nq + b * p_Nq + a;
s_Ud[b][a] = S[id];
}
}
// interpolate in 'r'
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Ud1 = 0;
#pragma unroll
for (int a = 0; a < p_Nq; ++a) {
dfloat Iia = s_cubInterpT[a][i];
Ud1 += Iia * s_Ud[b][a];
}
s_Ud1[b][i] = Ud1;
}
}
// interpolate in 's'
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Ud2 = 0;
// interpolate in b
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
dfloat Ijb = s_cubInterpT[b][j];
Ud2 += Ijb * s_Ud1[b][i];
}
// interpolate in c progressively
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
dfloat Ikc = s_cubInterpT[c][k];
r_Ud[j][i][k] += Ikc * Ud2;
}
}
}
}
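// The three passes above apply the interpolation one dimension at a time
// (sum factorization): each pass costs O(p_Nq * p_cubNq) per output point
// rather than forming the full 3D interpolation operator at once.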
// Uhat * dr
#pragma unroll p_cubNq
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Udr = 0;
#pragma unroll
for (int n = 0; n < p_cubNq; ++n) {
dfloat Din = s_cubD[i][n];
Udr += Din * r_Ud[j][n][k];
}
dfloat Uhat = 0.0;
const int id = element * p_cubNp + k * p_cubNq * p_cubNq + j * p_cubNq + i;
#pragma unroll
for (int s = 0; s < p_nEXT; ++s) {
const int s_offset = s * p_NVfields * cubatureOffset;
const dfloat coeff = r_c[s];
Uhat += coeff * conv[id + 0 * cubatureOffset + s_offset];
}
// U*dUdx + V*dUdy + W*dUdz = (U*(drdx*dUdr+dsdx*dUds+dtdx*dUdt) + V*(drdy*dUdr ..))
// I_f^t*(J_f*C_f^t)*G_f*\hat{D}_f*I_f*u
r_U2[j][i][k] = Uhat * Udr;
}
}
}
// Vhat * ds
#pragma unroll p_cubNq
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
dfloat Uds = 0;
#pragma unroll
for (int n = 0; n < p_cubNq; ++n) {
dfloat Djn = s_cubD[j][n];
Uds += Djn * r_Ud[n][i][k];
}
dfloat Vhat = 0.0;
const int id = element * p_cubNp + k * p_cubNq * p_cubNq + j * p_cubNq + i;
#pragma unroll
for (int s = 0; s < p_nEXT; ++s) {
const int s_offset = s * p_NVfields * cubatureOffset;
const dfloat coeff = r_c[s];
Vhat += coeff * conv[id + 1 * cubatureOffset + s_offset];
}
// U*dUdx + V*dUdy + W*dUdz = (U*(drdx*dUdr+dsdx*dUds+dtdx*dUdt) + V*(drdy*dUdr ..))
// I_f^t*(J_f*C_f^t)*G_f*\hat{D}_f*I_f*u
r_U2[j][i][k] += Vhat * Uds;
}
}
}
// What * dt
#pragma unroll p_cubNq
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Udt = 0;
#pragma unroll
for (int n = 0; n < p_cubNq; ++n) {
dfloat Dkn = s_cubD[k][n];
Udt += Dkn * r_Ud[j][i][n];
}
dfloat What = 0.0;
const int id = element * p_cubNp + k * p_cubNq * p_cubNq + j * p_cubNq + i;
#pragma unroll
for (int s = 0; s < p_nEXT; ++s) {
const int s_offset = s * p_NVfields * cubatureOffset;
const dfloat coeff = r_c[s];
What += coeff * conv[id + 2 * cubatureOffset + s_offset];
}
// U*dUdx + V*dUdy + W*dUdz = (U*(drdx*dUdr+dsdx*dUds+dtdx*dUdt) + V*(drdy*dUdr ..))
// I_f^t*(J_f*C_f^t)*G_f*\hat{D}_f*I_f*u
r_U2[j][i][k] += What * Udt;
}
}
}
// now project back in t
#pragma unroll
for (int c = 0; c < p_Nq; ++c) {
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat rhsU = 0;
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
dfloat Ikc = s_cubInterpT[c][k];
rhsU += Ikc * r_U2[j][i][k];
}
s_U[j][i] = rhsU;
}
}
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat rhsU = 0;
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
dfloat Ijb = s_cubInterpT[b][j];
rhsU += Ijb * s_U[j][i];
}
s_Ud[b][i] = rhsU;
}
}
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int a = 0; a < p_Nq; ++a) {
dfloat rhsU = 0;
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Iia = s_cubInterpT[a][i];
rhsU += Iia * s_Ud[b][i];
}
const int id = element * p_Np + c * p_Nq * p_Nq + b * p_Nq + a;
dfloat invLMM = p_MovingMesh ? 0.0 : invLumpedMassMatrix[id];
dfloat bdivw = 0.0;
if (p_MovingMesh) {
#pragma unroll
for (int s = 0; s < p_nEXT; s++) {
const dfloat coeff = r_c[s];
invLMM += coeff * invLumpedMassMatrix[id + s * offset];
bdivw += coeff * BdivW[id + s * offset];
}
}
NU[id + NSOffset] = (rhsU - bdivw * S[id]) * invLMM;
}
}
}
}
}
|
PointCloud.h | #ifndef POINTCLOUD_H
#define POINTCLOUD_H
#include <iostream>
#include <memory>
#include <thread>
#include <cstdio>
#include <vector>
#include <cstdlib>
#include <ctime>
#include <cmath>
#include <string>
#include <Eigen/Core>
#include <omp.h>
#include "utility.h"
#include "kdtree.h"
namespace PPP{
namespace geometry{
class PointCloud{
public:
PointCloud(){}
PointCloud(const std::vector<Eigen::Vector3d> &points){
points_ = points;
}
~PointCloud(){}
public:
//example=================================================================
std::string example(){
return std::string("import My_HSI_LiDAR as HSIL\n ")+
"import numpy as np\n"+
"pcd = HSIL.geometry.PointCloud()\n"+
"np_points = np.random.rand(100, 3)\n"+
"pcd.points = HSIL.utility.Vector3dVector(np_points)\n"+
"pcd.NormalEstimate(5)\n"+
"np.asarray(pcd.normals)";
}
//test cube cloud============================================================
void cube_cloud(int size, double longx, double longy,
double longz,double offx = 0.,double offy = 0.,double offz = 0.){
Eigen::Vector3d offvec(offx, offy, offz);
for(int i = 0; i < size; ++i)
{
Eigen::Vector3d dot1(rand()/double(RAND_MAX)*longx,0.,
rand()/double(RAND_MAX)*longz);
Eigen::Vector3d color1(0.,0.,
1.);
points_.push_back(dot1+offvec);
colors_.push_back(color1);
}
for(int i = 0; i < size; ++i)
{
Eigen::Vector3d dot1(rand()/double(RAND_MAX)*longx,longy,
rand()/double(RAND_MAX)*longz);
Eigen::Vector3d color1(1.,0.,
0.);
points_.push_back(dot1+offvec);
colors_.push_back(color1);
}
for(int i = 0; i < size; ++i)
{
Eigen::Vector3d dot1(0. ,rand()/double(RAND_MAX)*longy,
rand()/double(RAND_MAX)*longz);
Eigen::Vector3d color1(0.,1.,
0.);
points_.push_back(dot1+offvec);
colors_.push_back(color1);
}
for(int i = 0; i < size; ++i)
{
Eigen::Vector3d dot1(longx ,rand()/double(RAND_MAX)*longy,
rand()/double(RAND_MAX)*longz);
Eigen::Vector3d color1(1.,1.,
0.);
points_.push_back(dot1+offvec);
colors_.push_back(color1);
}
for(int i = 0; i < size; ++i)
{
Eigen::Vector3d dot1(rand()/double(RAND_MAX)*longx ,rand()/double(RAND_MAX)*longy,
0.);
Eigen::Vector3d color1(1.,0.,
1.);
points_.push_back(dot1+offvec);
colors_.push_back(color1);
}
for(int i = 0; i < size; ++i)
{
Eigen::Vector3d dot1(rand()/double(RAND_MAX)*longx ,rand()/double(RAND_MAX)*longy,
longz);
Eigen::Vector3d color1(0.5,0.5,
1.);
points_.push_back(dot1+offvec);
colors_.push_back(color1);
}
}
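// A minimal usage sketch (illustrative; variable names are made up):
//   PPP::geometry::PointCloud pcd;
//   pcd.cube_cloud(1000, 1.0, 1.0, 1.0);  // 6 faces x 1000 points
//   pcd.ComputeNeighbor(8);
//   pcd.NormalEstimate();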
// Clear the point cloud ====================================================
PointCloud &Clear(){
points_.clear();
normals_.clear();
colors_.clear();
curvatures_.clear();
neighbors_.clear();
return *this;
}
//==========================================================================
bool HasPoints() const { return points_.size() > 0; }
bool HasNormals() const {
return points_.size() > 0 && normals_.size() == points_.size();
}
bool HasCurvatures() const {
return curvatures_.size() > 0 && curvatures_.size() == points_.size();
}
bool HasColors() const {
return points_.size() > 0 && colors_.size() == points_.size();
}
bool IsEmpty() const { return !HasPoints(); }
std::vector<int> GetNeighbor(int i){
return neighbors_[i];
}
PointCloud &operator+=(const PointCloud &cloud) {
// We do not use std::vector::insert to combine the vectors because it
// would read through invalidated iterators (and crash) if the point cloud
// is added to itself.
if (cloud.IsEmpty()) return (*this);
size_t old_vert_num = points_.size();
size_t add_vert_num = cloud.points_.size();
size_t new_vert_num = old_vert_num + add_vert_num;
if ((!HasPoints() || HasNormals()) && cloud.HasNormals()) {
normals_.resize(new_vert_num);
for (size_t i = 0; i < add_vert_num; i++)
normals_[old_vert_num + i] = cloud.normals_[i];
} else {
normals_.clear();
}
if ((!HasPoints() || HasColors()) && cloud.HasColors()) {
colors_.resize(new_vert_num);
for (size_t i = 0; i < add_vert_num; i++)
colors_[old_vert_num + i] = cloud.colors_[i];
} else {
colors_.clear();
}
points_.resize(new_vert_num);
for (size_t i = 0; i < add_vert_num; i++)
points_[old_vert_num + i] = cloud.points_[i];
return (*this);
}
PointCloud operator+(const PointCloud &cloud) const {
return (PointCloud(*this) += cloud);
}
//===================================================================
geometry::PointCloud& SelectDownSample(
geometry::PointCloud &output,
std::vector<int> indices,
bool invert /* = false */){
output.Clear();
bool has_normals = HasNormals();
bool has_colors = HasColors();
bool has_curvature = HasCurvatures();
std::vector<bool> mask = std::vector<bool>(points_.size(), invert);
for (size_t i : indices) {
mask[i] = !invert;
}
for (size_t i = 0; i < points_.size(); i++) {
if (mask[i]) {
output.points_.push_back(points_[i]);
if (has_normals) output.normals_.push_back(normals_[i]);
if (has_colors) output.colors_.push_back(colors_[i]);
if (has_curvature) output.curvatures_.push_back(curvatures_[i]);
}
}
return output;
}
// Project along direction Ln onto the z = 0 plane (lambda stored in z) =====
geometry::PointCloud project(geometry::PointCloud &project_point_, std::vector<double> Ln){
double Lx = Ln[0];
double Ly = Ln[1];
double Lz = Ln[2];
project_point_.points_.resize(points_.size() );
for(int i = 0; i < points_.size(); ++i)
{
double x0 = points_[i](0);
double y0 = points_[i](1);
double z0 = points_[i](2);
double lambda = z0/Lz;
project_point_.points_[i](0) = x0 - lambda * Lx;
project_point_.points_[i](1) = y0 - lambda * Ly;
project_point_.points_[i](2) = lambda;
}
return project_point_;
}
//kdtree======================================================================
void ComputeNeighbor(int nn){
geometry::KDTreeFlann kdtree;
kdtree.setCloud(points_);
neighbors_.clear();
neighbors_.resize(points_.size());
for(size_t i = 0; i < points_.size(); ++i){
std::vector<int> new_indices_vec(nn);
std::vector<double> new_dists_vec(nn);
kdtree.SearchKNN(points_[i], nn, new_indices_vec,
new_dists_vec);
neighbors_[i] = new_indices_vec;
}
}
void ClearNeighbor(){
neighbors_.clear();
}
bool HasNeighbors(){
if(neighbors_.size() == 0){
return false;
}else{
return true;
}
}
int NeighborsSize(){
if(HasNeighbors()){
return neighbors_[0].size();
}else{
return 0;
}
}
// Compute normals ===========================================================
void NormalEstimate(){
normals_.resize(points_.size() );
curvatures_.resize(points_.size() );
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (size_t i = 0; i < points_.size(); ++i) {
Eigen::Vector4d normal0(0., 0., 0., 0.);
std::vector<Eigen::Vector3d > neighbor_point;
int number_each_neighbour = static_cast<int>(neighbors_[i].size() );
for (int j = 0; j < number_each_neighbour; ++j) {
int point_index = neighbors_[i][j];
neighbor_point.push_back(points_[point_index] );
}
std::vector<double> weights(neighbor_point.size(), 1.0);
PCAEstimateNormal(neighbor_point.begin(), neighbor_point.end(), weights, normal0);
if(normal0(2) >= 0){
normals_[i](0) = normal0(0);
normals_[i](1) = normal0(1);
normals_[i](2) = normal0(2);
}else {
normals_[i](0) = -normal0(0);
normals_[i](1) = -normal0(1);
normals_[i](2) = -normal0(2);
}
curvatures_[i] = normal0(3);
}
}
void NormalEstimatek(int nn, bool fast_normal_computation){
geometry::KDTreeFlann kdtree;
kdtree.setCloud(points_);
normals_.resize(points_.size() );
curvatures_.resize(points_.size() );
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (size_t i = 0; i < points_.size(); ++i) {
std::vector<int> new_indices_vec(nn);
std::vector<double> new_dists_vec(nn);
kdtree.SearchKNN(points_[i], nn, new_indices_vec,
new_dists_vec);
Eigen::Vector4d vec = ComputeNormal(*this, new_indices_vec, fast_normal_computation);
normals_[i](0) = vec(0);
normals_[i](1) = vec(1);
normals_[i](2) = vec(2);
curvatures_[i] = vec(3);
}
}
Eigen::Vector4d ComputeNormal(const PointCloud &cloud,
const std::vector<int> &indices,
bool fast_normal_computation) {
if (indices.size() == 0) {
return Eigen::Vector4d::Zero();
}
Eigen::Matrix3d covariance;
Eigen::Matrix<double, 9, 1> cumulants;
cumulants.setZero();
for (size_t i = 0; i < indices.size(); i++) {
const Eigen::Vector3d &point = cloud.points_[indices[i]];
cumulants(0) += point(0);
cumulants(1) += point(1);
cumulants(2) += point(2);
cumulants(3) += point(0) * point(0);
cumulants(4) += point(0) * point(1);
cumulants(5) += point(0) * point(2);
cumulants(6) += point(1) * point(1);
cumulants(7) += point(1) * point(2);
cumulants(8) += point(2) * point(2);
}
cumulants /= (double)indices.size();
covariance(0, 0) = cumulants(3) - cumulants(0) * cumulants(0);
covariance(1, 1) = cumulants(6) - cumulants(1) * cumulants(1);
covariance(2, 2) = cumulants(8) - cumulants(2) * cumulants(2);
covariance(0, 1) = cumulants(4) - cumulants(0) * cumulants(1);
covariance(1, 0) = covariance(0, 1);
covariance(0, 2) = cumulants(5) - cumulants(0) * cumulants(2);
covariance(2, 0) = covariance(0, 2);
covariance(1, 2) = cumulants(7) - cumulants(1) * cumulants(2);
covariance(2, 1) = covariance(1, 2);
if (fast_normal_computation) {
return FastEigen3x3(covariance);
} else {
Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver;
solver.compute(covariance, Eigen::ComputeEigenvectors);
Eigen::Vector3d normal = solver.eigenvectors().col(0);
// double curvature = solver.eigenvalues().minCoeff() / solver.eigenvalues().sum();
return Eigen::Vector4d(normal(0), normal(1), normal(2), 0.);
}
}
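// Note: the normal is the eigenvector of the 3x3 neighborhood covariance
// associated with the smallest eigenvalue. The fast path delegates to
// FastEigen3x3(); the exact path currently returns curvature 0 because
// the curvature computation is commented out above.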
//==========================================================================
Eigen::Vector3d GetMinBound() const{
BoundingBox box(points_);
return box.min_values;
}
Eigen::Vector3d GetMaxBound() const{
BoundingBox box(points_);
return box.max_values;
}
public:
std::vector<Eigen::Vector3d> points_;
std::vector<Eigen::Vector3d> colors_;
std::vector<Eigen::Vector3d> normals_;
std::vector<double> curvatures_;
std::vector<std::vector<int> > neighbors_;
};
}//namespace geometry
}//namespace PPP
#endif //POINTCLOUD_H
|
5969.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can also be used to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
{
#pragma omp parallel for simd
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
#pragma omp parallel for simd
for (i = 0; i < _PB_N; i++)
{
#pragma omp target teams distribute
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
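/* After centering, symmat[j1][j2] below accumulates
   sum_i (data[i][j1] - mean[j1]) * (data[i][j2] - mean[j2]),
   i.e. an unnormalized covariance (no division by N - 1 here). */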
#pragma omp parallel for simd
for (j1 = 0; j1 < _PB_M; j1++)
{
#pragma omp target teams distribute
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
bfm_mixed_solver.h | // -*- mode:c++; c-basic-offset: 4 -*-
#ifndef INCLUDED_BFM_MIXED_SOLVER_HT_H
#define INCLUDED_BFM_MIXED_SOLVER_HT_H
#include <bfm.h>
#include <util/lattice/bfm_evo.h>
#include <alg/enum_int.h>
#include <omp.h>
#include <pthread.h>
#include <cstdio>
#include <cstdlib>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
namespace mixed_cg
{
// check if 2 instances of bfm agree on what they are going to do.
template < typename Float_out, typename Float_in >
inline bool check (bfm_evo < Float_out > &bfm_out,
bfm_evo < Float_in > &bfm_in)
{
if (bfm_out.node_latt[0] != bfm_in.node_latt[0]) return false;
if (bfm_out.node_latt[1] != bfm_in.node_latt[1]) return false;
if (bfm_out.node_latt[2] != bfm_in.node_latt[2]) return false;
if (bfm_out.node_latt[3] != bfm_in.node_latt[3]) return false;
if (bfm_out.Ls != bfm_in.Ls) return false;
if (bfm_out.precon_5d != bfm_in.precon_5d) return false;
return true;
}
// Convert between single/double precision bfm fermions
template < typename Float_out, typename Float_in >
inline void threaded_convFermion (Fermion_t out, Fermion_t in,
bfm_evo < Float_out > &bfm_out,
bfm_evo < Float_in > &bfm_in)
{
int me = bfm_out.thread_barrier ();
if (!check (bfm_out, bfm_in)) {
if (bfm_out.isBoss () && !me) {
printf ("Output/Input fermions don't match.\n");
}
exit (-1);
}
// Simple copy, this shouldn't be called, right?
if (sizeof (Float_out) == sizeof (Float_in)) {
bfm_out.copy (out, in);
return;
}
// otherwise, we do the conversion
//
// Note: this function is running under threaded environment.
int Nspinco = 12;
int out_i_inc = bfm_out.simd () * 2;
int in_i_inc = bfm_in.simd () * 2;
int out_lat[5] = { bfm_out.node_latt[0],
bfm_out.node_latt[1],
bfm_out.node_latt[2],
bfm_out.node_latt[3],
bfm_out.Ls
};
int vol5d_out =
out_lat[0] * out_lat[1] * out_lat[2] * out_lat[3] * out_lat[4];
int thrlen, throff;
bfm_out.thread_work_nobarrier (vol5d_out, me, thrlen, throff);
Float_out *outf = (Float_out *) out;
Float_in *inf = (Float_in *) in;
for (int site = throff; site < throff + thrlen; ++site) {
int x[4], s;
int si = site;
x[0] = si % out_lat[0]; si = si / out_lat[0];
x[1] = si % out_lat[1]; si = si / out_lat[1];
x[2] = si % out_lat[2]; si = si / out_lat[2];
x[3] = si % out_lat[3]; s = si / out_lat[3];
// both in and out must have the same preconditioning scheme.
int sp = bfm_out.precon_5d ? s : 0;
if (((x[0] + x[1] + x[2] + x[3] + sp) & 0x1) == 1) {
if (!cps::GJP.Gparity ()) {
int out_base = bfm_out.bagel_idx5d (x, s, 0, 0, Nspinco, 1);
int in_base = bfm_in.bagel_idx5d (x, s, 0, 0, Nspinco, 1);
for (int co = 0; co < Nspinco; co++) {
for (int reim = 0; reim < 2; reim++) {
int out_id = out_base + reim + co * out_i_inc;
int in_id = in_base + reim + co * in_i_inc;
outf[out_id] = inf[in_id];
}} //co,reim
}
else {
#ifndef BFM_GPARITY
printf("Compiled with BFM without Gparity\n");exit(-43);
#else
//G-parity checkerboard ordering stacks the second flavour after the first on each checkerboard : cb0[f0 f1]cb1[f0 f1]
#ifdef BFM_GPARITY
int out_base[2] =
{ bfm_out.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 0),
bfm_out.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 1) };
int in_base[2] =
{ bfm_in.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 0),
bfm_in.bagel_idx5d (x, s, 0, 0, Nspinco, 1, 1) };
# else
int out_base[2] =
{ bfm_out.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 0),
bfm_out.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 1) };
int in_base[2] =
{ bfm_in.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 0),
bfm_in.bagel_gparity_idx5d (x, s, 0, 0, Nspinco, 1, 1) };
# endif
for (int flav = 0; flav < 2; flav++) {
for (int co = 0; co < Nspinco; co++) {
for (int reim = 0; reim < 2; reim++) {
int out_id = out_base[flav] + reim + co * out_i_inc;
int in_id = in_base[flav] + reim + co * in_i_inc;
outf[out_id] = inf[in_id];
}} //co,reim
} //flav
#endif
}
} //cb
} //xyzts
}
// Convert between single/double precision bfm fermions
// CK: And do it quickly! The original takes as long as an Mprec!
template < typename Float_out, typename Float_in >
inline void threaded_convFermion_fast (Fermion_t out, Fermion_t in,
bfm_evo < Float_out > &bfm_out,
bfm_evo < Float_in > &bfm_in)
{
// Simple copy, this shouldn't be called, right?
if (sizeof (Float_out) == sizeof (Float_in)) {
return bfm_out.copy (out, in);
}
#if 0
const static int nspinco = 12;
int me, thrlen, throff;
int work =
(bfm_out.gparity ? 2 : 1) * bfm_out.cbLs * bfm_out.simd_cbvol *
bfm_out.nsimd * nspinco * 2;
bfm_out.thread_work (work, me, thrlen, throff);
Float_in *x = (Float_in *) in + throff;
Float_out *y = (Float_out *) out + throff;
for (int s = 0; s < thrlen; ++s)
y[s] = x[s];
bfm_out.thread_barrier ();
#else
//Use bfm-3.2 (imported) precisionChange method
if (sizeof (Float_out) == sizeof (double))
bfm_out.precisionChange (in, out, SingleToDouble, 0);
else
bfm_out.precisionChange (in, out, DoubleToSingle, 0);
#endif
}
// Reinitialize communication subsystem.
// Check bfmcommspi.C in the bfm package to see if this can be avoided.
template < typename Float_new, typename Float_old >
inline void switch_comm (bfm_evo < Float_new > &bfm_new,
bfm_evo < Float_old > &bfm_old)
{
if (static_cast < void *>(&bfm_new)
== static_cast < void *>(&bfm_old))
return;
int me = bfm_old.thread_barrier ();
if (!me) {
bfm_old.comm_end ();
bfm_new.comm_init ();
// Question: how do we propagate the reinitialized information
// to other threads?
// Answer: thread barrier does this.
}
bfm_new.thread_barrier ();
}
// Both sol and src are double precision fermions. Single precision
// solver is only used internally.
//
// Things to be set before using this function:
//
// double precision solver mass, stopping condition, max iteration
// number.
//
// single precision solver mass, stopping condition, max iteration
// number.
//
// Gauge field must be initialized for both double and single prec
// solvers.
//
// the communication subsystem must be ready for bfm_d to use (due to
// the way bfmcommspi is written, one must reinitialize the
// communication object when switching between single and double
// precisions).
//
// max_cycle: the maximum number of restarts will be performed.
// N is the number of low modes removed (subtracted) from the final solution. All evecs will be used for the deflated solve, this just makes the solution a 'high-mode' solution for use in A2A propagators.
//
//EIGENVECTORS SHOULD BE SINGLE PRECISION!
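// The overall scheme is iterative refinement (defect correction):
//
//   r = b - MdagM * x          (double-precision residual)
//   solve MdagM * e = r        (single precision, optionally deflated)
//   x = x + e                  (double-precision update)
//
// repeated up to max_cycle times, followed by a final double-precision
// CGNE_prec_MdagM polish on the original system.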
inline int threaded_cg_mixed_MdagM (Fermion_t sol, Fermion_t src,
bfm_evo < double >&bfm_d,
bfm_evo < float >&bfm_f, int max_cycle,
cps::InverterType itype = cps::CG,
// the following parameters are for deflation
multi1d < Fermion_t[2] > *evec = NULL,
multi1d < float >*eval = NULL, int N = 0)
{
int me = bfm_d.thread_barrier ();
if (bfm_f.isBoss () && !me) {
printf
("cg_mixed_MdagM: bfm_d.CGdiagonalMee = %d, bfm_f.CGdiagonalMee = %d\n",
bfm_d.CGdiagonalMee, bfm_f.CGdiagonalMee);
}
double frsd = bfm_f.residual;
Fermion_t src_d = bfm_d.threadedAllocFermion ();
Fermion_t tv1_d = bfm_d.threadedAllocFermion ();
Fermion_t tv2_d = bfm_d.threadedAllocFermion ();
Fermion_t sol_f = bfm_f.threadedAllocFermion ();
Fermion_t src_f = bfm_f.threadedAllocFermion ();
double src_norm = bfm_d.norm (src);
double stop = src_norm * bfm_d.residual * bfm_d.residual;
if (bfm_f.isBoss () && !me) {
printf ("cg_mixed_MdagM: src_norm = %17.10e\n", src_norm);
}
int iter = 0;
for (int i = 0; i < max_cycle; ++i) {
// compute double precision rsd and also new RHS vector.
bfm_d.Mprec (sol, tv1_d, src_d, 0, 0);
bfm_d.Mprec (tv1_d, tv2_d, src_d, 1, 0);	// tv2_d = MdagM * sol
double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.);
if (bfm_f.isBoss () && !me) {
printf
("CPS cg_mixed_MdagM: iter = %d rsd = %17.10e(d) stop = %17.10e(d)\n",
i, norm, stop);
}
// my ad hoc stopping condition
if ((i < (max_cycle - 1)) && (norm < 100. * stop))
break;
// will this cause a deadlock when combined with the
// condition above? i.e., will we lose a huge factor in the
// accuracy of rsd when converting from single to double?
if (!me)
while (norm * bfm_f.residual * bfm_f.residual < stop)
bfm_f.residual *= 2;
//bfm_d.thread_barrier(); //not needed as next line has a barrier at the beginning
// bfm_f.residual = sqrt(stop/norm);
threaded_convFermion (src_f, src_d, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
bfm_f.set_zero (sol_f);
switch (itype) {
case cps::CG:
if (evec && eval && (*eval).size () > 0) {
//CK: NOTE it is the single-precision bfm instance doing the deflation. All of its linalg assumes then single precision fermions, *including the eigenvectors*
if (bfm_f.isBoss () && !me)
printf ("bfm_evo::deflating with %d eigen vectors.\n",
(*eval).size ());
bfm_f.deflate (sol_f, src_f, evec, eval, (*eval).size ());
}
iter += bfm_f.CGNE_prec_MdagM (sol_f, src_f);
break;
case cps::EIGCG:
iter += bfm_f.Eig_CGNE_prec (sol_f, src_f);
break;
default:
if (bfm_f.isBoss () && !me) {
printf ("cg_mixed_MdagM: unsupported inverter type.\n");
}
exit (-1);
}
switch_comm (bfm_d, bfm_f);
threaded_convFermion (tv1_d, sol_f, bfm_d, bfm_f);
bfm_d.axpy (sol, tv1_d, sol, 1.);
}
iter += bfm_d.CGNE_prec_MdagM (sol, src);
if (N > 0) { // Subtract N low modes from final sol, usually for all to all propagators.
// TODO is it legal to only use the single precision eval?
threaded_convFermion (src_f, src, bfm_f, bfm_d);
bfm_f.deflate (sol_f, src_f, evec, eval, N);
threaded_convFermion (tv1_d, sol_f, bfm_d, bfm_f);
bfm_d.axpy (sol, tv1_d, sol, -1.);
}
bfm_d.threadedFreeFermion (src_d);
bfm_d.threadedFreeFermion (tv1_d);
bfm_d.threadedFreeFermion (tv2_d);
bfm_f.threadedFreeFermion (sol_f);
bfm_f.threadedFreeFermion (src_f);
double sol_norm = bfm_d.norm (sol);
if (bfm_d.isBoss () && !me) {
printf
("cg_mixed_MdagM: final sol norm = %17.10e ; final iter count = %d\n",
sol_norm, iter);
}
bfm_f.residual = frsd;
return iter;
}
// Not implemented for older BFM
//#ifdef BFM_GPARITY
#if 1
inline int threaded_cg_mixed_MMdag (Fermion_t sol, Fermion_t src,
bfm_evo < double >&bfm_d,
bfm_evo < float >&bfm_f, int max_cycle,
cps::InverterType itype = cps::CG,
// the following parameters are for deflation
multi1d < Fermion_t[2] > *evec = NULL,
multi1d < float >*eval = NULL, int N = 0)
{
int me = bfm_d.thread_barrier ();
if (bfm_f.isBoss () && !me) {
printf
("cg_mixed_MMdag: bfm_d.CGdiagonalMee = %d, bfm_f.CGdiagonalMee = %d\n",
bfm_d.CGdiagonalMee, bfm_f.CGdiagonalMee);
}
double frsd = bfm_f.residual;
Fermion_t src_d = bfm_d.threadedAllocFermion ();
Fermion_t tv1_d = bfm_d.threadedAllocFermion ();
Fermion_t tv2_d = bfm_d.threadedAllocFermion ();
Fermion_t sol_f = bfm_f.threadedAllocFermion ();
Fermion_t src_f = bfm_f.threadedAllocFermion ();
double src_norm = bfm_d.norm (src);
double stop = src_norm * bfm_d.residual * bfm_d.residual;
if (bfm_f.isBoss () && !me) {
printf ("cg_mixed_MMdag: src_norm = %17.10e\n", src_norm);
}
int iter = 0;
for (int i = 0; i < max_cycle; ++i) {
// compute double precision rsd and also new RHS vector.
bfm_d.Mprec (sol, tv1_d, src_d, 1, 0);
bfm_d.Mprec (tv1_d, tv2_d, src_d, 0, 0); // tv2_d = MMdag * sol
double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.);
if (bfm_f.isBoss () && !me) {
printf ("cg_mixed_MMdag: iter = %d rsd = %17.10e(d) stop = %17.10e(d)\n",
i, norm, stop);
}
// my ad hoc stopping condition
if (norm < 100. * stop)
break;
// will this cause a deadlock when combined with the
// condition above? i.e., will we lose a huge factor in the
// accuracy of rsd when converting from single to double?
while (norm * bfm_f.residual * bfm_f.residual < stop)
bfm_f.residual *= 2;
threaded_convFermion (src_f, src_d, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
bfm_f.set_zero (sol_f);
switch (itype) {
case cps::CG:
if (evec && eval && N) {
bfm_f.deflate (sol_f, src_f, evec, eval, N);
}
iter += bfm_f.CGNE_prec_MMdag (sol_f, src_f);
break;
/*case cps::EIGCG:
iter += bfm_f.Eig_CGNE_prec(sol_f, src_f);
break; */
default:
if (bfm_f.isBoss () && !me) {
printf ("cg_mixed_MMdag: unsupported inverter type.\n");
}
exit (-1);
}
switch_comm (bfm_d, bfm_f);
threaded_convFermion (tv1_d, sol_f, bfm_d, bfm_f);
bfm_d.axpy (sol, tv1_d, sol, 1.);
}
bfm_d.threadedFreeFermion (src_d);
bfm_d.threadedFreeFermion (tv1_d);
bfm_d.threadedFreeFermion (tv2_d);
bfm_f.threadedFreeFermion (sol_f);
bfm_f.threadedFreeFermion (src_f);
iter += bfm_d.CGNE_prec_MMdag (sol, src);
double sol_norm = bfm_d.norm (sol);
if (bfm_d.isBoss () && !me) {
printf
("cg_mixed_MMdag: final sol norm = %17.10e ; final iter count = %d\n",
sol_norm, iter);
}
bfm_f.residual = frsd;
bfm_f.thread_barrier (); //make sure no threads are waiting to write to bfm_f.residual while others have moved onto something else that potentially changes bfm_f.residual
return iter;
}
#endif //#ifdef BFM_GPARITY
// apply single precision solver to double precision vectors. Both
// sol_d and src_d are in double precision.
//
// sol_f and src_f are auxiliary fermions in single
// precision. Their content will be overriden after calling this
// function.
//
// If import_guess == true, then we import sol_d as an initial
// guess, other we do a zero start CG.
inline int cg_single_prec (Fermion_t sol_d, Fermion_t src_d,
Fermion_t sol_f, Fermion_t src_f,
bfm_evo < double >&bfm_d, bfm_evo < float >&bfm_f)
{
threaded_convFermion (src_f, src_d, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
bfm_f.set_zero (sol_f);
int iter = bfm_f.CGNE_prec_MdagM (sol_f, src_f);
switch_comm (bfm_d, bfm_f);
threaded_convFermion (sol_d, sol_f, bfm_d, bfm_f);
return iter;
}
// cg_MdagM_single_precnd: Nested CG, single precision solver is
// used as a preconditioner.
//
// Calling interface is the same as threaded_cg_mixed_MdagM().
inline int cg_MdagM_single_precnd (Fermion_t sol, Fermion_t src,
bfm_evo < double >&bfm_d,
bfm_evo < float >&bfm_f)
{
int me = bfm_d.thread_barrier ();
double frsd = bfm_f.residual;
Fermion_t r = bfm_d.threadedAllocFermion ();
Fermion_t minvr = bfm_d.threadedAllocFermion ();
Fermion_t d = bfm_d.threadedAllocFermion ();
Fermion_t ad = bfm_d.threadedAllocFermion ();
Fermion_t aad = bfm_d.threadedAllocFermion ();
Fermion_t tv1 = bfm_d.threadedAllocFermion ();
Fermion_t sol_f = bfm_f.threadedAllocFermion ();
Fermion_t src_f = bfm_f.threadedAllocFermion ();
const double src_norm = bfm_d.norm (src);
const double stop = src_norm * bfm_d.residual * bfm_d.residual;
int iter_s = 0;
bfm_d.Mprec (sol, ad, tv1, 0, 0);
bfm_d.Mprec (ad, aad, tv1, 1, 0); // aad = MdagM * x0 (double prec)
bfm_d.axpy (r, aad, src, -1.0); // r0 = b - MdagM * x0
iter_s += cg_single_prec (minvr, r, sol_f, src_f, bfm_d, bfm_f);
bfm_d.copy (d, minvr); // d0 = (M'dagM')^(-1) * r0
double rtminvr = bfm_d.inner_real (r, minvr);
int k = 1;
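  // What follows is standard preconditioned CG, with the single precision
  // solve playing the role of the preconditioner (M'dagM')^{-1}:
  //   alpha_k = (r_k, (M'dagM')^{-1} r_k) / (d_k, MdagM d_k)
  //   beta_k  = (r_{k+1}, (M'dagM')^{-1} r_{k+1}) / (r_k, (M'dagM')^{-1} r_k)
  //   d_{k+1} = (M'dagM')^{-1} r_{k+1} + beta_k d_k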
for (; k <= bfm_d.max_iter; ++k) {
double dtad = bfm_d.Mprec (d, ad, tv1, 0, 1);
bfm_d.Mprec (ad, aad, tv1, 1, 0); // aad = MdagM * d[k] (double prec)
double alpha = rtminvr / dtad;
bfm_d.axpy (sol, d, sol, alpha);
double rsd = bfm_d.axpy_norm (r, aad, r, -alpha);
    // check watch file (NB: this early return leaks the temporaries
    // allocated above and skips restoring bfm_f.residual)
FILE *fp = fopen ("stop.file", "r");
if (fp) {
fclose (fp);
printf ("Found watchfile stop.file\n");
return 1;
}
// check stopping condition
if (rsd < stop) {
// compute true residual
bfm_d.Mprec (sol, ad, tv1, 0, 0);
bfm_d.Mprec (ad, aad, tv1, 1, 0);
double true_rsd = bfm_d.axpy_norm (tv1, aad, src, -1.0);
if (bfm_d.isBoss () && !me) {
printf
("cg_MdagM_single_precnd: converged in %d(d)+%d(s) iterations.\n", k,
iter_s);
printf ("cg_MdagM_single_precnd: true residual = %17.10e.\n",
sqrt (true_rsd / src_norm));
}
break;
}
iter_s += cg_single_prec (minvr, r, sol_f, src_f, bfm_d, bfm_f);
double tmp = bfm_d.inner_real (r, minvr);
double beta = tmp / rtminvr;
rtminvr = tmp;
bfm_d.axpby (d, minvr, d, 1.0, beta);
}
if (k > bfm_d.max_iter) {
if (bfm_d.isBoss () && !me) {
printf
("cg_MdagM_single_precnd: CG not converged in %d(d)+%d(s) iterations.\n",
k, iter_s);
}
}
bfm_d.threadedFreeFermion (r);
bfm_d.threadedFreeFermion (minvr);
bfm_d.threadedFreeFermion (d);
bfm_d.threadedFreeFermion (ad);
bfm_d.threadedFreeFermion (aad);
bfm_d.threadedFreeFermion (tv1);
bfm_f.threadedFreeFermion (sol_f);
bfm_f.threadedFreeFermion (src_f);
bfm_f.residual = frsd;
//bfm_f.thread_barrier(); //no need, barrier in next call
bfm_d.CGNE_prec_MdagM (sol, src);
return k + iter_s;
}
inline int threaded_cg_mixed_M (Fermion_t sol[2], Fermion_t src[2],
bfm_evo < double >&bfm_d,
bfm_evo < float >&bfm_f, int max_cycle,
cps::InverterType itype = cps::CG,
// the following parameters are for deflation
multi1d < Fermion_t[2] > *evec = NULL,
multi1d < float >*eval = NULL, int N = 0)
{
int me = bfm_d.thread_barrier ();
Fermion_t be = bfm_d.threadedAllocFermion ();
Fermion_t bo = bfm_d.threadedAllocFermion ();
Fermion_t ta = bfm_d.threadedAllocFermion ();
Fermion_t tb = bfm_d.threadedAllocFermion ();
double nsrc = bfm_d.norm (src[0]) + bfm_d.norm (src[1]);
if (bfm_d.isBoss () && !me) {
printf ("threaded_cg_mixed_M: source norm is %17.10e\n", nsrc);
}
// eo preconditioning
bfm_d.MooeeInv (src[Even], ta, DaggerNo);
bfm_d.Meo (ta, tb, Odd, DaggerNo); // tb == Moe Mee^{-1} src[e]
bfm_d.axpy (ta, tb, src[Odd], -1.0);
bfm_d.Mprec (ta, bo, tb, DaggerYes); // bo = Mprec^dag (src[o] - Moe Mee^{-1} src[e])
int iter =
threaded_cg_mixed_MdagM (sol[Odd], bo, bfm_d, bfm_f, max_cycle, itype, evec,
eval, N);
bfm_d.Meo (sol[Odd], ta, Even, DaggerNo);
bfm_d.axpy (tb, ta, src[Even], -1.0);
bfm_d.MooeeInv (tb, sol[Even], DaggerNo);
double nsol = bfm_d.norm (sol[0]) + bfm_d.norm (sol[1]);
// compute final residual
Fermion_t tmp[2] = { be, bo };
bfm_d.Munprec (sol, tmp, ta, DaggerNo);
double ndiff = 0.;
for (int i = 0; i < 2; ++i) {
bfm_d.axpy (tb, tmp[i], src[i], -1.0);
ndiff += bfm_d.norm (tb);
}
if (bfm_d.isBoss () && !me) {
printf
("threaded_cg_mixed_M: unprec sol norm = %17.10e, residual = %17.10e\n",
nsol, sqrt (ndiff / nsrc));
}
bfm_d.threadedFreeFermion (be);
bfm_d.threadedFreeFermion (bo);
bfm_d.threadedFreeFermion (ta);
bfm_d.threadedFreeFermion (tb);
return iter;
}
inline double sigma_sum_recurse (const int &i, const int &start, const int &N,
double shifts[], const int &nprod_remaining)
{
double out = 0.0;
for (int j = start; j < N; j++) {
if (j == i)
continue; //skip i
double toadd = shifts[j];
if (nprod_remaining > 1) {
toadd *= sigma_sum_recurse (i, j + 1, N, shifts, nprod_remaining - 1);
}
out += toadd;
}
return out;
}
inline double sigma_prod (const int &i, const int &n, const int &N,
double shifts[])
{
if (n == N - 1)
return 1.0;
int prod_size = N - 1 - n;
return sigma_sum_recurse (i, 0, N, shifts, prod_size);
}
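// sigma_prod (i, n, N, shifts) is the elementary symmetric polynomial of
// degree N-1-n in the N-1 shifts {shifts[j] : j != i}. A worked example
// with N = 3 and shifts {s0, s1, s2}, taking i = 0:
//   sigma_prod (0, 2, 3, s) = 1            (empty product)
//   sigma_prod (0, 1, 3, s) = s1 + s2      (all products of 1 shift)
//   sigma_prod (0, 0, 3, s) = s1 * s2      (all products of 2 shifts)
// so that Prod_{j!=0} (B + s_j) = B^2 + (s1 + s2) B + s1 s2.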
/*CK: Implementation of multi-shift with guesses from
J.~C.~Osborn,
``Initial guesses for multi-shift solvers,''
PoS LATTICE {\bf 2008} (2008) 029
[arXiv:0810.1081 [hep-lat]].
*/
template < typename Float >
int CGNE_prec_MdagM_multi_shift_with_guesses (Fermion_t psi[],
Fermion_t src,
Fermion_t guesses[],
double mass[],
double alpha[],
int nshift,
const double mresidual_in[],
int single,
bfm_evo < Float > &bfm)
{
//'single' appears to sum all the solutions into psi[0]
int me = bfm.thread_barrier ();
  //Threads each take a local copy of mresidual_in[] so there are no worries about race conditions
double mresidual[nshift];
for (int i = 0; i < nshift; ++i)
mresidual[i] = mresidual_in[i];
//Form combination of guesses that results in a new source y in the common Krylov space
// w = \sum_i c_i guess_i where c_i = \Prod_{j!=i} 1/(shift[j]-shift[i])
Fermion_t w = bfm.threadedAllocFermion ();
bfm.set_zero (w);
for (int i = 0; i < nshift; i++) {
double c_i = 1.0;
for (int j = 0; j < nshift; j++) {
if (j == i)
continue;
c_i *= 1.0 / (mass[j] - mass[i]);
}
bfm.axpy (w, guesses[i], w, c_i);
if (bfm.isBoss () && (!me))
printf ("CGNE_prec_MdagM_multi_shift_with_guesses: c_%d = %e\n", i, c_i);
}
if (bfm.isBoss () && (!me))
printf ("CGNE_prec_MdagM_multi_shift_with_guesses: w norm = %e\n",
bfm.norm (w));
  //Form y_i = Prod_{j!=i} (MMdag + shift[j]) w; these are all in the same Krylov space
Fermion_t y[nshift];
for (int i = 0; i < nshift; i++) {
y[i] = bfm.threadedAllocFermion (mem_fast);
bfm.set_zero (y[i]);
}
//We can multiply out. Let MMdag = B
//y_i = Prod_{j!=i, 0<=j<=N-1} (MMdag + shift[j]) w
// = B^{N-1}w
// + (sum_{j!=i, j<=N-1} shift[j])B^{N-2}w
// + sum_{j!=i, j<=N-2} shift[j] * ( sum_{k!=i,k>j, k<=N-1} shift[k] ) B^{N-3}w
// +...
//The coefficient of a term B^n is the sum of all the unique products of (N-1)-n elements of the set of shifts excluding shift[i]: { shift[0], shift[1],.. excl shift[i].... shift[N-2], shift[N-1] }
//We take a product of size zero to have value 1.0 (as we would with factorials). Indices all start from 0 and end at N-1.
//For example
  //n=N-1 has coefficient 1.0, which is the product of (N-1)-(N-1)=0 shifts
  //n=N-2 is the sum of all products comprising 1 shift: sum_{j!=i, j<=N-1} shift[j]
//n=N-3 is the sum of all products of 2 shifts: shift[0]*(shift[1] + shift[2] + ... shift[N-1]) + shift[1]*(shift[2] + shift[3] + ... shift[N-1]) + ... + shift[N-2]*shift[N-1]
// = sum_{j!=i, j<=N-2} shift[j] * ( sum_{k!=i,k>j;k<=N-1} shift[k] )
//etc...
//n=0 is the product of all shifts bar i
  //Note, we keep the running product of MMdag in w: w_{n+1} = MMdag w_n, defining w_0 = w
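  //A concrete check with N = 3 (matching the sigma_prod example above): for i = 0,
  //  y_0 = (B + shift[1])(B + shift[2]) w = B^2 w + (shift[1] + shift[2]) B w + shift[1] shift[2] w
  //so the coefficient of B^n w is exactly sigma_prod(0, n, 3, shifts).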
Fermion_t tmp = bfm.threadedAllocFermion (mem_fast);
Fermion_t tmp2 = bfm.threadedAllocFermion (mem_fast);
for (int n = 0; n < nshift; n++) { //last term MMdag^{nshift-1}
    //At this point w = MMdag^n w_0
for (int i = 0; i < nshift; i++) {
double coeff_i = sigma_prod (i, n, nshift, mass);
if (bfm.isBoss () && (!me))
printf
("CGNE_prec_MdagM_multi_shift_with_guesses: n=%d i=%d sum_prod=%e \n",
n, i, coeff_i);
bfm.axpy (y[i], w, y[i], coeff_i); // y[i] += coeff[i] * MMdag^n w
}
bfm.Mprec (w, tmp, tmp2, DaggerNo); //tmp = M w
bfm.Mprec (tmp, w, tmp2, DaggerYes); //w = Mdag tmp
}
Fermion_t r = bfm.threadedAllocFermion (mem_fast);
//Also need r = src - Prod_j (MMdag + shift[j]) w
// = src - (MMdag + shift[i]) Prod_{j!=i, j<=N-1} (MMdag + shift[j]) w for any i
  //Take i = N-1:
  //       = src - (MMdag + shift[N-1]) Prod_{j!=N-1} (MMdag + shift[j]) w
// = src - (MMdag + shift[N-1]) y[N-1]
bfm.Mprec (y[nshift - 1], tmp, tmp2, DaggerNo);
bfm.Mprec (tmp, r, tmp2, DaggerYes); //after this, r = MMdag y[N-1]
bfm.axpy (r, y[nshift - 1], r, mass[nshift - 1]); // r = (MMdag + shift[N-1])y[N-1]
bfm.axpy (r, r, src, -1.0); // r = src - (MMdag + shift[N-1])y[N-1]
//run standard multi-shift with r as the source
//To achieve the desired residual on the solution we need to modify the residuals to reflect the difference in source norm
//In the multi-mass solve we are trying to emulate:
//|resid|^2 = |orig src|^2 * (orig mresidual)^2
  //Keeping this fixed when the source changes from src to r means we want
//(new mresidual) = sqrt( |orig src|^2 * (orig mresidual)^2 / |new src|^2 )
Float orig_src_norm2 = bfm.norm (src);
Float new_src_norm2 = bfm.norm (r);
for (int i = 0; i < nshift; i++)
mresidual[i] *= sqrt (orig_src_norm2 / new_src_norm2);
int iter =
bfm.CGNE_prec_MdagM_multi_shift (psi, r, mass, alpha, nshift, mresidual, 0);
//Add the solutions to the initial guess vectors y
for (int n = 0; n < nshift; n++)
bfm.axpy (psi[n], psi[n], y[n], 1.0);
// Check answers
if (bfm.isBoss () && (!me))
printf
("bfm::CGNE_prec_MdagM_multi_shift_with_guesses: Checking solutions\n");
for (int s = 0; s < nshift; s++) {
bfm.Mprec (psi[s], tmp, tmp2, DaggerNo);
bfm.Mprec (tmp, w, tmp2, DaggerYes); //reuse w, now w=MMdag psi[s]
bfm.axpy (w, psi[s], w, mass[s]); // w += mass[s]*psi[s]
bfm.axpy (r, w, src, -1); // r = src - (MMdag+mass[s])*psi[s]
double rn = bfm.norm (r);
double cn = bfm.norm (src);
if (bfm.isBoss () && !me) {
printf
("bfm::CGNE_prec_MdagM_multi_shift_with_guesses: shift[%d] true residual %le \n",
s, sqrt (rn / cn));
}
}
if (single) {
for (int s = 1; s < nshift; s++) {
bfm.axpy (psi[0], psi[s], psi[0], 1.0);
}
}
bfm.threadedFreeFermion (tmp);
bfm.threadedFreeFermion (tmp2);
bfm.threadedFreeFermion (w);
bfm.threadedFreeFermion (r);
for (int i = 0; i < nshift; i++) {
bfm.threadedFreeFermion (y[i]);
}
return iter;
}
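//A minimal usage sketch (hypothetical names): reuse previous solutions
//prev[] for the same shifts as guesses for a new source,
//
//  // int its = CGNE_prec_MdagM_multi_shift_with_guesses (psi, src, prev, mass,
//  //             alpha, nshift, mresidual, 0, bfm_d);
//
//Passing the same array for psi and guesses (as the mixed precision drivers
//below do with sol) is safe: the guesses are fully consumed while forming w
//and the y[i] before psi is first written.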
inline double sigma_sum_recurse_2 (const int &i, const int &k, const int &start,
const int &N, double shifts[],
const int &nprod_remaining)
{
double out = 0.0;
for (int j = start; j < N; j++) {
if (j == i || j == k)
continue; //skip i,k
double toadd = shifts[j];
if (nprod_remaining > 1) {
toadd *=
sigma_sum_recurse_2 (i, k, j + 1, N, shifts, nprod_remaining - 1);
}
out += toadd;
}
return out;
}
inline double sigma_prod_2 (const int &i, const int &k, const int &n,
const int &N, double shifts[])
{
if (n == N - 2)
return 1.0;
int prod_size = N - 2 - n;
return sigma_sum_recurse_2 (i, k, 0, N, shifts, prod_size);
}
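// sigma_prod_2 is the two-index analogue of sigma_prod above: the
// elementary symmetric polynomial of degree N-2-n in the N-2 shifts
// {shifts[j] : j != i, j != k}. E.g. for N = 3, i = 0, k = 1 it gives
// 1 for n = 1 and shifts[2] for n = 0.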
//CK: Multi-mass with multiple sources, using the method in the Osborn paper cited above
template < typename Float >
int CGNE_prec_MdagM_multi_shift_multi_src (Fermion_t psi[],
Fermion_t src[],
double mass[],
double alpha[],
int nshift,
const double mresidual_in[],
int single, bfm_evo < Float > &bfm)
{
//'single' appears to sum all the solutions into psi[0]
int me = bfm.thread_barrier ();
  //We don't know whether mresidual_in is shared or not. To be safe, threads each take a local copy of the residuals that they can
//all simultaneously modify without worrying about race conditions
double mresidual[nshift];
for (int i = 0; i < nshift; ++i)
mresidual[i] = mresidual_in[i];
//Here we need to form y_i = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j)/(\sigma_j-\sigma_k) ] (b_i - b_k)/(\sigma_i-\sigma_k)
// = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j) ] \prod_{j\neq k} 1/(\sigma_j-\sigma_k) (b_i - b_k)
// = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j) ] c_k (b_i - b_k)
//where c_k = \prod_{j\neq k} 1/(\sigma_j-\sigma_k) is calculated beforehand
Fermion_t y[nshift];
Fermion_t Apowm_src[nshift];
double c[nshift];
for (int i = 0; i < nshift; i++) {
y[i] = bfm.threadedAllocFermion (mem_fast);
Apowm_src[i] = bfm.threadedAllocFermion (mem_fast);
bfm.copy (Apowm_src[i], src[i]);
bfm.set_zero (y[i]);
//Calculate coefficients c_i
c[i] = 1.0;
for (int j = 0; j < nshift; j++) {
if (j == i)
continue;
c[i] *= 1.0 / (mass[j] - mass[i]);
}
}
//I don't think we can avoid doing ~nshift^2 matrix multiplications, but I believe we can avoid storing nshift^2 vectors
Fermion_t tmp = bfm.threadedAllocFermion (mem_fast);
Fermion_t tmp2 = bfm.threadedAllocFermion (mem_fast);
//y_i = \sum_{k\neq i} [ \prod_{j\neq i,k} (A + \sigma_j) ] c_k (b_i - b_k)
// = \sum_{k\neq i} c_k { [ \prod_{j\neq i,k} (A + \sigma_j) ] b_i - [ \prod_{j\neq i,k} (A + \sigma_j) ] b_k }
  //These objects are the fundamental elements \prod_{j\neq i,k} (A + \sigma_j) b_i
//Multiplying out = A^{N-2} + A^{N-3}\sum_{j\neq i,k} \sigma_j + A^{N-4} \sum_{j\neq i,k} \sigma_j \sum_{l>j, l\neq i,k} \sigma_l + .....
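  //A concrete check with N = 3, i = 0: the m = 0 pass adds
  //  c_1 s2 (b_0 - b_1) + c_2 s1 (b_0 - b_2)
  //and the m = 1 pass adds A applied to the same differences, giving
  //  y_0 = c_1 (A + s2)(b_0 - b_1) + c_2 (A + s1)(b_0 - b_2)
  //as required (here s_j = mass[j], b_j = src[j]).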
for (int m = 0; m < nshift - 1; m++) { //last term MMdag^{nshift-2}
for (int i = 0; i < nshift; i++) {
for (int k = 0; k < nshift; k++) { //\sum_{k\neq i}
if (k == i)
continue;
double coeff = c[k] * sigma_prod_2 (i, k, m, nshift, mass);
bfm.axpy (y[i], Apowm_src[i], y[i], coeff);
bfm.axpy (y[i], Apowm_src[k], y[i], -coeff);
}
}
for (int n = 0; n < nshift; n++) {
      bfm.Mprec (Apowm_src[n], tmp, tmp2, DaggerNo);	//tmp = M Apowm_src[n]
      bfm.Mprec (tmp, Apowm_src[n], tmp2, DaggerYes);	//Apowm_src[n] = Mdag tmp = MMdag Apowm_src[n]
}
}
Fermion_t r = bfm.threadedAllocFermion (mem_fast);
//Also need r = src_i - (A+\sigma_i)y_i for any i
bfm.Mprec (y[nshift - 1], tmp, tmp2, DaggerNo);
bfm.Mprec (tmp, r, tmp2, DaggerYes);
bfm.axpy (r, y[nshift - 1], r, mass[nshift - 1]);
bfm.axpy (r, r, src[nshift - 1], -1.0);
//run standard multi-shift with r as the source
//To achieve the desired residual on the solution we need to modify the residuals to reflect the difference in source norm
//In the multi-mass solve we are trying to emulate:
//|resid|^2 = |orig src|^2 * (orig mresidual)^2
  //Keeping this fixed when the source changes from src to r means we want
//(new mresidual) = sqrt( |orig src|^2 * (orig mresidual)^2 / |new src|^2 )
Float new_src_norm2 = bfm.norm (r);
for (int i = 0; i < nshift; i++) {
Float orig_src_norm2 = bfm.norm (src[i]);
if (bfm.isBoss () && !me)
printf ("bfm::CGNE_prec_MdagM_multi_src: input src[%d] norm2 %le\n", i,
orig_src_norm2);
mresidual[i] *= sqrt (orig_src_norm2 / new_src_norm2);
}
if (bfm.isBoss () && !me)
printf ("bfm::CGNE_prec_MdagM_multi_src: r norm2 %le\n", new_src_norm2);
int iter =
bfm.CGNE_prec_MdagM_multi_shift (psi, r, mass, alpha, nshift, mresidual, 0);
//Add the solutions to the initial guess vectors y
for (int n = 0; n < nshift; n++)
bfm.axpy (psi[n], psi[n], y[n], 1.0);
// Check answers
if (bfm.isBoss () && (!me))
printf ("bfm::CGNE_prec_MdagM_multi_src: Checking solutions\n");
for (int s = 0; s < nshift; s++) {
bfm.Mprec (psi[s], tmp, tmp2, DaggerNo);
    bfm.Mprec (tmp, Apowm_src[0], tmp2, DaggerYes);	//reuse Apowm_src[0], now Apowm_src[0] = MMdag psi[s]
bfm.axpy (Apowm_src[0], psi[s], Apowm_src[0], mass[s]); // Apowm_src[0] += mass[s]*psi[s]
bfm.axpy (r, Apowm_src[0], src[s], -1); // r = src - (MMdag+mass[s])*psi[s]
double rn = bfm.norm (r);
double cn = bfm.norm (src[s]);
if (bfm.isBoss () && !me) {
printf ("bfm::CGNE_prec_MdagM_multi_src: shift[%d] true residual %le \n",
s, sqrt (rn / cn));
}
}
if (single) {
for (int s = 1; s < nshift; s++) {
bfm.axpy (psi[0], psi[s], psi[0], 1.0);
}
}
bfm.threadedFreeFermion (tmp);
bfm.threadedFreeFermion (tmp2);
bfm.threadedFreeFermion (r);
for (int i = 0; i < nshift; i++) {
bfm.threadedFreeFermion (Apowm_src[i]);
bfm.threadedFreeFermion (y[i]);
}
return iter;
}
template < typename Float >
inline void MdagMplusShift (Fermion_t in, Fermion_t out, const double &shift,
Fermion_t tmp1, Fermion_t tmp2,
bfm_evo < Float > &bfm)
{
bfm.Mprec (in, tmp1, tmp2, DaggerNo);
bfm.Mprec (tmp1, out, tmp2, DaggerYes);
bfm.axpy (out, in, out, shift);
}
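//Usage sketch (a hypothetical fragment): the shifted residual of a
//candidate solution x for (MdagM + s) x = b, with scratch fermions t1, t2:
//
//  // MdagMplusShift<double> (x, out, s, t1, t2, bfm_d); // out = (MdagM + s) x
//  // double rsd = bfm_d.axpy_norm (t1, out, b, -1.0);   // |b - (MdagM + s) x|^2
//
//This is exactly how the restarted multi-shift solver below forms its defect.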
//CK: mixed precision multi-mass using multiple single precision restarted inner loops.
// Does not work very well because the residuals get very large, so that the required stopping conditions
// are below single precision accuracy
// Both sol and src are double precision fermions. Single precision
// solver is only used internally.
// mass, alpha and nshift as usual
// dresidual are the target residuals
// fresidual are the initial single precision residuals, which are dynamically modified during the solve
//
// Things to be set before using this function:
//
// double precision solver mass, max iteration
// number.
//
// single precision solver mass, max iteration
// number.
//
// Gauge field must be initialized for both double and single prec
// solvers.
//
// the communication subsystem must be ready for bfm_d to use (due to
// the way bfmcommspi is written, one must reinitialize the
// communication object when switching between single and double
// precisions).
//
// max_cycle: the maximum number of restarts that will be performed.
inline int threaded_cg_mixed_restarted_multi_shift_MdagM (Fermion_t sol[],
                                                          Fermion_t src,
                                                          double mass[],
                                                          double alpha[],
                                                          int nshift,
                                                          const double dresidual[],
                                                          const double fresidual_in[],
                                                          int single,
                                                          bfm_evo < double >&bfm_d,
                                                          bfm_evo < float >&bfm_f,
                                                          int max_cycle)
{
int me = bfm_d.thread_barrier ();
double fresidual[nshift];
for (int i = 0; i < nshift; ++i)
fresidual[i] = fresidual_in[i]; //local thread copy that can be modified freely
Fermion_t src_d = bfm_d.threadedAllocFermion ();
Fermion_t tv1_d = bfm_d.threadedAllocFermion ();
Fermion_t tv2_d = bfm_d.threadedAllocFermion ();
double src_norm = bfm_d.norm (src);
//Source and solution locations for single precision input and output
Fermion_t sol_f[nshift];
Fermion_t src_f[nshift];
int finished[nshift];
double stop[nshift];
for (int n = 0; n < nshift; n++) {
sol_f[n] = bfm_f.threadedAllocFermion ();
src_f[n] = bfm_f.threadedAllocFermion ();
finished[n] = 0;
stop[n] = src_norm * dresidual[n] * dresidual[n];
}
//Do an initial single precision solve with regular multi-mass solver using input residuals
if (bfm_f.isBoss () && !me)
printf
("threaded_cg_mixed_restarted_multi_shift_MdagM: Doing initial single precision solve\n");
{
threaded_convFermion (src_f[0], src, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
bfm_f.CGNE_prec_MdagM_multi_shift (sol_f, src_f[0], mass, alpha, nshift,
fresidual, 0);
for (int n = 0; n < nshift; n++)
threaded_convFermion (sol[n], sol_f[n], bfm_d, bfm_f);
switch_comm (bfm_d, bfm_f);
}
//Perform restarted multi-mass until the double prec residual meets the target
if (bfm_f.isBoss () && !me)
printf
("threaded_cg_mixed_restarted_multi_shift_MdagM: Starting main iteration loop\n");
int iter = 0;
for (int i = 0; i < max_cycle; ++i) {
// compute double precision rsd and also new RHS vector for each shift
int fin_count = 0;
for (int n = 0; n < nshift; n++) {
if (!finished[n]) {
MdagMplusShift < double >(sol[n], tv1_d, mass[n], src_d, tv2_d, bfm_d); // tv1_d = (MdagM + mass[n]) * sol[n]
double norm = bfm_d.axpy_norm (src_d, tv1_d, src, -1.);
//ad hoc stopping condition from cg_mixed implementation
if (norm < 100. * stop[n])
finished[n] = 1;
if (bfm_f.isBoss () && !me)
printf
("threaded_cg_mixed_restarted_multi_shift_MdagM: iter = %d shift = %d rsd = %17.10e(d) stop = %17.10e(d) finished = %d\n",
i, n, norm, stop[n], finished[n]);
while (norm * fresidual[n] * fresidual[n] < stop[n])
fresidual[n] *= 2;
threaded_convFermion (src_f[n], src_d, bfm_f, bfm_d);
}
fin_count += finished[n];
}
if (fin_count == nshift)
break; //stop when all have finished
switch_comm (bfm_f, bfm_d);
iter +=
CGNE_prec_MdagM_multi_shift_multi_src (sol_f, src_f, mass, alpha, nshift,
fresidual, 0, bfm_f);
switch_comm (bfm_d, bfm_f);
for (int n = 0; n < nshift; n++) {
threaded_convFermion (tv1_d, sol_f[n], bfm_d, bfm_f);
bfm_d.axpy (sol[n], tv1_d, sol[n], 1.);
}
}
bfm_d.threadedFreeFermion (src_d);
bfm_d.threadedFreeFermion (tv1_d);
bfm_d.threadedFreeFermion (tv2_d);
for (int i = 0; i < nshift; i++) {
bfm_f.threadedFreeFermion (sol_f[i]);
bfm_f.threadedFreeFermion (src_f[i]);
}
if (bfm_f.isBoss () && !me)
printf
("threaded_cg_mixed_restarted_multi_shift_MdagM: Running double precision multi-mass using single precision version as guess");
iter +=
CGNE_prec_MdagM_multi_shift_with_guesses (sol, src, sol, mass, alpha,
nshift, dresidual, single, bfm_d);
return iter;
}
//CK: mixed precision multi-mass using single precision solve as guess for double precision
// Both sol and src are double precision fermions. Single precision
// solver is only used internally.
// mass, alpha and nshift as usual
// dresidual are the target residuals
// fresidual are the initial single precision residuals, which are dynamically modified during the solve
//
// Things to be set before using this function:
//
// double precision solver mass, max iteration
// number.
//
// single precision solver mass, max iteration
// number.
//
// Gauge field must be initialized for both double and single prec
// solvers.
//
// the communication subsystem must be ready for bfm_d to use (due to
// the way bfmcommspi is written, one must reinitialize the
// communication object when switching between single and double
// precisions).
//
// max_cycle: the maximum number of restarts that will be performed.
inline int threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM (Fermion_t sol[],
                                                                     Fermion_t src,
                                                                     double mass[],
                                                                     double alpha[],
                                                                     int nshift,
                                                                     double dresidual[],
                                                                     double fresidual[],
                                                                     int single,
                                                                     bfm_evo < double >&bfm_d,
                                                                     bfm_evo < float >&bfm_f)
{
int me = bfm_d.thread_barrier ();
//Source and solution locations for single precision input and output
Fermion_t sol_f[nshift];
for (int n = 0; n < nshift; n++)
sol_f[n] = bfm_f.threadedAllocFermion ();
Fermion_t src_f = bfm_f.threadedAllocFermion ();
int iter = 0;
//Do an initial single precision solve with regular multi-mass solver using input residuals
if (bfm_f.isBoss () && !me)
printf
("threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM: Doing single precision solve\n");
{
threaded_convFermion (src_f, src, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
iter +=
bfm_f.CGNE_prec_MdagM_multi_shift (sol_f, src_f, mass, alpha, nshift,
fresidual, 0);
for (int n = 0; n < nshift; n++)
threaded_convFermion (sol[n], sol_f[n], bfm_d, bfm_f);
switch_comm (bfm_d, bfm_f);
}
bfm_f.threadedFreeFermion (src_f);
for (int n = 0; n < nshift; n++)
bfm_f.threadedFreeFermion (sol_f[n]);
if (bfm_f.isBoss () && !me)
printf
("threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM: Running double precision multi-mass using single precision version as guess");
iter +=
CGNE_prec_MdagM_multi_shift_with_guesses (sol, src, sol, mass, alpha,
nshift, dresidual, single, bfm_d);
return iter;
}
//CK: "Single Shift Inverter" : Modified version of int bfm::CGNE_prec that solves (MdagM + shift) psi = src
template < typename Float >
int threaded_CGNE_MdagM_plus_shift (Fermion_t psi, Fermion_t src, Float shift,
bfm_evo < Float > &bfm)
{
  //Standard CG algorithm from BFM:
  //(Use subscript to label iteration)
  //r_1 = src - MMdag psi, p_1 = src - MMdag psi, c_1 = |r_1|^2 = |p_1|^2
  //Iteration:
  //d_k = |M p_k|^2 = p_k^dag M^dag M p_k
  //a_k = c_k / d_k
  //r_k+1 = r_k - a_k MMdag p_k
  //c_k+1 = |r_k+1|^2
  //b_k = c_k+1 / c_k
  //psi = a_k p_k + psi
  //p_k+1 = b_k p_k + r_k+1
  //Note: norm(vec) is actually |vec|^2
  //Shift modified version is identical with MMdag -> MMdag + shift:
  //r_1 = src - (MMdag+shift) psi, p_1 = src - (MMdag+shift) psi, c_1 = |r_1|^2 = |p_1|^2
  //Iteration:
  //d_k = p_k^dag M^dag M p_k + shift * p_k^dag p_k
  //a_k = c_k / d_k
  //r_k+1 = r_k - a_k (MMdag+shift) p_k
  //c_k+1 = |r_k+1|^2
  //b_k = c_k+1 / c_k
  //psi = a_k p_k + psi
  //p_k+1 = b_k p_k + r_k+1
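  //The only structural change relative to plain CG is d_k: with
  //A = MdagM + shift,
  //  d_k = p_k^dag A p_k = |M p_k|^2 + shift * |p_k|^2,
  //which is why the loop below adds shift * norm(p) to the Mprec norm.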
int me = bfm.thread_barrier ();
int verbose = bfm.verbose;
double f;
double cp, c, a, d, b;
double residual = bfm.residual;
int max_iter = bfm.max_iter;
if (bfm.isBoss () && (!me)) {
bfm.InverterEnter ();
}
Fermion_t p = bfm.threadedAllocFermion (mem_fast);
Fermion_t tmp = bfm.threadedAllocFermion (mem_fast);
Fermion_t mp = bfm.threadedAllocFermion (mem_fast);
Fermion_t mmp = bfm.threadedAllocFermion (mem_fast);
Fermion_t r = bfm.threadedAllocFermion (mem_fast);
//Initial residual computation & set up
double guess = bfm.norm (psi);
d = bfm.Mprec (psi, mp, tmp, DaggerNo);
bfm.Mprec (mp, mmp, tmp, DaggerYes);
b = bfm.axpy_norm (mmp, psi, mmp, shift); //MMdag psi + shift*psi
cp = bfm.axpy_norm (r, mmp, src, -1.0);
a = bfm.axpy_norm (p, mmp, src, -1.0);
//a = bfm.norm(p);
//cp= bfm.norm(r);
  //r_1 = src - (MMdag+shift) psi, p_1 = src - (MMdag+shift) psi, c_1 = |r_1|^2 = |p_1|^2
Float ssq = bfm.norm (src);
if (verbose && bfm.isBoss () && !me) {
printf ("mixed_cg::CGNE_MdagM_plus_shift gues %le \n", guess);
printf ("mixed_cg::CGNE_MdagM_plus_shift src %le \n", ssq);
printf ("mixed_cg::CGNE_MdagM_plus_shift Mp %le \n", d);
printf ("mixed_cg::CGNE_MdagM_plus_shift (MMdag + shift)p %le \n", b);
printf ("mixed_cg::CGNE_MdagM_plus_shift r %le \n", cp);
printf ("mixed_cg::CGNE_MdagM_plus_shift p %le \n", a);
}
Float rsq = residual * residual * ssq;
//Check if guess is really REALLY good :)
if (cp <= rsq) {
if (verbose && bfm.isBoss () && !me) {
printf
("mixed_cg::CGNE_MdagM_plus_shift k=0 converged - suspiciously nice guess %le %le\n",
cp, rsq);
}
bfm.threadedFreeFermion (tmp);
bfm.threadedFreeFermion (p);
bfm.threadedFreeFermion (mp);
bfm.threadedFreeFermion (mmp);
bfm.threadedFreeFermion (r);
if (bfm.isBoss () && (!me)) {
bfm.InverterExit ();
}
return 0;
}
if (verbose && bfm.isBoss () && !me)
printf ("mixed_cg::CGNE_MdagM_plus_shift k=0 residual %le rsq %le\n", cp,
rsq);
if (bfm.isBoss () && !me) {
if (bfm.watchfile) {
printf ("mixed_cg::CGNE_MdagM_plus_shift watching file \"%s\"\n",
bfm.watchfile);
}
}
struct timeval start, stop;
if (bfm.isBoss () && !me)
gettimeofday (&start, NULL);
for (int k = 1; k <= max_iter; k++) {
bfm.iter = k;
uint64_t t_iter_1 = GetTimeBase ();
c = cp;
uint64_t t_mprec_1 = GetTimeBase ();
//d_k = p_k^dag M^dag M p_k + shift * p_k^dag p_k
d = bfm.Mprec (p, mp, tmp, 0, 1);
double norm_p = bfm.norm (p);
d += shift * norm_p;
uint64_t t_mprec_2 = GetTimeBase ();
a = c / d;
uint64_t t_mprec_3 = GetTimeBase ();
bfm.Mprec (mp, mmp, tmp, 1);
bfm.axpy (mmp, p, mmp, shift); // mmp = MMdag p + shift * p
uint64_t t_mprec_4 = GetTimeBase ();
uint64_t tr1 = GetTimeBase ();
    cp = bfm.axpy_norm (r, mmp, r, -a);	//r_k+1 = r_k - a_k (MMdag+shift) p_k
b = cp / c;
uint64_t tr2 = GetTimeBase ();
uint64_t tpsi1 = GetTimeBase ();
bfm.axpy (psi, p, psi, a);
uint64_t tpsi2 = GetTimeBase ();
// New (conjugate/M-orthogonal) search direction
uint64_t tp1 = GetTimeBase ();
bfm.axpy (p, p, r, b);
uint64_t tp2 = GetTimeBase ();
uint64_t t_iter_2 = GetTimeBase ();
// verbose nonsense
if ((bfm.iter == bfm.time_report_iter) && bfm.isBoss () && (!me) && verbose) {
int lx = bfm.node_latt[0];
int ly = bfm.node_latt[1];
int lz = bfm.node_latt[2];
int lt = bfm.node_latt[3];
int cb4dsites = (lx * ly * lz * lt) / 2;
printf ("fermionCacheFootprint: %ld \n", 7 * bfm.axpyBytes () / 3);
printf ("gauge CacheFootprint: %ld \n", 2 * 18 * 8 * cb4dsites * 2);
printf ("fermionVecBytes : %ld \n", bfm.axpyBytes () / 3);
printf ("axpyBytes : %ld \n", bfm.axpyBytes ());
printf ("axpy (soln) : %ld cyc %le MB/s\n", (tpsi2 - tpsi1),
(double) bfm.axpyBytes () * 1600. / (tpsi2 - tpsi1));
printf ("axpy_norm (residual) : %ld cyc %le MB/s\n", (tr2 - tr1),
(double) bfm.axpyBytes () * 1600. / (tr2 - tr1));
printf ("axpy (search) : %ld cyc %le MB/s\n", (tp2 - tp1),
(double) bfm.axpyBytes () * 1600. / (tp2 - tp1));
printf ("Iter time : %ld cyc\n", t_iter_2 - t_iter_1);
printf ("linalg time : %ld cyc\n",
t_iter_2 - t_iter_1 - (t_mprec_2 - t_mprec_1) - (t_mprec_4 -
t_mprec_3));
printf ("Mprec time : %ld cyc\n", t_mprec_2 - t_mprec_1);
printf ("Mprec time : %ld cyc\n", t_mprec_4 - t_mprec_3);
fflush (stdout);
}
if (((k % 100 == 0) && (verbose != 0)) || (verbose > 10)) {
if (bfm.isBoss () && !me) {
printf ("mixed_cg::CGNE_MdagM_plus_shift: k=%d r^2=%le %le %lx\n", k,
cp, sqrt (cp / ssq), &bfm);
}
}
// Stopping condition
if (cp <= rsq) {
//I did not update the flops count so I have commented them out
struct timeval diff;
if (bfm.isBoss () && !me) {
gettimeofday (&stop, NULL);
timersub (&stop, &start, &diff);
}
if (bfm.isBoss () && !me)
printf ("mixed_cg::CGNE_MdagM_plus_shift converged in %d iterations\n",
k);
if (bfm.isBoss () && !me)
printf ("mixed_cg::CGNE_MdagM_plus_shift converged in %d.%6.6d s\n",
diff.tv_sec, diff.tv_usec);
//double flops = mprecFlops()*2.0 + 2.0*axpyNormFlops() + axpyFlops()*2.0;
//flops = flops * k;
//double t = diff.tv_sec*1.0E6 + diff.tv_usec;
// if ( isBoss()&& !me )
// printf("mixed_cg::CGNE_MdagM_plus_shift: %d mprec flops/site\n",mprecFlopsPerSite());
// if ( isBoss()&& !me ) printf("mixed_cg::CGNE_MdagM_plus_shift: %le flops\n",flops);
// if ( isBoss()&& !me ) printf("mixed_cg::CGNE_MdagM_plus_shift: %le mflops per node\n",flops/t);
if (bfm.isBoss () && !me) {
printf ("mixed_cg::CGNE_MdagM_plus_shift calculating true resid. V0\n");
fflush (stdout);
} //DEBUG
bfm.Mprec (psi, mp, tmp, 0);
bfm.Mprec (mp, mmp, tmp, 1);
bfm.axpy (mmp, psi, mmp, shift);
double resid = bfm.axpy_norm (tmp, src, mmp, -1.0);
double src_norm = bfm.norm (src);
double true_residual = sqrt (resid / src_norm);
if (bfm.isBoss () && !me)
printf ("mixed_cg::CGNE_MdagM_plus_shift: true residual is %le \n",
true_residual);
if (bfm.isBoss () && !me) {
printf ("mixed_cg::CGNE_MdagM_plus_shift cleaning up\n");
fflush (stdout);
} //DEBUG
bfm.threadedFreeFermion (tmp);
bfm.threadedFreeFermion (p);
bfm.threadedFreeFermion (mp);
bfm.threadedFreeFermion (mmp);
bfm.threadedFreeFermion (r);
#ifdef LIST_ENGINE
if (bfm.list_engine)
bfm.L1P_PatternUnconfigure ();
#endif
if (bfm.isBoss () && (!me)) {
bfm.InverterExit ();
}
return k;
}
}
if (bfm.isBoss () && !me)
printf ("mixed_cg::CGNE_MdagM_plus_shift: CG not converged \n");
bfm.threadedFreeFermion (tmp);
bfm.threadedFreeFermion (p);
bfm.threadedFreeFermion (mp);
bfm.threadedFreeFermion (mmp);
bfm.threadedFreeFermion (r);
#ifdef LIST_ENGINE
if (bfm.list_engine)
bfm.L1P_PatternUnconfigure ();
#endif
if (bfm.isBoss () && (!me)) {
bfm.InverterExit ();
}
return -1;
}
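//Usage sketch (hypothetical fragment): bfm.residual and bfm.max_iter must
//be set beforehand, exactly as for the unshifted CGNE routines:
//
//  // bfm_f.residual = 1e-5;
//  // int k = threaded_CGNE_MdagM_plus_shift<float> (sol_f, src_f, shift, bfm_f);
//  // if (k == -1) { /* not converged within bfm_f.max_iter */ }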
//CK: Single precision solve followed by defect correction loop using single shift solver independently
// for each shift
// Both sol and src are double precision fermions. Single precision
// solver is only used internally.
//
// Things to be set before using this function:
//
// double precision solver mass, stopping condition, max iteration
// number.
//
// single precision solver mass, stopping condition, max iteration
// number.
//
// Gauge field must be initialized for both double and single prec
// solvers.
//
// the communication subsystem must be ready for bfm_d to use (due to
// the way bfmcommspi is written, one must reinitialize the
// communication object when switching between single and double
// precisions).
//
// max_cycle: the maximum number of restarts that will be performed.
//fresidual are the residuals used for the initial single precision solve
inline int threaded_cg_mixed_defect_correction_multi_shift_MdagM (Fermion_t sol[],
                                                                  Fermion_t src,
                                                                  double mass[],
                                                                  double alpha[],
                                                                  bfm_evo < double >&bfm_d,
                                                                  bfm_evo < float >&bfm_f,
                                                                  int nshift,
                                                                  double mresidual[],
                                                                  double fresidual[],
                                                                  int single,
                                                                  int max_cycle)
{
int me = bfm_d.thread_barrier ();
double frsd = bfm_f.residual; //save original residual for later restoration
//First we perform the multi-mass inversion using the single-precision solver
Fermion_t src_f = bfm_f.threadedAllocFermion ();
Fermion_t sol_f[nshift];
for (int i = 0; i < nshift; i++)
sol_f[i] = bfm_f.threadedAllocFermion ();
threaded_convFermion (src_f, src, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
int single_prec_iter =
bfm_f.CGNE_prec_MdagM_multi_shift (sol_f, src_f, mass, alpha, nshift,
fresidual, 0);
if (bfm_f.isBoss () && !me) {
printf
("threaded_cg_mixed_defect_correction_multi_shift_MdagM: single-prec multi-shift iter = %d\n",
single_prec_iter);
}
//Now we loop through the shifted solutions and do defect-correction on each individually
switch_comm (bfm_d, bfm_f);
for (int i = 0; i < nshift; i++)
threaded_convFermion (sol[i], sol_f[i], bfm_d, bfm_f);
double src_norm = bfm_d.norm (src);
Fermion_t tv1_d = bfm_d.threadedAllocFermion ();
Fermion_t tv2_d = bfm_d.threadedAllocFermion ();
Fermion_t src_d = bfm_d.threadedAllocFermion ();
int iter = 0;
for (int shift = 0; shift < nshift; shift++) {
double stop = src_norm * mresidual[shift] * mresidual[shift];
bfm_f.thread_barrier (); //make sure no threads have yet to write to bfm_f.residual from previous loop cycle
bfm_f.residual = mresidual[shift];
for (int i = 0; i < max_cycle; ++i) {
// compute double precision rsd and also new RHS vector.
      bfm_d.Mprec (sol[shift], tv1_d, src_d, 0, 0);	//here src_d is just used as temporary storage
bfm_d.Mprec (tv1_d, tv2_d, src_d, 1, 0); // tv2_d = MdagM * sol
bfm_d.axpy (tv2_d, sol[shift], tv2_d, mass[shift]); //tv2_d = (MdagM + shift)* sol
double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.);
// Hantao's ad hoc stopping condition
if (norm < 100. * stop)
break;
if (!me)
while (norm * bfm_f.residual * bfm_f.residual < stop)
bfm_f.residual *= 2;
//bfm_f.thread_barrier(); //Not needed because there is a barrier at start of next call
if (bfm_f.isBoss () && !me) {
printf
("threaded_cg_mixed_defect_correction_multi_shift_MdagM: shift = %d, defect correction cycle = %d rsd = %17.10e(d) stop = %17.10e(d) [True resid %17.10e(d), next single prec target resid %17.10e]\n",
shift, i, norm, stop, sqrt (norm / src_norm), bfm_f.residual);
}
//We need to invert MdagM + shift, for which we cannot use the regular inverter. Use my optimised single-shift inverter
//Could also use the multi-shift with a single shift, but we can avoid some overhead by using my optimised version
threaded_convFermion (src_f, src_d, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
bfm_f.set_zero (sol_f[shift]);
iter +=
threaded_CGNE_MdagM_plus_shift < float >(sol_f[shift], src_f,
mass[shift], bfm_f);
switch_comm (bfm_d, bfm_f);
threaded_convFermion (tv1_d, sol_f[shift], bfm_d, bfm_f);
bfm_d.axpy (sol[shift], tv1_d, sol[shift], 1.);
}
bfm_f.residual = frsd; //restore original single precision residual at end of each step
}
bfm_d.threadedFreeFermion (src_d);
bfm_d.threadedFreeFermion (tv1_d);
bfm_d.threadedFreeFermion (tv2_d);
for (int i = 0; i < nshift; i++)
bfm_f.threadedFreeFermion (sol_f[i]);
bfm_f.threadedFreeFermion (src_f);
for (int shift = 0; shift < nshift; shift++) {
if (bfm_d.isBoss () && !me)
printf
("threaded_cg_mixed_defect_correction_multi_shift_MdagM: doing final inversion for shift %d using corrected solution as guess\n",
shift);
double restore_resid = bfm_d.residual;
bfm_d.thread_barrier (); //make sure all threads get the same value before we change it
bfm_d.residual = mresidual[shift];
iter +=
threaded_CGNE_MdagM_plus_shift < double >(sol[shift], src, mass[shift],
bfm_d);
bfm_d.residual = restore_resid;
//bfm_d.thread_barrier(); //Not needed because barrier in next call
double sol_norm = bfm_d.norm (sol[shift]);
if (bfm_d.isBoss () && !me)
printf
("threaded_cg_mixed_defect_correction_multi_shift_MdagM: final sol[%d] norm = %17.10e\n",
shift, sol_norm);
}
if (single) {
for (int s = 1; s < nshift; s++) {
bfm_d.axpy (sol[0], sol[s], sol[0], 1.0);
}
}
return iter;
}
//CK 2014: The version below performs the multi-shift with the matrix multiplication in single precision. The residual is stored in single precision, but the search directions and solution are
//stored in double precision. Every update_freq iterations the residual is corrected in double precision.
//Note that the final double precision residuals may not be as good as desired, so you may want to perform defect correction on each pole afterwards. I have added a version that does this extra step below.
inline int threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp (Fermion_t psi[],
                                                            Fermion_t src,
                                                            double mass[],
                                                            double alpha[],
                                                            int nshift,
                                                            double mresidual[],
                                                            int single,
                                                            bfm_evo < float >&bfm_f,
                                                            bfm_evo < double >&bfm_d,
                                                            int update_freq = 100,
                                                            int report_freq = -1)
{
//NOTE: Assumes bfm_d comms are active
//update_freq is the frequency at which the reliable update step is performed
//report_freq prints the double precision true residual when k % report_freq = 0. Use -1 to disable
int me = bfm_d.thread_barrier ();
double bs[nshift];
double rsq[nshift];
double z[nshift][2];
int converged[nshift];
const int primary = 0;
//Primary shift fields CG iteration
double a, b, c, d;
double cp, bp; //prev
//Single precision fields
Fermion_t r = bfm_f.threadedAllocFermion (mem_slow); //residual vector, single precision
Fermion_t tmp = bfm_f.threadedAllocFermion (mem_fast);
Fermion_t p = bfm_f.threadedAllocFermion (mem_fast);
Fermion_t mp = bfm_f.threadedAllocFermion (mem_fast);
Fermion_t mmp = bfm_f.threadedAllocFermion (mem_fast);
Fermion_t src_f = bfm_f.threadedAllocFermion (mem_slow);
mixed_cg::threaded_convFermion_fast (src_f, src, bfm_f, bfm_d);
//Double precision fields
Fermion_t p_d = bfm_d.threadedAllocFermion (mem_fast); //search direction, double precision
Fermion_t tmp_d = bfm_d.threadedAllocFermion (mem_fast);
Fermion_t mp_d = bfm_d.threadedAllocFermion (mem_fast);
Fermion_t mmp_d = bfm_d.threadedAllocFermion (mem_fast);
Fermion_t ps_d[nshift]; // search directions (double precision)
for (int i = 0; i < nshift; i++) {
ps_d[i] = bfm_d.threadedAllocFermion (mem_slow);
converged[i] = 0;
}
#define DEALLOCATE_ALL \
bfm_f.threadedFreeFermion(r); \
bfm_f.threadedFreeFermion(tmp); \
bfm_f.threadedFreeFermion(p); \
bfm_f.threadedFreeFermion(mp); \
bfm_f.threadedFreeFermion(mmp); \
bfm_f.threadedFreeFermion(src_f); \
bfm_d.threadedFreeFermion(p_d); \
bfm_d.threadedFreeFermion(tmp_d); \
bfm_d.threadedFreeFermion(mp_d); \
bfm_d.threadedFreeFermion(mmp_d); \
for(int s=0;s<nshift;s++) bfm_d.threadedFreeFermion(ps_d[s])
// Check lightest mass
for (int s = 0; s < nshift; s++) {
if (mass[s] < mass[primary]) {
printf ("First shift not lightest - oops\n");
exit (-1);
}
}
cp = bfm_d.norm (src);
for (int s = 0; s < nshift; s++) {
rsq[s] = cp * mresidual[s] * mresidual[s];
bfm_d.copy (ps_d[s], src);
}
// r and p for primary
bfm_f.copy (r, src_f); //residual vector in single prec
bfm_d.copy (p_d, src);
double rn = cp; //norm of src = p_d
mixed_cg::switch_comm (bfm_f, bfm_d);
mixed_cg::threaded_convFermion_fast (p, p_d, bfm_f, bfm_d);
  d = bfm_f.Mprec (p, mp, tmp, DaggerNo, 1);	//mp = Mpc p; the returned 'norm' is |Mpc p|^2 = p^dag Mpc^dag Mpc p
bfm_f.Mprec (mp, mmp, tmp, DaggerYes); //mmp = Mpc^dag mp = Mpc^dag Mpc p
bfm_f.axpy (mmp, p, mmp, mass[0]); //mmp = p*mass[0]+mmp
d += rn * mass[0];
b = -cp / d;
if (bfm_f.isBoss () && !me)
printf ("bfmbase::CGNE_prec_multi: b = -cp/d = -%le/%le = %le\n", cp, d, b);
// Set up the various shift variables
int iz = 0;
z[0][1 - iz] = 1.0;
z[0][iz] = 1.0;
bs[0] = b;
for (int s = 1; s < nshift; s++) {
z[s][1 - iz] = 1.0;
z[s][iz] = 1.0 / (1.0 - b * (mass[s] - mass[0]));
bs[s] = b * z[s][iz]; // Sign relative to Mike - FIXME
}
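  // z[s] is the usual multi-shift CG zeta factor: the ratio of the shifted
  // to the primary residual polynomial, so |r_s|^2 = z[s]^2 * |r|^2. This is
  // the css = c * z^2 quantity tested in the convergence checks below.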
c = bfm_f.axpy_norm (r, mmp, r, b);
if (bfm_f.isBoss () && !me)
printf ("bfmbase::CGNE_prec_multi: k=0 residual %le \n", c);
for (int s = 0; s < nshift; s++) {
bfm_d.axpby (psi[s], src, src, 0., -bs[s] * alpha[s]); //initialize double prec solutions
}
// Iteration loop
for (int k = 1; k <= bfm_f.max_iter; k++) {
a = c / cp;
#define CK_BAGEL_OPTIMISE
#ifndef CK_BAGEL_OPTIMISE
mixed_cg::threaded_convFermion_fast (tmp_d, r, bfm_d, bfm_f); //store double prec copy of r in tmp_d
bfm_d.axpy (p_d, p_d, tmp_d, a);
for (int s = 0; s < nshift; s++) {
if (!converged[s]) {
if (s == 0) {
bfm_d.axpy (ps_d[s], ps_d[s], tmp_d, a);
} else {
double as = a * z[s][iz] * bs[s] / (z[s][1 - iz] * b);
bfm_d.axpby (ps_d[s], tmp_d, ps_d[s], z[s][iz], as); //ps_d[s] = z[s][iz]*tmp_d + as*ps_d[s]
}
}
}
#else
    //Note, I moved the update of the search vectors further down so it can potentially be combined with the solution vector update
double as_uc[nshift + 1], z_uc[nshift + 1];
Fermion_t ps_d_unconv[nshift + 1];
int nunconv = 0;
for (int s = 0; s < nshift; s++)
if (!converged[s]) {
ps_d_unconv[nunconv] = ps_d[s];
z_uc[nunconv] = z[s][iz];
if (s == 0)
as_uc[nunconv] = a;
else
as_uc[nunconv] = a * z[s][iz] * bs[s] / (z[s][1 - iz] * b);
++nunconv;
}
//# ifdef USE_NEW_BFM_GPARITY
#if 1
bfm_d.axpy_sy (p_d, p_d, r, a);
#else
bfm_d.axpy (p_d, p_d, r, a, 1);
#endif
#endif
cp = c;
mixed_cg::threaded_convFermion_fast (p, p_d, bfm_f, bfm_d);
d = bfm_f.Mprec (p, mp, tmp, DaggerNo, 1);
bfm_f.Mprec (mp, mmp, tmp, DaggerYes);
bfm_f.axpy (mmp, p, mmp, mass[0]);
double rn = bfm_f.norm (p);
d += rn * mass[0];
bp = b;
b = -cp / d;
// Toggle the recurrence history
bs[0] = b;
iz = 1 - iz;
for (int s = 1; s < nshift; s++) {
if (!converged[s]) {
double z0 = z[s][1 - iz];
double z1 = z[s][iz];
z[s][iz] = z0 * z1 * bp
/ (b * a * (z1 - z0) + z1 * bp * (1 - (mass[s] - mass[0]) * b));
bs[s] = b * z[s][iz] / z0; // NB sign rel to Mike
}
}
#define CK_BAGEL_OPTIMISE_COMBINE_PSI_PS
#ifndef CK_BAGEL_OPTIMISE_COMBINE_PSI_PS
# ifdef CK_BAGEL_OPTIMISE //Update the search vectors here rather than above
bfm_d.axpby_multi_reusey (ps_d_unconv, ps_d_unconv, r, as_uc, z_uc, nunconv,
1);
# endif
for (int s = 0; s < nshift; s++) {
int ss = s;
if (!converged[s])
bfm_d.axpy (psi[ss], ps_d[s], psi[ss], -bs[s] * alpha[s]);
}
#else
//CK_BAGEL_OPTIMISE_COMBINE_PSI_PS, combine the above steps
double c_uc[nunconv];
Fermion_t psi_d_unconv[nunconv];
int off = 0;
for (int s = 0; s < nshift; s++)
if (!converged[s]) {
c_uc[off] = -bs[s] * alpha[s];
psi_d_unconv[off++] = psi[s];
}
bfm_d.cgmulti_update_srch_sol (psi_d_unconv, ps_d_unconv, r, as_uc, z_uc,
c_uc, nunconv, 1);
#endif
//Reliable update
if (k % update_freq == 0) {
double c_sp = bfm_f.axpy_norm (r, mmp, r, b);
//Replace r with true residual
mixed_cg::switch_comm (bfm_d, bfm_f);
bfm_d.Mprec (psi[0], mp_d, tmp_d, 0, 1);
bfm_d.Mprec (mp_d, mmp_d, tmp_d, 1);
bfm_d.axpy (mmp_d, psi[0], mmp_d, mass[0]);
c = bfm_d.axpy_norm (tmp_d, mmp_d, src, -1.0);
if (bfm_d.isBoss () && !me)
printf
("bfmbase::CGNE_prec_multi: reliable update iter %d, replaced |r|^2 = %.12le with |r|^2 = %.12le\n",
k, c_sp, c);
mixed_cg::threaded_convFermion_fast (r, tmp_d, bfm_f, bfm_d);
mixed_cg::switch_comm (bfm_f, bfm_d);
} else {
c = bfm_f.axpy_norm (r, mmp, r, b);
}
// Convergence checks
int all_converged = 1;
if (((k % 100) == 0) && bfm_f.isBoss () && (!me))
printf
("bfmbase::CGNE_prec_multi: k=%d c=%g, shift in current dir for lightest pole %.12e\n",
k, c, -bs[0] * alpha[0]);
for (int s = 0; s < nshift; s++) {
if (!converged[s]) {
double css = c * z[s][iz] * z[s][iz];
if (css < rsq[s])
converged[s] = 1;
else
all_converged = 0;
	if (bfm_f.isBoss () && (!me) && converged[s])
	  printf
	    ("bfmbase::CGNE_prec_multi: Shift %d converged on iter %d: test cur %g, targ %g [Stated true resid %g].\n",
	     s, k, css, rsq[s], sqrt (css / rsq[s]) * mresidual[s]);
else if (((k % 100) == 0) && bfm_f.isBoss () && (!me))
printf
("bfmbase::CGNE_prec_multi: Shift %d convergence test cur %g, targ %g [Stated true resid %g].\n",
s, css, rsq[s], sqrt (css / rsq[s]) * mresidual[s]);
}
}
if (converged[0] && !all_converged) {
if (bfm_f.isBoss () && !me)
printf
("bfmbase::CGNE_prec_multi: WARNING, shift[0] has converged but not all higher mass poles have. Algorithm ending here!\n");
all_converged = 1;
}
if (all_converged) {
if (bfm_f.isBoss () && (!me))
printf ("bfmbase::CGNE_prec_multi: k=%d All shifts have converged\n",
k);
if (bfm_f.isBoss () && (!me))
printf ("bfmbase::CGNE_prec_multi: k=%d Checking solutions\n", k);
// Check answers
mixed_cg::switch_comm (bfm_d, bfm_f);
for (int s = 0; s < nshift; s++) {
//Convert solution to double precision
bfm_d.Mprec (psi[s], mp_d, tmp_d, DaggerNo);
bfm_d.Mprec (mp_d, mmp_d, tmp_d, DaggerYes);
bfm_d.axpy (tmp_d, psi[s], mmp_d, mass[s]);
bfm_d.axpy (mp_d, tmp_d, src, -1);
double rn = bfm_d.norm (mp_d);
double cn = bfm_d.norm (src);
if (bfm_d.isBoss () && !me) {
printf ("double prec final: shift[%d] true residual %.12le \n", s,
sqrt (rn / cn));
}
}
if (single) {
for (int s = 1; s < nshift; s++) {
bfm_d.axpy (psi[0], psi[s], psi[0], 1.0);
}
}
DEALLOCATE_ALL;
return k;
} else if (report_freq != -1 && k % report_freq == 0) {
mixed_cg::switch_comm (bfm_d, bfm_f);
for (int s = 0; s < nshift; s++) {
double css = c * z[s][iz] * z[s][iz];
bfm_d.Mprec (psi[s], mp_d, tmp_d, DaggerNo);
bfm_d.Mprec (mp_d, mmp_d, tmp_d, DaggerYes);
bfm_d.axpy (tmp_d, psi[s], mmp_d, mass[s]);
bfm_d.axpy (mp_d, tmp_d, src, -1);
double rn = bfm_d.norm (mp_d);
double cn = bfm_d.norm (src);
if (bfm_d.isBoss () && !me) {
printf
("iter %d, double prec: shift[%d] true residual %.12le, running true residual %.12le [converged = %d]\n",
k, s, sqrt (rn / cn), sqrt (css / rsq[s]) * mresidual[s],
converged[s]);
}
}
mixed_cg::switch_comm (bfm_f, bfm_d);
}
}
mixed_cg::switch_comm (bfm_d, bfm_f);
if (bfm_d.isBoss () && !me)
printf ("bfmbase::CGNE_prec_multi: CG not converged \n");
DEALLOCATE_ALL;
return -1;
}
#undef DEALLOCATE_ALL
//This version has the following steps:
//1) Single precision multi-mass solve with reliable update and double precision shift vectors
//2) Single precision restarted CG with defect correction loop over poles
//3) Double precision restarted CG with defect correction loop over poles
inline int
threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction (Fermion_t psi[],
                                                                   Fermion_t src,
                                                                   double mass[],
                                                                   double alpha[],
                                                                   int nshift,
                                                                   double mresidual[],
                                                                   int single,
                                                                   bfm_evo < float >&bfm_f,
                                                                   bfm_evo < double >&bfm_d,
                                                                   int update_freq = 100,
                                                                   int report_freq = -1,
                                                                   int max_cycle = 10)
{
int me = bfm_d.thread_barrier ();
double frsd = bfm_f.residual; //save original residual for later restoration
struct timeval tstart, tstop, tdiff;
gettimeofday (&tstart, NULL);
int iter_multi =
threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp (psi, src, mass, alpha,
nshift, mresidual, 0,
bfm_f, bfm_d, update_freq,
report_freq);
gettimeofday (&tstop, NULL);
timersub (&tstop, &tstart, &tdiff);
if (bfm_d.isBoss () && !me)
printf
("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: Initial multi-shift iter = %d, time %d.%6.6d s\n",
iter_multi, tdiff.tv_sec, tdiff.tv_usec);
gettimeofday (&tstart, NULL);
Fermion_t src_f = bfm_f.threadedAllocFermion ();
Fermion_t sol_f = bfm_f.threadedAllocFermion ();
Fermion_t src_d = bfm_d.threadedAllocFermion ();
Fermion_t tv1_d = bfm_d.threadedAllocFermion (mem_fast);
Fermion_t tv2_d = bfm_d.threadedAllocFermion (mem_fast);
double src_norm = bfm_d.norm (src);
int iter = 0;
for (int shift = 0; shift < nshift; shift++) {
double stop = src_norm * mresidual[shift] * mresidual[shift];
bfm_f.thread_barrier (); //ensure all thread writes to bfm_f.residual from previous iteration have completed
bfm_f.residual = mresidual[shift];
for (int i = 0; i < max_cycle; ++i) {
// compute double precision rsd and also new RHS vector.
      bfm_d.Mprec (psi[shift], tv1_d, src_d, 0, 0);	//here src_d is just used as temporary storage
bfm_d.Mprec (tv1_d, tv2_d, src_d, 1, 0); // tv2_d = MdagM * sol
bfm_d.axpy (tv2_d, psi[shift], tv2_d, mass[shift]); //tv2_d = (MdagM + shift)* sol
double norm = bfm_d.axpy_norm (src_d, tv2_d, src, -1.);
// Hantao's ad hoc stopping condition
if (norm < 100. * stop) {
if (bfm_f.isBoss () && !me) {
printf
("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: shift = %d needs no correction: rsd = %17.10e(d) stop = %17.10e(d) [True resid %17.10e(d)]\n",
shift, norm, stop, sqrt (norm / src_norm));
}
break;
}
if (!me)
while (norm * bfm_f.residual * bfm_f.residual < stop)
bfm_f.residual *= 2;
//bfm_f.thread_barrier(); //No need, next call has a barrier
threaded_convFermion_fast (src_f, src_d, bfm_f, bfm_d);
switch_comm (bfm_f, bfm_d);
if (bfm_f.isBoss () && !me) {
printf
("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: shift = %d, defect correction cycle = %d rsd = %17.10e(d) stop = %17.10e(d) [True resid %17.10e(d), next single prec target resid %17.10e]\n",
shift, i, norm, stop, sqrt (norm / src_norm), bfm_f.residual);
}
bfm_f.set_zero (sol_f);
iter +=
threaded_CGNE_MdagM_plus_shift < float >(sol_f, src_f, mass[shift],
bfm_f);
switch_comm (bfm_d, bfm_f);
threaded_convFermion_fast (tv1_d, sol_f, bfm_d, bfm_f);
bfm_d.axpy (psi[shift], tv1_d, psi[shift], 1.);
}
bfm_f.residual = frsd; //restore original single precision residual at end of each step
}
gettimeofday (&tstop, NULL);
timersub (&tstop, &tstart, &tdiff);
if (bfm_d.isBoss () && !me)
printf
("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: defect correction time %d.%6.6d s\n",
tdiff.tv_sec, tdiff.tv_usec);
gettimeofday (&tstart, NULL);
for (int shift = 0; shift < nshift; shift++) {
if (bfm_d.isBoss () && !me) {
printf
("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: doing final inversion for shift %d using corrected solution as guess\n",
shift);
fflush (stdout);
}
bfm_d.thread_barrier (); //ensure writes to bfm_d.residual from previous iteration have completed
double restore_resid = bfm_d.residual;
bfm_d.thread_barrier (); //ensure all threads have the same value
bfm_d.residual = mresidual[shift];
int final_iter =
threaded_CGNE_MdagM_plus_shift < double >(psi[shift], src, mass[shift],
bfm_d);
if (final_iter == -1) {
cps::ERR.General ("mixed_cg",
"threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction",
"final inversion for pole %d failed to converge!",
shift);
}
iter += final_iter;
bfm_d.residual = restore_resid;
}
if (single) {
for (int s = 1; s < nshift; s++) {
bfm_d.axpy (psi[0], psi[s], psi[0], 1.0);
}
}
bfm_d.threadedFreeFermion (src_d);
bfm_f.threadedFreeFermion (src_f);
bfm_d.threadedFreeFermion (tv1_d);
bfm_d.threadedFreeFermion (tv2_d);
bfm_f.threadedFreeFermion (sol_f);
gettimeofday (&tstop, NULL);
timersub (&tstop, &tstart, &tdiff);
if (bfm_d.isBoss () && !me)
printf
("threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction: finishing up time %d.%6.6d s\n",
tdiff.tv_sec, tdiff.tv_usec);
return iter;
}
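//Usage sketch for the combined driver above (hypothetical values):
//
//  // int its = threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction (
//  //             psi, src, mass, alpha, nshift, mresidual, /*single=*/0,
//  //             bfm_f, bfm_d, /*update_freq=*/100, /*report_freq=*/-1, /*max_cycle=*/10);
//
//bfm_d comms must be active on entry, matching the NOTE in the inner
//reliable-update solver.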
//Maybe OK, but MooeeInv with Gparity should be checked. Disabling for now
//#ifndef BFM_GPARITY
#if 0
inline int threaded_cg_mixed_Mdag (Fermion_t sol[2], Fermion_t src[2],
bfm_evo < double >&bfm_d,
bfm_evo < float >&bfm_f, int max_cycle,
cps::InverterType itype = cps::CG,
// the following parameters are for deflation
multi1d < Fermion_t[2] > *evec = NULL,
multi1d < float >*eval = NULL, int N = 0)
{
int me = bfm_d.thread_barrier ();
Fermion_t be = bfm_d.threadedAllocFermion ();
Fermion_t bo = bfm_d.threadedAllocFermion ();
Fermion_t ta = bfm_d.threadedAllocFermion ();
Fermion_t tb = bfm_d.threadedAllocFermion ();
double nsrc = bfm_d.norm (src[0]) + bfm_d.norm (src[1]);
if (bfm_d.isBoss () && !me) {
printf ("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
printf ("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n",
bfm_d.CGdiagonalMee);
}
// eo preconditioning
// CGdiagonalMee == 2 has an extra Moo^{\dag-1}
if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv (src[Even], ta, DaggerYes); // ta == Mee^{\dag-1} src[e]
bfm_d.Meo (ta, tb, Odd, DaggerYes); // tb == Moe^\dag Mee^{\dag-1} src[e]
bfm_d.axpy (bo, tb, src[Odd], -1.0); // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
} else if (bfm_d.CGdiagonalMee == 2) {
bfm_d.MooeeInv (src[Even], ta, DaggerYes);
bfm_d.Meo (ta, tb, Odd, DaggerYes);
bfm_d.axpy (tb, tb, src[Odd], -1.0);
bfm_d.MooeeInv (tb, bo, DaggerYes); // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
} else {
printf ("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n",
bfm_d.CGdiagonalMee);
exit (-1);
}
// There seems to be no easy way to use an initial guess for this
// inversion, so just set the guess to zero.
bfm_d.set_zero (ta);
// ta = (Mprec^\dag Mprec)^{-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
int iter =
threaded_cg_mixed_MdagM (ta, bo, bfm_d, bfm_f, max_cycle, itype, evec, eval,
N);
bfm_d.Mprec (ta, sol[Odd], tb, DaggerNo); // sol[o] = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
// For CGdiagonalMee == 1 we need to multiply the odd
// solution by MooInv^d
if (bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv (sol[Odd], ta, DaggerYes, Odd);
bfm_d.copy (sol[Odd], ta);
}
bfm_d.Meo (sol[Odd], ta, Even, DaggerYes); // ta == Meo^\dag sol[o]
bfm_d.axpy (tb, ta, src[Even], -1.0); // tb == src[e] - Meo^\dag sol[o]
bfm_d.MooeeInv (tb, sol[Even], DaggerYes); // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])
double nsol = bfm_d.norm (sol[0]) + bfm_d.norm (sol[1]);
// compute final residual
Fermion_t tmp[2] = { be, bo };
bfm_d.Munprec (sol, tmp, ta, DaggerYes);
double ndiff = 0.;
for (int i = 0; i < 2; ++i) {
bfm_d.axpy (tb, tmp[i], src[i], -1.0);
ndiff += bfm_d.norm (tb);
}
if (bfm_d.isBoss () && !me) {
printf
("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n",
nsol, sqrt (ndiff / nsrc));
}
bfm_d.threadedFreeFermion (be);
bfm_d.threadedFreeFermion (bo);
bfm_d.threadedFreeFermion (ta);
bfm_d.threadedFreeFermion (tb);
return iter;
}
// Inverts unpreconditioned Mdag by using preconditioned MMdag
// as the inner solver. This allows us to make use of the initial
// guess (so sol needs to be initialized to something reasonable
// before calling this function).
inline int threaded_cg_mixed_Mdag_guess (Fermion_t sol[2], Fermion_t src[2],
bfm_evo < double >&bfm_d,
bfm_evo < float >&bfm_f, int max_cycle,
cps::InverterType itype = cps::CG,
// the following parameters are for deflation
multi1d < Fermion_t[2] > *evec = NULL,
multi1d < float >*eval = NULL,
int N = 0)
{
int me = bfm_d.thread_barrier ();
Fermion_t be = bfm_d.threadedAllocFermion ();
Fermion_t bo = bfm_d.threadedAllocFermion ();
Fermion_t ta = bfm_d.threadedAllocFermion ();
Fermion_t tb = bfm_d.threadedAllocFermion ();
double nsrc = bfm_d.norm (src[0]) + bfm_d.norm (src[1]);
if (bfm_d.isBoss () && !me) {
printf ("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
printf ("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n",
bfm_d.CGdiagonalMee);
}
// eo preconditioning
// CGdiagonalMee == 2 has an extra Moo^{\dag-1}
if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv (src[Even], ta, DaggerYes); // ta == Mee^{\dag-1} src[e]
bfm_d.Meo (ta, tb, Odd, DaggerYes); // tb == Moe^\dag Mee^{\dag-1} src[e]
bfm_d.axpy (bo, tb, src[Odd], -1.0); // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
} else if (bfm_d.CGdiagonalMee == 2) {
bfm_d.MooeeInv (src[Even], ta, DaggerYes);
bfm_d.Meo (ta, tb, Odd, DaggerYes);
bfm_d.axpy (tb, tb, src[Odd], -1.0);
bfm_d.MooeeInv (tb, bo, DaggerYes); // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
} else {
printf ("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n",
bfm_d.CGdiagonalMee);
exit (-1);
}
// for CGdiagonalMee == 1 the guess needs to get multiplied by Moo^\dag
if (bfm_d.CGdiagonalMee == 1) {
bfm_d.Mooee (sol[Odd], tb, DaggerYes, Odd);
bfm_d.copy (sol[Odd], tb);
}
// ta = Mprec bo
// = Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
bfm_d.Mprec (bo, ta, tb, DaggerNo);
// sol[o] = (Mprec Mprec^\dag)^{-1} ta
// = (Mprec Mprec^\dag)^{-1} Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
// = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
int iter =
threaded_cg_mixed_MMdag (sol[Odd], ta, bfm_d, bfm_f, max_cycle, itype, evec,
eval, N);
// For CGdiagonalMee == 1 we need to multiply the odd
// solution by MooInv^d
if (bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv (sol[Odd], ta, DaggerYes, Odd);
bfm_d.copy (sol[Odd], ta);
}
bfm_d.Meo (sol[Odd], ta, Even, DaggerYes); // ta == Meo^\dag sol[o]
bfm_d.axpy (tb, ta, src[Even], -1.0); // tb == src[e] - Meo^\dag sol[o]
bfm_d.MooeeInv (tb, sol[Even], DaggerYes); // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])
double nsol = bfm_d.norm (sol[0]) + bfm_d.norm (sol[1]);
// compute final residual
Fermion_t tmp[2] = { be, bo };
bfm_d.Munprec (sol, tmp, ta, DaggerYes);
double ndiff = 0.;
for (int i = 0; i < 2; ++i) {
bfm_d.axpy (tb, tmp[i], src[i], -1.0);
ndiff += bfm_d.norm (tb);
}
if (bfm_d.isBoss () && !me) {
printf
("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n",
nsol, sqrt (ndiff / nsrc));
}
bfm_d.threadedFreeFermion (be);
bfm_d.threadedFreeFermion (bo);
bfm_d.threadedFreeFermion (ta);
bfm_d.threadedFreeFermion (tb);
return iter;
}
}
#endif
inline int threaded_cg_mixed_Mdag(Fermion_t sol[2], Fermion_t src[2],
bfm_evo<double> &bfm_d, bfm_evo<float> &bfm_f,
int max_cycle, cps::InverterType itype = cps::CG,
// the following parameters are for deflation
multi1d<Fermion_t[2]> *evec = NULL,
multi1d<float> *eval = NULL,
int N = 0)
{
int me = bfm_d.thread_barrier();
Fermion_t be = bfm_d.threadedAllocFermion();
Fermion_t bo = bfm_d.threadedAllocFermion();
Fermion_t ta = bfm_d.threadedAllocFermion();
Fermion_t tb = bfm_d.threadedAllocFermion();
double nsrc = bfm_d.norm(src[0]) + bfm_d.norm(src[1]);
if (bfm_d.isBoss() && !me) {
printf("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
printf("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee);
}
// eo preconditioning
// CGdiagonalMee == 2 has an extra Moo^{\dag-1}
if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv(src[Even], ta, DaggerYes); // ta == Mee^{\dag-1} src[e]
bfm_d.Meo(ta, tb, Odd, DaggerYes); // tb == Moe^\dag Mee^{\dag-1} src[e]
bfm_d.axpy(bo, tb, src[Odd], -1.0); // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
} else if (bfm_d.CGdiagonalMee == 2) {
bfm_d.MooeeInv(src[Even], ta, DaggerYes);
bfm_d.Meo(ta, tb, Odd, DaggerYes);
bfm_d.axpy(tb, tb, src[Odd], -1.0);
bfm_d.MooeeInv(tb, bo, DaggerYes); // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
} else {
printf("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n", bfm_d.CGdiagonalMee);
exit(-1);
}
// There seems to be no easy way to use an initial guess for this
// inversion, so just set the guess to zero.
bfm_d.set_zero(ta);
// ta = (Mprec^\dag Mprec)^{-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
int iter = threaded_cg_mixed_MdagM(ta, bo, bfm_d, bfm_f, max_cycle, itype, evec, eval, N);
bfm_d.Mprec(ta, sol[Odd], tb, DaggerNo); // sol[o] = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
// For CGdiagonalMee == 1 the odd solution picks up an extra
// factor of Moo^{\dag-1}
if (bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv(sol[Odd], ta, DaggerYes, Odd);
bfm_d.copy(sol[Odd], ta);
}
bfm_d.Meo(sol[Odd], ta, Even, DaggerYes); // ta == Meo^\dag sol[o]
bfm_d.axpy(tb, ta, src[Even], -1.0); // tb == src[e] - Meo^\dag sol[o]
bfm_d.MooeeInv(tb, sol[Even], DaggerYes); // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])
double nsol = bfm_d.norm(sol[0]) + bfm_d.norm(sol[1]);
// compute final residual
Fermion_t tmp[2] = { be, bo };
bfm_d.Munprec(sol, tmp, ta, DaggerYes);
double ndiff = 0.;
for (int i = 0; i < 2; ++i) {
bfm_d.axpy(tb, tmp[i], src[i], -1.0);
ndiff += bfm_d.norm(tb);
}
if (bfm_d.isBoss() && !me) {
printf("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n",
nsol, sqrt(ndiff / nsrc));
}
bfm_d.threadedFreeFermion(be);
bfm_d.threadedFreeFermion(bo);
bfm_d.threadedFreeFermion(ta);
bfm_d.threadedFreeFermion(tb);
return iter;
}
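// Sketch of the identity used above (restating the inline comments): with
// even-odd (Schur complement) preconditioning, eliminating the even
// checkerboard from M^\dag sol = src gives
//   sol[o] = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
// followed by back-substitution for the even sites,
//   sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o]),
// which is exactly the MooeeInv/Meo/axpy sequence performed in the function
// above, up to the CGdiagonalMee conventions handled explicitly there.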
// Inverts unpreconditioned Mdag by using preconditioned MMdag
// as the inner solver. This allows us to make use of the initial
// guess (so sol needs to be initialized to something reasonable
// before calling this function).
inline int threaded_cg_mixed_Mdag_guess(Fermion_t sol[2], Fermion_t src[2],
bfm_evo<double> &bfm_d, bfm_evo<float> &bfm_f,
int max_cycle, cps::InverterType itype = cps::CG,
// the following parameters are for deflation
multi1d<Fermion_t[2]> *evec = NULL,
multi1d<float> *eval = NULL,
int N = 0)
{
int me = bfm_d.thread_barrier();
Fermion_t be = bfm_d.threadedAllocFermion();
Fermion_t bo = bfm_d.threadedAllocFermion();
Fermion_t ta = bfm_d.threadedAllocFermion();
Fermion_t tb = bfm_d.threadedAllocFermion();
double nsrc = bfm_d.norm(src[0]) + bfm_d.norm(src[1]);
if (bfm_d.isBoss() && !me) {
printf("threaded_cg_mixed_Mdag: source norm is %17.10e\n", nsrc);
printf("threaded_cg_mixed_Mdag: bfm_d.CGdiagonalMee = %d\n", bfm_d.CGdiagonalMee);
}
// eo preconditioning
// CGdiagonalMee == 2 has an extra Moo^{\dag-1}
if (bfm_d.CGdiagonalMee == 0 || bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv(src[Even], ta, DaggerYes); // ta == Mee^{\dag-1} src[e]
bfm_d.Meo(ta, tb, Odd, DaggerYes); // tb == Moe^\dag Mee^{\dag-1} src[e]
bfm_d.axpy(bo, tb, src[Odd], -1.0); // bo == src[o] - Moe^\dag Mee^{\dag-1} src[e]
} else if (bfm_d.CGdiagonalMee == 2) {
bfm_d.MooeeInv(src[Even], ta, DaggerYes);
bfm_d.Meo(ta, tb, Odd, DaggerYes);
bfm_d.axpy(tb, tb, src[Odd], -1.0);
bfm_d.MooeeInv(tb, bo, DaggerYes); // bo == Moo^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
} else {
printf("threaded_cg_mixed_Mdag: Unknown CGdiagonalMee: %d\n", bfm_d.CGdiagonalMee);
exit(-1);
}
// for CGdiagonalMee == 1 the guess needs to get multiplied by Moo^\dag
if (bfm_d.CGdiagonalMee == 1) {
bfm_d.Mooee(sol[Odd], tb, DaggerYes, Odd);
bfm_d.copy(sol[Odd], tb);
}
// ta = Mprec bo
// = Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
bfm_d.Mprec(bo, ta, tb, DaggerNo);
// sol[o] = (Mprec Mprec^\dag)^{-1} ta
// = (Mprec Mprec^\dag)^{-1} Mprec (src[o] - Moe^\dag Mee^{\dag-1} src[e])
// = Mprec^{\dag-1} (src[o] - Moe^\dag Mee^{\dag-1} src[e])
int iter = threaded_cg_mixed_MMdag(sol[Odd], ta, bfm_d, bfm_f, max_cycle, itype, evec, eval, N);
// For CGdiagonalMee == 1 the odd solution picks up an extra
// factor of Moo^{\dag-1}
if (bfm_d.CGdiagonalMee == 1) {
bfm_d.MooeeInv(sol[Odd], ta, DaggerYes, Odd);
bfm_d.copy(sol[Odd], ta);
}
bfm_d.Meo(sol[Odd], ta, Even, DaggerYes); // ta == Meo^\dag sol[o]
bfm_d.axpy(tb, ta, src[Even], -1.0); // tb == src[e] - Meo^\dag sol[o]
bfm_d.MooeeInv(tb, sol[Even], DaggerYes); // sol[e] = Mee^{\dag-1} (src[e] - Meo^\dag sol[o])
double nsol = bfm_d.norm(sol[0]) + bfm_d.norm(sol[1]);
// compute final residual
Fermion_t tmp[2] = { be, bo };
bfm_d.Munprec(sol, tmp, ta, DaggerYes);
double ndiff = 0.;
for (int i = 0; i < 2; ++i) {
bfm_d.axpy(tb, tmp[i], src[i], -1.0);
ndiff += bfm_d.norm(tb);
}
if (bfm_d.isBoss() && !me) {
printf("threaded_cg_mixed_Mdag: unprec sol norm = %17.10e, residual = %17.10e\n",
nsol, sqrt(ndiff / nsrc));
}
bfm_d.threadedFreeFermion(be);
bfm_d.threadedFreeFermion(bo);
bfm_d.threadedFreeFermion(ta);
bfm_d.threadedFreeFermion(tb);
return iter;
}
}
CPS_START_NAMESPACE
#if 1
//Controls which version of the multi-shift algorithm is used. The user can assign a different version to each environment, for example an
//approximate method within the molecular dynamics evolution.
//The current environment must be set manually. It defaults to Generic.
class MultiShiftCGcontroller {
public:
enum Mode
{ SINGLE_PREC,
DOUBLE_PREC,
SINGLE_PREC_PLUS_OUTER_DEFECT_CORRECTION_LOOP, //Single precision multi-shift followed by single precision restarted defect correction loop over poles
SINGLE_PREC_AS_DOUBLE_PREC_GUESS, //Single precision multi-shift followed by double precision multi-shift using the single prec results as a guess (using Osborn's method)
SINGLE_PREC_RESTARTED_AS_DOUBLE_PREC_GUESS, //Restarted single precision multi-shift with defect correction followed by double precision multi-shift using the single prec results as a guess (also using Osborn's method)
SINGLE_PREC_RELIABLE_UPDATE_PLUS_OUTER_DEFECT_CORRECTION_LOOP, //Single precision multi-shift with reliable update followed by single precision restarted defect correction loop over poles
NMultiShiftCGMode
};
enum Environment
{ MolecularDynamics, EnergyCalculation, Heatbath, Generic,
NMultiShiftEnvironment };
private:
Mode environ_mode[(int) NMultiShiftEnvironment];
Environment current_environment;
double minimum_single_prec_residual; //For variants with an initial single precision solve, the stopping conditions are set equal to the larger of the double precision residual and this bound. Does not apply
//to the reliable update version.
int reliable_update_freq; //Used in versions with reliable update
int max_defect_correction_cycles;
public:
MultiShiftCGcontroller ():current_environment (Generic),
minimum_single_prec_residual (1e-08),
reliable_update_freq (100), max_defect_correction_cycles (500) {
for (int i = 0; i < (int) NMultiShiftEnvironment; i++)
environ_mode[i] = DOUBLE_PREC;
}
void setEnvironmentMode (const Environment & environ, const Mode & mode)
{
environ_mode[(int) environ] = mode;
}
void setEnvironment (const Environment & environ)
{
current_environment = environ;
}
const Mode & getMode () const
{
return environ_mode[(int) current_environment];
}
void setMinimumSinglePrecResidual (const double &r)
{
minimum_single_prec_residual = r;
}
void setReliableUpdateFreq (const int &f)
{
reliable_update_freq = f;
}
void setMaximumDefectCorrectionCycles (const int &c)
{
max_defect_correction_cycles = c;
}
int MInv (Fermion_t * sol_multi, Fermion_t src,
Float * shift, int Nshift,
Float * mresidual, Float * alpha, int single,
bfm_evo < double >&bd, bfm_evo < float >&bf)
{
const Mode & mode = getMode ();
int iter;
if (mode == SINGLE_PREC) { //Note, this uses the residuals specified in the cg_arg without modification
#pragma omp parallel
{
Fermion_t src_f = bf.threadedAllocFermion ();
Fermion_t sol_f[Nshift];
for (int i = 0; i < Nshift; i++)
sol_f[i] = bf.threadedAllocFermion ();
mixed_cg::threaded_convFermion (src_f, src, bf, bd);
mixed_cg::switch_comm (bf, bd);
iter =
bf.CGNE_prec_MdagM_multi_shift (sol_f, src_f, shift, alpha, Nshift,
mresidual, single);
mixed_cg::switch_comm (bd, bf);
for (int i = 0; i < Nshift; i++) {
mixed_cg::threaded_convFermion (sol_multi[i], sol_f[i], bd, bf);
bf.threadedFreeFermion (sol_f[i]);
}
bf.threadedFreeFermion (src_f);
}
} else if (mode == DOUBLE_PREC) {
#pragma omp parallel
{
iter =
bd.CGNE_prec_MdagM_multi_shift (sol_multi, src, shift, alpha, Nshift,
mresidual, single);
}
} else if (mode == SINGLE_PREC_PLUS_OUTER_DEFECT_CORRECTION_LOOP) {
double fresidual[Nshift]; //residuals for initial single prec solve
for (int s = 0; s < Nshift; s++)
fresidual[s] =
(mresidual[s] >=
minimum_single_prec_residual ? mresidual[s] :
minimum_single_prec_residual);
#pragma omp parallel
{
iter =
mixed_cg::
threaded_cg_mixed_defect_correction_multi_shift_MdagM (sol_multi, src,
shift, alpha,
bd, bf, Nshift,
mresidual,
fresidual,
single,
max_defect_correction_cycles);
}
} else if (mode == SINGLE_PREC_AS_DOUBLE_PREC_GUESS) {
double fresidual[Nshift]; //residuals for initial single prec solve
for (int s = 0; s < Nshift; s++)
fresidual[s] =
(mresidual[s] >=
minimum_single_prec_residual ? mresidual[s] :
minimum_single_prec_residual);
#pragma omp parallel
{
iter =
mixed_cg::
threaded_cg_mixed_single_prec_as_guess_multi_shift_MdagM (sol_multi,
src, shift,
alpha,
Nshift,
mresidual,
fresidual,
single, bd,
bf);
}
} else if (mode == SINGLE_PREC_RESTARTED_AS_DOUBLE_PREC_GUESS) {
double fresidual[Nshift]; //residuals for initial single prec solve
for (int s = 0; s < Nshift; s++)
fresidual[s] =
(mresidual[s] >=
minimum_single_prec_residual ? mresidual[s] :
minimum_single_prec_residual);
#pragma omp parallel
{
iter =
mixed_cg::threaded_cg_mixed_restarted_multi_shift_MdagM (sol_multi,
src, shift,
alpha,
Nshift,
mresidual,
fresidual,
single, bd,
bf,
max_defect_correction_cycles);
}
} else if (mode ==
SINGLE_PREC_RELIABLE_UPDATE_PLUS_OUTER_DEFECT_CORRECTION_LOOP) {
#pragma omp parallel
{
iter =
mixed_cg::
threaded_cg_mixed_multi_shift_MdagM_sp_relup_dp_defect_correction
(sol_multi, src, shift, alpha, Nshift, mresidual, single, bf, bd,
reliable_update_freq, -1, max_defect_correction_cycles);
}
} else
ERR.General ("MultiShiftCGcontroller", "MInv(..)",
"Unknown multi-shift mode\n");
return iter;
}
};
extern MultiShiftCGcontroller MultiShiftController; //global instance (created in fbfm.C)
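// Usage sketch (illustrative only; the argument names below are hypothetical,
// but the signature matches MInv above):
//
//   MultiShiftController.setEnvironmentMode(
//       MultiShiftCGcontroller::MolecularDynamics,
//       MultiShiftCGcontroller::SINGLE_PREC_PLUS_OUTER_DEFECT_CORRECTION_LOOP);
//   MultiShiftController.setEnvironment(MultiShiftCGcontroller::MolecularDynamics);
//   int iters = MultiShiftController.MInv(sol_multi, src, shifts, Nshift,
//                                         residuals, alphas, single, bfm_d, bfm_f);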
#endif
CPS_END_NAMESPACE
#endif
|
test2.c | #pragma wave trace(enable)
int a[100][100];
void foo()
{
int i,j;
int hypre__nx,hypre__ny;
#define HYPRE_BOX_SMP_PRIVATE i,j
#define HYPRE_SMP_PRIVATE \
HYPRE_BOX_SMP_PRIVATE,hypre__nx,hypre__ny
#pragma omp parallel for private (HYPRE_SMP_PRIVATE)
for (i=0;i<100; i++)
for (j=0;j<100; j++)
{
hypre__nx =i;
hypre__ny=j;
a[i][j]=hypre__nx+hypre__ny;
}
}
|
DRB091-threadprivate2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races.
This is the case for a variable referenced within a construct.
*/
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
#pragma omp threadprivate(sum0)
int main()
{
int len=1000;
int i, sum=0;
#pragma omp parallel copyin(sum0)
{
#pragma omp for
for (i=0;i<len;i++)
{
sum0=sum0+i;
}
#pragma omp critical
{
sum= sum+sum0;
}
}
/* reference calculation */
for (i=0;i<len;i++)
{
sum1=sum1+i;
}
printf("sum=%d; sum1=%d\n",sum,sum1);
assert(sum==sum1);
return 0;
}
|
interpolate_structural_solution_for_dem_utility.h | /*
* Author: Salva Latorre and Ignasi Pouplana
*
* latorre@cimne.upc.edu
* ipouplana@cimne.upc.edu
*/
#ifndef INTERPOLATE_STRUCTURAL_SOLUTION_FOR_DEM_UTILITY_H
#define INTERPOLATE_STRUCTURAL_SOLUTION_FOR_DEM_UTILITY_H
#include "includes/variables.h"
#include <limits>
#include <iostream>
#include <iomanip>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "includes/define.h"
#include "includes/condition.h"
#include "includes/model_part.h"
#include "dem_structures_coupling_application_variables.h"
namespace Kratos {
class InterpolateStructuralSolutionForDEM {
public:
typedef ModelPart::NodesContainerType::ContainerType::iterator NodesIteratorType;
KRATOS_CLASS_POINTER_DEFINITION(InterpolateStructuralSolutionForDEM);
InterpolateStructuralSolutionForDEM() {}
virtual ~InterpolateStructuralSolutionForDEM() {}
void SaveStructuralSolution(ModelPart& r_structural_model_part) {
KRATOS_TRY
const int NNodes = static_cast<int>(r_structural_model_part.Nodes().size());
ModelPart::NodesContainerType::iterator node_begin = r_structural_model_part.NodesBegin();
#pragma omp parallel for
for (int i = 0; i < NNodes; i++) {
ModelPart::NodesContainerType::iterator itNode = node_begin + i;
array_1d<double,3>& r_current_velocity = itNode->FastGetSolutionStepValue(BACKUP_LAST_STRUCTURAL_VELOCITY);
noalias(r_current_velocity) = itNode->FastGetSolutionStepValue(VELOCITY);
array_1d<double,3>& r_current_displacement = itNode->FastGetSolutionStepValue(BACKUP_LAST_STRUCTURAL_DISPLACEMENT);
noalias(r_current_displacement) = itNode->FastGetSolutionStepValue(DISPLACEMENT);
array_1d<double,3>& r_smoothed_velocity = itNode->FastGetSolutionStepValue(SMOOTHED_STRUCTURAL_VELOCITY);
noalias(r_smoothed_velocity) = 1.0/3.0 * (itNode->FastGetSolutionStepValue(VELOCITY) + 2.0 * itNode->FastGetSolutionStepValue(SMOOTHED_STRUCTURAL_VELOCITY, 1));
}
KRATOS_CATCH("")
}
void InterpolateStructuralSolution(ModelPart& r_structural_model_part, const double fem_delta_time, const double fem_time, const double dem_delta_time, const double dem_time) {
KRATOS_TRY
const double previous_fem_time = fem_time - fem_delta_time;
const double time_factor = (dem_time - previous_fem_time) / fem_delta_time;
const double previous_time_factor = (dem_time - dem_delta_time - previous_fem_time) / fem_delta_time;
const int NNodes = static_cast<int>(r_structural_model_part.Nodes().size());
ModelPart::NodesContainerType::iterator node_begin = r_structural_model_part.NodesBegin();
#pragma omp parallel for
for (int i = 0; i < NNodes; i++) {
ModelPart::NodesContainerType::iterator it_node = node_begin + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates()
+ it_node->FastGetSolutionStepValue(DISPLACEMENT,1)
+ (it_node->FastGetSolutionStepValue(BACKUP_LAST_STRUCTURAL_DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT,1)) * time_factor;
array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
const array_1d<double,3>& previous_velocity = it_node->FastGetSolutionStepValue(SMOOTHED_STRUCTURAL_VELOCITY,1);
noalias(r_velocity) = previous_velocity + (it_node->FastGetSolutionStepValue(SMOOTHED_STRUCTURAL_VELOCITY) - previous_velocity) * time_factor;
array_1d<double,3>& r_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT);
noalias(r_displacement) = it_node->Coordinates() - it_node->GetInitialPosition().Coordinates();
array_1d<double, 3> previous_coordinates;
noalias(previous_coordinates) = it_node->GetInitialPosition().Coordinates()
+ it_node->FastGetSolutionStepValue(DISPLACEMENT,1)
+ (it_node->FastGetSolutionStepValue(BACKUP_LAST_STRUCTURAL_DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT,1)) * previous_time_factor;
array_1d<double,3>& delta_displacement = it_node->FastGetSolutionStepValue(DELTA_DISPLACEMENT);
noalias(delta_displacement) = it_node->Coordinates() - previous_coordinates;
}
KRATOS_CATCH("")
}
void RestoreStructuralSolution(ModelPart& r_structural_model_part) {
KRATOS_TRY
const int NNodes = static_cast<int>(r_structural_model_part.Nodes().size());
ModelPart::NodesContainerType::iterator node_begin = r_structural_model_part.NodesBegin();
#pragma omp parallel for
for (int i = 0; i < NNodes; i++) {
ModelPart::NodesContainerType::iterator it_node = node_begin + i;
array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
noalias(r_velocity) = it_node->FastGetSolutionStepValue(BACKUP_LAST_STRUCTURAL_VELOCITY);
array_1d<double,3>& r_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT);
noalias(r_displacement) = it_node->FastGetSolutionStepValue(BACKUP_LAST_STRUCTURAL_DISPLACEMENT);
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates() + it_node->FastGetSolutionStepValue(DISPLACEMENT);
}
KRATOS_CATCH("")
}
virtual std::string Info() const { return "";}
virtual void PrintInfo(std::ostream& rOStream) const {}
virtual void PrintData(std::ostream& rOStream) const {}
private:
InterpolateStructuralSolutionForDEM& operator= (InterpolateStructuralSolutionForDEM const& rOther);
}; // class InterpolateStructuralSolutionForDEM
} // namespace Kratos
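// Typical call order within a coupled FEM/DEM step (illustrative sketch; the
// loop variables below are hypothetical):
//
//   Kratos::InterpolateStructuralSolutionForDEM interpolator;
//   interpolator.SaveStructuralSolution(structural_model_part);   // after each FEM solve
//   while (dem_time < fem_time) {
//       dem_time += dem_delta_time;
//       interpolator.InterpolateStructuralSolution(structural_model_part,
//           fem_delta_time, fem_time, dem_delta_time, dem_time);
//       // ... advance the DEM solver one substep ...
//   }
//   interpolator.RestoreStructuralSolution(structural_model_part); // before the next FEM solve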
#endif // INTERPOLATE_STRUCTURAL_SOLUTION_FOR_DEM_UTILITY_H
|
progress_reduction-inl.h | #ifndef PROGRESS_REDUCTION_INL
#define PROGRESS_REDUCTION_INL
#if defined(_OPENMP)
#include "omp.h"
#endif
#include "errors/invariants.h"
namespace nut{
inline ProgressReduction::ProgressReduction(AbstractProgress* external_progress, unsigned int num_external_steps):
external_progress_(external_progress),
num_external_steps_(num_external_steps),
num_internal_steps_(0),
reduction_ratio_(0),
last_emitted_step_(0),
internal_steps_(0)
{
}
inline void ProgressReduction::MakeAStep(){
if(external_progress_ == nullptr)
return;
#if defined(_OPENMP)
#pragma omp critical
#endif
{
internal_steps_ += reduction_ratio_;
}
#if defined(_OPENMP)
if(omp_get_thread_num() != 0){
return;
}
#endif
const double to_increment(internal_steps_ - last_emitted_step_);
assert(to_increment >= 0);
unsigned int to_increment_int (to_increment);
external_progress_->MakeNSteps(to_increment_int);
last_emitted_step_ += to_increment_int;
}
inline void ProgressReduction::MakeNSteps(unsigned int n){
for(unsigned int i = 0; i < n; ++i){
MakeAStep();
}
}
inline bool ProgressReduction::IsCanceled()const{
return (external_progress_ != nullptr)?external_progress_->IsCanceled():false;
}
inline void ProgressReduction::Reset(){
last_emitted_step_ = 0;
num_internal_steps_ = 0;
internal_steps_ = 0.;
}
inline void ProgressReduction::Finalize(){
PRECONDITION(last_emitted_step_ <= num_external_steps_);
if(external_progress_ == nullptr)
return;
int final_steps (static_cast<int>(num_external_steps_-last_emitted_step_));
assert(final_steps >= 0);
for(int i = 0; i < final_steps; ++i){
external_progress_->MakeAStep();
++last_emitted_step_;
}
internal_steps_ = last_emitted_step_;
}
inline void ProgressReduction::set_num_external_steps(unsigned int steps){
num_external_steps_ = steps;
reduction_ratio_ =(num_internal_steps_!=0)?double(num_external_steps_)/num_internal_steps_:0;
}
inline void ProgressReduction::set_num_internal_steps(unsigned int steps){
num_internal_steps_ = steps;
reduction_ratio_ =(num_internal_steps_!=0)?double(num_external_steps_)/num_internal_steps_:0;
}
inline void ProgressReduction::set_external_progress(AbstractProgress* prg){
external_progress_ = prg;
}
inline unsigned int ProgressReduction::num_external_steps()const{
return num_external_steps_;
}
inline unsigned int ProgressReduction::num_internal_steps()const{
return num_internal_steps_;
}
inline double ProgressReduction::reduction_ratio()const{
return reduction_ratio_;
}
inline AbstractProgress* ProgressReduction::external_progress(){
return external_progress_;
}
inline const AbstractProgress* ProgressReduction::external_progress()const{
return external_progress_;
}
}
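// Usage sketch (illustrative; AbstractProgress is the interface this class
// forwards to, and the variable names below are hypothetical):
//
//   nut::ProgressReduction reduction(&outer_progress, /*num_external_steps=*/100);
//   reduction.set_num_internal_steps(n_iterations);  // fixes reduction_ratio_
//   for (unsigned int i = 0; i < n_iterations; ++i) {
//       // ... do one unit of work ...
//       reduction.MakeAStep();  // forwards ~100/n_iterations of an external step
//   }
//   reduction.Finalize();  // flushes any remaining external steps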
#endif // PROGRESS_REDUCTION_INL |
YAKL_reductions.h |
#pragma once
template <class T, int myMem> class ParallelMin;
template <class T, int myMem> class ParallelMax;
template <class T, int myMem> class ParallelSum;
#ifdef YAKL_ARCH_HIP
template <class T> class ParallelMin<T,memDevice> {
void *tmp; // Temporary storage
size_t nTmp; // Size of temporary storage
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelMin() { tmp = NULL; }
ParallelMin(int const nItems) { tmp = NULL; setup(nItems); }
~ParallelMin() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
// Get the amount of temporary storage needed (call with NULL storage pointer)
hipcub::DeviceReduce::Min(tmp, nTmp, rsltP , rsltP , nItems );
tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage
this->nItems = nItems;
}
void finalize() {
if (tmp != NULL) {
yaklFreeDevice(rsltP,"");
yaklFreeDevice(tmp,"");
}
tmp = NULL;
}
T operator() (T *data) {
T rslt;
hipcub::DeviceReduce::Min(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction
hipMemcpyAsync(&rslt,rsltP,sizeof(T),hipMemcpyDeviceToHost,0); // Copy result to host
check_last_error();
fence();
return rslt;
}
void deviceReduce(T *data, T *devP) {
hipcub::DeviceReduce::Min(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction
#if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
fence();
#endif
}
};
template <class T> class ParallelMax<T,memDevice> {
void *tmp; // Temporary storage
size_t nTmp; // Size of temporary storage
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelMax() { tmp = NULL; }
ParallelMax(int const nItems) { tmp = NULL; setup(nItems); }
~ParallelMax() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
// Get the amount of temporary storage needed (call with NULL storage pointer)
hipcub::DeviceReduce::Max(tmp, nTmp, rsltP , rsltP , nItems );
tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage
this->nItems = nItems;
}
void finalize() {
if (tmp != NULL) {
yaklFreeDevice(rsltP,"");
yaklFreeDevice(tmp,"");
}
tmp = NULL;
}
T operator() (T *data) {
T rslt;
hipcub::DeviceReduce::Max(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction
hipMemcpyAsync(&rslt,rsltP,sizeof(T),hipMemcpyDeviceToHost,0); // Copy result to host
check_last_error();
fence();
return rslt;
}
void deviceReduce(T *data, T *devP) {
hipcub::DeviceReduce::Max(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction
#if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
fence();
#endif
}
};
template <class T> class ParallelSum<T,memDevice> {
void *tmp; // Temporary storage
size_t nTmp; // Size of temporary storage
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelSum() { tmp = NULL; }
ParallelSum(int const nItems) { tmp = NULL; setup(nItems); }
~ParallelSum() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
// Get the amount of temporary storage needed (call with NULL storage pointer)
hipcub::DeviceReduce::Sum(tmp, nTmp, rsltP , rsltP , nItems );
tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage
this->nItems = nItems;
}
void finalize() {
if (tmp != NULL) {
yaklFreeDevice(rsltP,"");
yaklFreeDevice(tmp,"");
}
tmp = NULL;
}
T operator() (T *data) {
T rslt;
hipcub::DeviceReduce::Sum(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction
hipMemcpyAsync(&rslt,rsltP,sizeof(T),hipMemcpyDeviceToHost,0); // Copy result to host
check_last_error();
fence();
return rslt;
}
void deviceReduce(T *data, T *devP) {
hipcub::DeviceReduce::Sum(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction
#if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
fence();
#endif
}
};
#elif defined(YAKL_ARCH_CUDA)
template <class T> class ParallelMin<T,memDevice> {
void *tmp; // Temporary storage
size_t nTmp; // Size of temporary storage
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelMin() { tmp = NULL; }
ParallelMin(int const nItems) { tmp = NULL; setup(nItems); }
~ParallelMin() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
// Get the amount of temporary storage needed (call with NULL storage pointer)
cub::DeviceReduce::Min(tmp, nTmp, rsltP , rsltP , nItems );
tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage
this->nItems = nItems;
}
void finalize() {
if (tmp != NULL) {
yaklFreeDevice(rsltP,"");
yaklFreeDevice(tmp,"");
}
tmp = NULL;
}
T operator() (T *data) {
T rslt;
cub::DeviceReduce::Min(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction
cudaMemcpyAsync(&rslt,rsltP,sizeof(T),cudaMemcpyDeviceToHost,0); // Copy result to host
check_last_error();
fence();
return rslt;
}
void deviceReduce(T *data, T *devP) {
cub::DeviceReduce::Min(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction
#if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
fence();
#endif
}
};
template <class T> class ParallelMax<T,memDevice> {
void *tmp; // Temporary storage
size_t nTmp; // Size of temporary storage
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelMax() { tmp = NULL; }
ParallelMax(int const nItems) { tmp = NULL; setup(nItems); }
~ParallelMax() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
// Get the amount of temporary storage needed (call with NULL storage pointer)
cub::DeviceReduce::Max(tmp, nTmp, rsltP , rsltP , nItems );
tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage
this->nItems = nItems;
}
void finalize() {
if (tmp != NULL) {
yaklFreeDevice(rsltP,"");
yaklFreeDevice(tmp,"");
}
tmp = NULL;
}
T operator() (T *data) {
T rslt;
cub::DeviceReduce::Max(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction
cudaMemcpyAsync(&rslt,rsltP,sizeof(T),cudaMemcpyDeviceToHost,0); // Copy result to host
check_last_error();
fence();
return rslt;
}
void deviceReduce(T *data, T *devP) {
cub::DeviceReduce::Max(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction
#if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
fence();
#endif
}
};
template <class T> class ParallelSum<T,memDevice> {
void *tmp; // Temporary storage
size_t nTmp; // Size of temporary storage
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelSum() { tmp = NULL; }
ParallelSum(int const nItems) { tmp = NULL; setup(nItems); }
~ParallelSum() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
// Get the amount of temporary storage needed (call with NULL storage pointer)
cub::DeviceReduce::Sum(tmp, nTmp, rsltP , rsltP , nItems );
tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage
this->nItems = nItems;
}
void finalize() {
if (tmp != NULL) {
yaklFreeDevice(rsltP,"");
yaklFreeDevice(tmp,"");
}
tmp = NULL;
}
T operator() (T *data) {
T rslt;
cub::DeviceReduce::Sum(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction
cudaMemcpyAsync(&rslt,rsltP,sizeof(T),cudaMemcpyDeviceToHost,0); // Copy result to host
check_last_error();
fence();
return rslt;
}
void deviceReduce(T *data, T *devP) {
cub::DeviceReduce::Sum(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction
#if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
fence();
#endif
}
};
#elif defined(YAKL_ARCH_SYCL)
static inline size_t get_wg_size_for_reduction(size_t bytes_per_wi) {
// The best work-group size depends on implementation details
// We make the following assumptions, which aren't specific to DPC++:
// - Bigger work-groups are better
// - An implementation may reserve 1 element per work-item in shared memory
// In practice, DPC++ seems to limit itself to 1/2 of this
const size_t max_size = sycl_default_stream->get_device().get_info<sycl::info::device::max_work_group_size>();
const size_t local_mem = sycl_default_stream->get_device().get_info<sycl::info::device::local_mem_size>();
return std::min(local_mem / bytes_per_wi, max_size) / 2;
}
static inline size_t round_up(size_t N, size_t multiple) { return ((N + multiple - 1) / multiple) * multiple; }
template <class T>
static inline sycl::nd_range<1> get_reduction_range(size_t N, T reductionVars) {
size_t bytes_per_wi = sizeof( std::remove_pointer_t<T> );
size_t L = get_wg_size_for_reduction(bytes_per_wi);
size_t G = round_up(N, L);
return sycl::nd_range<1>{G, L};
}
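// For example (illustrative numbers): with N = 1000 items and a computed
// work-group size L = 512, round_up yields a global size G = 1024; the 24
// excess work-items are masked off by the i < nItems guards in the
// reduction kernels below.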
template <class T> class ParallelMin<T,memDevice> {
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelMin() { rsltP = nullptr; }
ParallelMin(int const nItems) { rsltP = nullptr; setup(nItems); }
~ParallelMin() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
this->nItems = nItems;
}
void finalize() {
if(rsltP != nullptr) {
yaklFreeDevice(rsltP,"");
}
rsltP = nullptr;
}
T operator() (T *data) {
T rslt=0;
sycl_default_stream->submit([&, nItems = this->nItems](sycl::handler &cgh) {
cgh.parallel_for(get_reduction_range(nItems, rsltP),
sycl::reduction(rsltP, sycl::minimum<>(), sycl::property::reduction::initialize_to_identity{}),
[=](sycl::nd_item<1> idx, auto& min) {
const int i = idx.get_global_linear_id();
if (i < nItems) {
min.combine(data[i]);
}
});
}).wait();
sycl_default_stream->memcpy(&rslt,rsltP,sizeof(T)).wait(); // Copy result to host
return rslt;
}
void deviceReduce(T *data, T *devP) {
sycl_default_stream->submit([&, nItems = this->nItems](sycl::handler &cgh) {
cgh.parallel_for(get_reduction_range(nItems, devP),
sycl::reduction(devP, sycl::minimum<>(), sycl::property::reduction::initialize_to_identity{}),
[=](sycl::nd_item<1> idx, auto& min) {
const int i = idx.get_global_linear_id();
if (i < nItems) {
min.combine(data[i]);
}
});
}).wait();
}
};
template <class T> class ParallelMax<T,memDevice> {
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelMax() { rsltP = nullptr; }
ParallelMax(int const nItems) { rsltP = nullptr; setup(nItems); }
~ParallelMax() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
this->nItems = nItems;
}
void finalize() {
if(rsltP != nullptr) {
yaklFreeDevice(rsltP,"");
}
rsltP = nullptr;
}
T operator() (T *data) {
T rslt=0;
sycl_default_stream->submit([&, nItems = this->nItems](sycl::handler &cgh) {
cgh.parallel_for(get_reduction_range(nItems, rsltP),
sycl::reduction(rsltP, sycl::maximum<>(), sycl::property::reduction::initialize_to_identity{}),
[=](sycl::nd_item<1> idx, auto& max) {
const int i = idx.get_global_linear_id();
if (i < nItems) {
max.combine(data[i]);
}
});
}).wait();
sycl_default_stream->memcpy(&rslt,rsltP,sizeof(T)).wait(); // Copy result to host
return rslt;
}
void deviceReduce(T *data, T *devP) {
sycl_default_stream->submit([&, nItems = this->nItems](sycl::handler &cgh) {
cgh.parallel_for(get_reduction_range(nItems, devP),
sycl::reduction(devP, sycl::maximum<>(), sycl::property::reduction::initialize_to_identity{}),
[=](sycl::nd_item<1> idx, auto& max) {
const int i = idx.get_global_linear_id();
if (i < nItems) {
max.combine(data[i]);
}
});
}).wait();
}
};
template <class T> class ParallelSum<T,memDevice> {
int nItems; // Number of items in the array that will be reduced
T *rsltP; // Device pointer for reduction result
public:
ParallelSum() { rsltP = nullptr; }
ParallelSum(int const nItems) { rsltP = nullptr; setup(nItems); }
~ParallelSum() { finalize(); }
void setup(int const nItems) {
finalize();
rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
this->nItems = nItems;
}
void finalize() {
if(rsltP != nullptr) {
yaklFreeDevice(rsltP,"");
}
rsltP = nullptr;
}
T operator() (T *data) {
T rslt=0;
sycl_default_stream->submit([&, nItems = this->nItems](sycl::handler &cgh) {
cgh.parallel_for(get_reduction_range(nItems, rsltP),
sycl::reduction(rsltP, std::plus<>(), sycl::property::reduction::initialize_to_identity{}),
[=](sycl::nd_item<1> idx, auto& sum) {
const int i = idx.get_global_linear_id();
if (i < nItems) {
sum.combine(data[i]);
}
});
}).wait();
sycl_default_stream->memcpy(&rslt,rsltP,sizeof(T)).wait(); // Copy result to host
return rslt;
}
void deviceReduce(T *data, T *devP) {
sycl_default_stream->submit([&, nItems = this->nItems](sycl::handler &cgh) {
cgh.parallel_for(get_reduction_range(nItems, devP),
sycl::reduction(devP, std::plus<>(), sycl::property::reduction::initialize_to_identity{}),
[=](sycl::nd_item<1> idx, auto& sum) {
const int i = idx.get_global_linear_id();
if (i < nItems) {
sum.combine(data[i]);
}
});
}).wait();
}
};
#elif defined(YAKL_ARCH_OPENMP45)
template <class T> class ParallelSum<T,memDevice> {
int nItems;
public:
ParallelSum() {}
ParallelSum(int const nItems) {
this->nItems = nItems;
}
~ParallelSum() { }
T operator() (T *data) {
T rslt = 0;
#pragma omp target teams distribute parallel for simd reduction(+:rslt) is_device_ptr(data)
for(int i=0; i<nItems; i++) {
rslt += data[i];
}
return rslt;
}
void deviceReduce(T *data, T *devP) {
T rslt = 0;
#pragma omp target teams distribute parallel for simd reduction(+:rslt) is_device_ptr(data)
for (int i=0; i<nItems; i++) {
rslt += data[i];
}
omp_target_memcpy(devP,&rslt,sizeof(T),0,0,omp_get_default_device(),omp_get_initial_device());
#pragma omp taskwait
check_last_error();
}
};
template <class T> class ParallelMin<T,memDevice> {
int nItems;
public:
ParallelMin() {}
ParallelMin(int const nItems) {
this->nItems = nItems;
}
~ParallelMin() { }
T operator() (T *data) {
T rslt = std::numeric_limits<T>::max();
#pragma omp target teams distribute parallel for simd reduction(min:rslt) is_device_ptr(data)
for(int i=0; i<nItems; i++) {
rslt = data[i] < rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *devP) {
T rslt = std::numeric_limits<T>::max();
#pragma omp target teams distribute parallel for simd reduction(min:rslt) is_device_ptr(data)
for (int i=0; i<nItems; i++) {
rslt = data[i] < rslt ? data[i] : rslt;
}
omp_target_memcpy(devP,&rslt,sizeof(T),0,0,omp_get_default_device(),omp_get_initial_device());
#pragma omp taskwait
check_last_error();
}
};
template <class T> class ParallelMax<T,memDevice> {
int nItems;
public:
ParallelMax() {}
ParallelMax(int const nItems) {
this->nItems = nItems;
}
~ParallelMax() { }
T operator() (T *data) {
T rslt = std::numeric_limits<T>::lowest();
#pragma omp target teams distribute parallel for simd reduction(max:rslt) is_device_ptr(data)
for(int i=0; i<nItems; i++) {
rslt = data[i] > rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *devP) {
T rslt = std::numeric_limits<T>::lowest();
#pragma omp target teams distribute parallel for simd reduction(max:rslt) is_device_ptr(data)
for (int i=0; i<nItems; i++) {
rslt = data[i] > rslt ? data[i] : rslt;
}
omp_target_memcpy(devP,&rslt,sizeof(T),0,0,omp_get_default_device(),omp_get_initial_device());
#pragma omp taskwait
check_last_error();
}
};
#elif defined(YAKL_ARCH_OPENMP)
template <class T> class ParallelSum<T,memDevice> {
int nItems;
public:
ParallelSum() {}
ParallelSum(int const nItems) {
this->nItems = nItems;
}
~ParallelSum() { }
T operator() (T *data) {
T rslt = 0;
#pragma omp parallel for reduction(+:rslt)
for(int i=0; i<nItems; i++) {
rslt += data[i];
}
return rslt;
}
void deviceReduce(T *data, T *devP) {
T rslt = 0;
#pragma omp parallel for reduction(+:rslt)
for (int i=0; i<nItems; i++) {
rslt += data[i];
}
*devP = rslt;
}
};
template <class T> class ParallelMin<T,memDevice> {
int nItems;
public:
ParallelMin() {}
ParallelMin(int const nItems) {
this->nItems = nItems;
}
~ParallelMin() { }
T operator() (T *data) {
T rslt = std::numeric_limits<T>::max();
#pragma omp parallel for reduction(min:rslt)
for(int i=0; i<nItems; i++) {
rslt = data[i] < rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *devP) {
T rslt = std::numeric_limits<T>::max();
#pragma omp parallel for reduction(min:rslt)
for (int i=0; i<nItems; i++) {
rslt = data[i] < rslt ? data[i] : rslt;
}
*devP = rslt;
}
};
template <class T> class ParallelMax<T,memDevice> {
int nItems;
public:
ParallelMax() {}
ParallelMax(int const nItems) {
this->nItems = nItems;
}
~ParallelMax() { }
T operator() (T *data) {
T rslt = std::numeric_limits<T>::lowest();
#pragma omp parallel for reduction(max:rslt)
for(int i=0; i<nItems; i++) {
rslt = data[i] > rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *devP) {
T rslt = std::numeric_limits<T>::lowest();
#pragma omp parallel for reduction(max:rslt)
for (int i=0; i<nItems; i++) {
rslt = data[i] > rslt ? data[i] : rslt;
}
*devP = rslt;
}
};
#else
template <class T> class ParallelMin<T,memDevice> {
int nItems; // Number of items in the array that will be reduced
public:
ParallelMin() {}
ParallelMin(int const nItems) {
this->nItems = nItems;
}
~ParallelMin() {
}
void setup(int nItems) { this->nItems = nItems; }
T operator() (T *data) {
T rslt = data[0];
for (int i=1; i<nItems; i++) {
rslt = data[i] < rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *rslt) {
*(rslt) = data[0];
for (int i=1; i<nItems; i++) {
*(rslt) = data[i] < *(rslt) ? data[i] : *(rslt);
}
}
};
template <class T> class ParallelMax<T,memDevice> {
int nItems; // Number of items in the array that will be reduced
public:
ParallelMax() {}
ParallelMax(int const nItems) {
this->nItems = nItems;
}
~ParallelMax() {
}
void setup(int nItems) { this->nItems = nItems; }
T operator() (T *data) {
T rslt = data[0];
for (int i=1; i<nItems; i++) {
rslt = data[i] > rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *rslt) {
*(rslt) = data[0];
for (int i=1; i<nItems; i++) {
*(rslt) = data[i] > *(rslt) ? data[i] : *(rslt);
}
}
};
template <class T> class ParallelSum<T,memDevice> {
int nItems; // Number of items in the array that will be reduced
public:
ParallelSum() {}
ParallelSum(int const nItems) {
this->nItems = nItems;
}
~ParallelSum() {
}
void setup(int nItems) { this->nItems = nItems; }
T operator() (T *data) {
T rslt = data[0];
for (int i=1; i<nItems; i++) {
rslt += data[i];
}
return rslt;
}
void deviceReduce(T *data, T *rslt) {
*(rslt) = data[0];
for (int i=1; i<nItems; i++) {
*(rslt) += data[i];
}
}
};
#endif
template <class T> class ParallelMin<T,memHost> {
int nItems; // Number of items in the array that will be reduced
public:
ParallelMin() {}
ParallelMin(int const nItems) {
this->nItems = nItems;
}
~ParallelMin() {
}
void setup(int nItems) { this->nItems = nItems; }
T operator() (T *data) {
T rslt = data[0];
for (int i=1; i<nItems; i++) {
rslt = data[i] < rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *rslt) {
*(rslt) = data[0];
for (int i=1; i<nItems; i++) {
*(rslt) = data[i] < *(rslt) ? data[i] : *(rslt);
}
}
};
template <class T> class ParallelMax<T,memHost> {
int nItems; // Number of items in the array that will be reduced
public:
ParallelMax() {}
ParallelMax(int const nItems) {
this->nItems = nItems;
}
~ParallelMax() {
}
void setup(int nItems) { this->nItems = nItems; }
T operator() (T *data) {
T rslt = data[0];
for (int i=1; i<nItems; i++) {
rslt = data[i] > rslt ? data[i] : rslt;
}
return rslt;
}
void deviceReduce(T *data, T *rslt) {
*(rslt) = data[0];
for (int i=1; i<nItems; i++) {
*(rslt) = data[i] > *(rslt) ? data[i] : *(rslt);
}
}
};
template <class T> class ParallelSum<T,memHost> {
int nItems; // Number of items in the array that will be reduced
public:
ParallelSum() {}
ParallelSum(int const nItems) {
this->nItems = nItems;
}
~ParallelSum() {
}
void setup(int nItems) { this->nItems = nItems; }
T operator() (T *data) {
T rslt = data[0];
for (int i=1; i<nItems; i++) {
rslt += data[i];
}
return rslt;
}
void deviceReduce(T *data, T *rslt) {
*(rslt) = data[0];
for (int i=1; i<nItems; i++) {
*(rslt) += data[i];
}
}
};
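// Usage sketch for the host specializations (illustrative; data values are
// hypothetical, and ParallelMin / ParallelMax are used the same way):
//
//   double data[4] = {1., 2., 3., 4.};
//   ParallelSum<double,memHost> psum(4);
//   double total = psum(data);  // total == 10.0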
|
openmp_common.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 -o - %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 -o - %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
void foo() {
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
}
typedef struct S {
#pragma omp parallel for private(j) schedule(static) if (tree1->totleaf > 1024) // expected-error {{unexpected OpenMP directive '#pragma omp parallel for'}}
} St;
|
2.hello.c | #include <stdio.h>
#include <omp.h>
/* If the OMP_NUM_THREADS variable is set to 8 with */
/* export OMP_NUM_THREADS=8 */
/* Q1: Is the execution of the program correct? Add a */
/* data sharing clause to make it correct */
/* Q2: Are the lines always printed in the same order? */
/* Could the messages appear intermixed? */
int main ()
{
int id;
#pragma omp parallel
{
#pragma omp critical
{
id =omp_get_thread_num();
printf("(%d) Hello ",id);
printf("(%d) world!\n",id);
}
}
return 0;
}
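/* One possible answer (sketch): for Q1, the intended data-sharing fix is to
   make id private so each thread keeps its own copy, e.g.
     #pragma omp parallel private(id)
   As written, the shared id happens to be protected by the critical section,
   but without any protection it could be overwritten between the assignment
   and the printfs. For Q2: even in a correct version, the per-thread
   "Hello/world" pairs may be printed in any thread order. */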
|
sparse_matrix_multiplication_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED )
#define KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED
// System includes
#include <vector>
#include <math.h>
#include <algorithm>
#include <numeric>
#ifdef _OPENMP
#include <omp.h>
#endif
// External includes
#include "amgcl/value_type/interface.hpp"
// Project includes
#include "includes/define.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class SparseMatrixMultiplicationUtility
* @ingroup KratosCore
* @brief A utility to multiply sparse matrices in Ublas
* @details Taken and adapted for ublas from external_libraries/amgcl/detail/spgemm.hpp by Denis Demidov <dennis.demidov@gmail.com>
* @todo Remove as soon as we no longer depend on Ublas...
* @author Vicente Mataix Ferrandiz
*/
class SparseMatrixMultiplicationUtility
{
public:
///@name Type Definitions
///@{
/// Pointer definition of SparseMatrixMultiplicationUtility
KRATOS_CLASS_POINTER_DEFINITION( SparseMatrixMultiplicationUtility );
/// The size type
typedef std::size_t SizeType;
/// The index type
typedef std::size_t IndexType;
/// The signed index type
typedef std::ptrdiff_t SignedIndexType;
/// A vector of indexes
typedef DenseVector<IndexType> IndexVectorType;
/// A vector of indexes (signed)
typedef DenseVector<SignedIndexType> SignedIndexVectorType;
///@}
///@name Life Cycle
///@{
/// Default constructor
SparseMatrixMultiplicationUtility(){};
/// Destructor
virtual ~SparseMatrixMultiplicationUtility()= default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Metafunction that returns value type of a matrix or a vector type.
template <class T, class Enable = void>
struct value_type {
typedef typename T::value_type type;
};
/**
* @brief Matrix-matrix product C = A·B
* @details This method uses a template argument for each matrix type
* @param rA The first matrix
* @param rB The second matrix
* @param rC The resulting matrix
*/
template <class AMatrix, class BMatrix, class CMatrix>
static void MatrixMultiplication(
const AMatrix& rA,
const BMatrix& rB,
CMatrix& rC
)
{
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif
if (nt > 16) {
MatrixMultiplicationRMerge(rA, rB, rC);
} else {
MatrixMultiplicationSaad(rA, rB, rC);
}
}
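// Usage sketch (illustrative; CompressedMatrix stands in for the usual
// Ublas-based Kratos matrix type):
//
//   CompressedMatrix A(n, n), B(n, n), C;
//   // ... fill A and B ...
//   SparseMatrixMultiplicationUtility::MatrixMultiplication(A, B, C);  // C = A*B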
/**
* @brief An OpenMP-enabled modification of the classic algorithm from Saad
* @details It is used when the number of OpenMP threads is 16 or fewer (see MatrixMultiplication). Saad, Yousef. Iterative methods for sparse linear systems. Siam, 2003.
* @param A The first matrix to multiply
* @param B The second matrix to multiply
* @param C The resulting matrix
*/
template <class AMatrix, class BMatrix, class CMatrix>
static void MatrixMultiplicationSaad(
const AMatrix& A,
const BMatrix& B,
CMatrix& C
)
{
typedef typename value_type<CMatrix>::type ValueType;
// Auxiliary sizes
const SizeType nrows = A.size1();
const SizeType ncols = B.size2();
// Exiting just in case of empty matrix
if ((nrows == 0) || (ncols == 0))
return void();
// Get access to A, B and C data
const IndexType* index1_a = A.index1_data().begin();
const IndexType* index2_a = A.index2_data().begin();
const double* values_a = A.value_data().begin();
const IndexType* index1_b = B.index1_data().begin();
const IndexType* index2_b = B.index2_data().begin();
const double* values_b = B.value_data().begin();
IndexType* c_ptr = new IndexType[nrows + 1];
c_ptr[0] = 0;
#pragma omp parallel
{
SignedIndexVectorType marker(ncols);
for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill)
marker[i_fill] = -1;
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
IndexType C_cols = 0;
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
const IndexType row_begin_b = index1_b[ca];
const IndexType row_end_b = index1_b[ca+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
if (marker[cb] != ia) {
marker[cb] = ia;
++C_cols;
}
}
}
c_ptr[ia + 1] = C_cols;
}
}
// We initialize the sparse matrix
std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr);
const SizeType nonzero_values = c_ptr[nrows];
IndexType* aux_index2_c = new IndexType[nonzero_values];
ValueType* aux_val_c = new ValueType[nonzero_values];
#pragma omp parallel
{
SignedIndexVectorType marker(ncols);
for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill)
marker[i_fill] = -1;
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
const IndexType row_beg = c_ptr[ia];
IndexType row_end = row_beg;
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
const ValueType va = values_a[ja];
const IndexType row_begin_b = index1_b[ca];
const IndexType row_end_b = index1_b[ca+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
const ValueType vb = values_b[jb];
if (marker[cb] < static_cast<SignedIndexType>(row_beg)) {
marker[cb] = row_end;
aux_index2_c[row_end] = cb;
aux_val_c[row_end] = va * vb;
++row_end;
} else {
aux_val_c[marker[cb]] += va * vb;
}
}
}
}
}
// We reorder the rows
SortRows(c_ptr, nrows, ncols, aux_index2_c, aux_val_c);
// We fill the matrix
CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c);
// Release memory
delete[] c_ptr;
delete[] aux_index2_c;
delete[] aux_val_c;
}
/**
* @brief Row-merge algorithm from Rupp et al.
* @details The algorithm requires less memory and shows much better scalability than the classic one. It is used when the number of OpenMP threads is more than 16 (see MatrixMultiplication).
* @param A The first matrix to multiply
* @param B The second matrix to multiply
* @param C The resulting matrix
*/
template <class AMatrix, class BMatrix, class CMatrix>
static void MatrixMultiplicationRMerge(
const AMatrix &A,
const BMatrix &B,
CMatrix &C
)
{
typedef typename value_type<CMatrix>::type ValueType;
// Auxiliary sizes
const SizeType nrows = A.size1();
const SizeType ncols = B.size2();
// Exiting just in case of empty matrix
if ((nrows == 0) || (ncols == 0))
return void();
// Get access to A and B data
const IndexType* index1_a = A.index1_data().begin();
const IndexType* index2_a = A.index2_data().begin();
const double* values_a = A.value_data().begin();
const IndexType* index1_b = B.index1_data().begin();
const IndexType* index2_b = B.index2_data().begin();
const double* values_b = B.value_data().begin();
IndexType max_row_width = 0;
#pragma omp parallel
{
IndexType my_max = 0;
#pragma omp for
for(int i = 0; i < static_cast<int>(nrows); ++i) {
const IndexType row_beg = index1_a[i];
const IndexType row_end = index1_a[i+1];
IndexType row_width = 0;
for(IndexType j = row_beg; j < row_end; ++j) {
const IndexType a_col = index2_a[j];
row_width += index1_b[a_col + 1] - index1_b[a_col];
}
my_max = std::max(my_max, row_width);
}
#pragma omp critical
max_row_width = std::max(max_row_width, my_max);
}
#ifdef _OPENMP
const int nthreads = omp_get_max_threads();
#else
const int nthreads = 1;
#endif
std::vector< std::vector<IndexType> > tmp_col(nthreads);
std::vector< std::vector<ValueType> > tmp_val(nthreads);
for(int i = 0; i < nthreads; ++i) {
tmp_col[i].resize(3 * max_row_width);
tmp_val[i].resize(2 * max_row_width);
}
// We create the c_ptr auxiliary variable
IndexType* c_ptr = new IndexType[nrows + 1];
c_ptr[0] = 0;
#pragma omp parallel
{
#ifdef _OPENMP
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
IndexType* t_col = &tmp_col[tid][0];
#pragma omp for
for(int i = 0; i < static_cast<int>(nrows); ++i) {
const IndexType row_beg = index1_a[i];
const IndexType row_end = index1_a[i+1];
c_ptr[i+1] = ProdRowWidth( index2_a + row_beg, index2_a + row_end, index1_b, index2_b, t_col, t_col + max_row_width, t_col + 2 * max_row_width );
}
}
// We initialize the sparse matrix
std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr);
const SizeType nonzero_values = c_ptr[nrows];
IndexType* aux_index2_c = new IndexType[nonzero_values];
ValueType* aux_val_c = new ValueType[nonzero_values];
#pragma omp parallel
{
#ifdef _OPENMP
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
IndexType* t_col = tmp_col[tid].data();
ValueType *t_val = tmp_val[tid].data();
#pragma omp for
for(int i = 0; i < static_cast<int>(nrows); ++i) {
const IndexType row_beg = index1_a[i];
const IndexType row_end = index1_a[i+1];
ProdRow(index2_a + row_beg, index2_a + row_end, values_a + row_beg,
index1_b, index2_b, values_b, aux_index2_c + c_ptr[i], aux_val_c + c_ptr[i], t_col, t_val, t_col + max_row_width, t_val + max_row_width );
}
}
// We fill the matrix
CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c);
// Release memory
delete[] c_ptr;
delete[] aux_index2_c;
delete[] aux_val_c;
}
/**
* @brief This method adds two sparse matrices in an efficient way (A += Factor * B)
* @param A The first matrix, which also receives the result
* @param B The second matrix, added to A
* @param Factor The scalar factor applied to B
template <class AMatrix, class BMatrix>
static void MatrixAdd(
AMatrix& A,
const BMatrix& B,
const double Factor = 1.0
)
{
typedef typename value_type<AMatrix>::type ValueType;
// Auxiliary sizes
const SizeType nrows = A.size1();
const SizeType ncols = A.size2();
/* Some checks */
// Exit early in the case of an empty matrix
if ((nrows == 0) || (ncols == 0))
return;
KRATOS_ERROR_IF_NOT(nrows == B.size1()) << "The second matrix has a wrong number of rows" << std::endl;
KRATOS_ERROR_IF_NOT(ncols == B.size2()) << "The second matrix has a wrong number of columns" << std::endl;
// Get access to A and B data
const IndexType* index1_a = A.index1_data().begin();
const IndexType* index2_a = A.index2_data().begin();
const double* values_a = A.value_data().begin();
const IndexType* index1_b = B.index1_data().begin();
const IndexType* index2_b = B.index2_data().begin();
const double* values_b = B.value_data().begin();
IndexType* new_a_ptr = new IndexType[nrows + 1];
new_a_ptr[0] = 0;
#pragma omp parallel
{
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
SignedIndexVectorType marker(ncols);
for (int i = 0; i < static_cast<int>(ncols); ++i)
marker[i] = -1;
// Initialize
IndexType new_A_cols = 0;
// Iterate over A
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
marker[ca] = 1;
++new_A_cols;
}
// Iterate over B
const IndexType row_begin_b = index1_b[ia];
const IndexType row_end_b = index1_b[ia+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
if (marker[cb] < 0) {
marker[cb] = 1;
++new_A_cols;
}
}
new_a_ptr[ia + 1] = new_A_cols;
}
}
// We compute the row pointers of the sparse matrix (cumulative sum)
std::partial_sum(new_a_ptr, new_a_ptr + nrows + 1, new_a_ptr);
const SizeType nonzero_values = new_a_ptr[nrows];
IndexType* aux_index2_new_a = new IndexType[nonzero_values];
ValueType* aux_val_new_a = new ValueType[nonzero_values];
#pragma omp parallel
{
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
SignedIndexVectorType marker(ncols);
for (int i = 0; i < static_cast<int>(ncols); ++i)
marker[i] = -1;
// Initialize
const IndexType row_beg = new_a_ptr[ia];
IndexType row_end = row_beg;
// Iterate over A
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
const ValueType va = values_a[ja];
marker[ca] = row_end;
aux_index2_new_a[row_end] = ca;
aux_val_new_a[row_end] = va;
++row_end;
}
// Iterate over B
const IndexType row_begin_b = index1_b[ia];
const IndexType row_end_b = index1_b[ia+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
const ValueType vb = values_b[jb];
if (marker[cb] < 0) {
marker[cb] = row_end;
aux_index2_new_a[row_end] = cb;
aux_val_new_a[row_end] = Factor * vb;
++row_end;
} else {
aux_val_new_a[marker[cb]] += Factor * vb;
}
}
}
}
// We reorder the rows
SortRows(new_a_ptr, nrows, ncols, aux_index2_new_a, aux_val_new_a);
// We fill the matrix
CreateSolutionMatrix(A, nrows, ncols, new_a_ptr, aux_index2_new_a, aux_val_new_a);
// Release memory
delete[] new_a_ptr;
delete[] aux_index2_new_a;
delete[] aux_val_new_a;
}
/**
* @brief This method computes the transpose of a given matrix
* @param rA The resulting (transposed) matrix
* @param rB The matrix to be transposed
* @param Factor The factor applied to the transposed values (defaults to 1.0)
*/
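// Hypothetical usage sketch (names assumed): At = A^T
//   SparseMatrixMultiplicationUtility::TransposeMatrix(At, A);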
template <class AMatrix, class BMatrix>
static void TransposeMatrix(
AMatrix& rA,
const BMatrix& rB,
const double Factor = 1.0
)
{
typedef typename value_type<AMatrix>::type ValueType;
// Get access to B data
const IndexType* index1 = rB.index1_data().begin();
const IndexType* index2 = rB.index2_data().begin();
const ValueType* data = rB.value_data().begin();
const SizeType transpose_nonzero_values = rB.value_data().end() - rB.value_data().begin();
const SizeType size_system_1 = rB.size1();
const SizeType size_system_2 = rB.size2();
if (rA.size1() != size_system_2 || rA.size2() != size_system_1 ) {
rA.resize(size_system_2, size_system_1, false);
}
IndexVectorType new_a_ptr(size_system_2 + 1);
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(size_system_2 + 1); ++i)
new_a_ptr[i] = 0;
IndexVectorType aux_index2_new_a(transpose_nonzero_values);
DenseVector<ValueType> aux_val_new_a(transpose_nonzero_values);
#pragma omp parallel for
for (int i=0; i<static_cast<int>(size_system_1); ++i) {
IndexType row_begin = index1[i];
IndexType row_end = index1[i+1];
for (IndexType j=row_begin; j<row_end; j++) {
#pragma omp atomic
new_a_ptr[index2[j] + 1] += 1;
}
}
// We compute the row pointers of the transposed matrix (cumulative sum)
std::partial_sum(new_a_ptr.begin(), new_a_ptr.end(), &new_a_ptr[0]);
IndexVectorType aux_indexes(size_system_2);
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(size_system_2); ++i)
aux_indexes[i] = 0;
// #pragma omp parallel for
for (int i=0; i<static_cast<int>(size_system_1); ++i) {
IndexType row_begin = index1[i];
IndexType row_end = index1[i+1];
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType current_row = index2[j];
const IndexType initial_position = new_a_ptr[current_row];
const IndexType current_index = initial_position + aux_indexes[current_row];
aux_index2_new_a[current_index] = i;
aux_val_new_a[current_index] = Factor * data[j];
// #pragma omp atomic
aux_indexes[current_row] += 1;
}
}
// We reorder the rows
SortRows(&new_a_ptr[0], size_system_2, size_system_1, &aux_index2_new_a[0], &aux_val_new_a[0]);
// We fill the matrix
CreateSolutionMatrix(rA, size_system_2, size_system_1, &new_a_ptr[0], &aux_index2_new_a[0], &aux_val_new_a[0]);
}
/**
* @brief This method is designed to create the final solution sparse matrix from the auxiliary values
* @param C The solution matrix
* @param NRows The number of rows of the matrix
* @param NCols The number of columns of the matrix
* @param CPtr The indexes that indicate the number of nonzero values in each row
* @param AuxIndex2C The column indices of the nonzero values
* @param AuxValC The C array containing the values of the sparse matrix
*/
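// The three input arrays form a standard CSR triplet: CPtr (row pointers),
// AuxIndex2C (column indices) and AuxValC (values).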
template <class CMatrix, typename TSize, typename Ptr, typename IndexType, typename ValueType>
static inline void CreateSolutionMatrix(
CMatrix& C,
const TSize NRows,
const TSize NCols,
const Ptr* CPtr,
const IndexType* AuxIndex2C,
const ValueType* AuxValC
)
{
// Exit early in the case of an empty matrix
if ((NRows == 0) || (NCols == 0))
return;
// Auxiliary values
const TSize nonzero_values = CPtr[NRows];
C = CMatrix(NRows, NCols, nonzero_values);
IndexType* index1_c = C.index1_data().begin();
IndexType* index2_c = C.index2_data().begin();
double* values_c = C.value_data().begin();
index1_c[0] = 0;
for (TSize i = 0; i < NRows; i++)
index1_c[i+1] = index1_c[i] + (CPtr[i+1] - CPtr[i]);
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(nonzero_values); i++) {
KRATOS_DEBUG_ERROR_IF(AuxIndex2C[i] > static_cast<IndexType>(NCols)) << "Index " << AuxIndex2C[i] <<" is greater than the number of columns " << NCols << std::endl;
index2_c[i] = AuxIndex2C[i];
values_c[i] = AuxValC[i];
}
C.set_filled(NRows+1, nonzero_values);
}
/**
* @brief This method sorts the entries of each row by column index (an insertion sort per row)
* @param CPtr The indexes that indicate the number of nonzero values in each row
* @param NRows The number of rows of the matrix
* @param NCols The number of columns of the matrix
* @param Columns The column indices of the problem
* @param Values The values (to be reordered together with the columns)
*/
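// Example: a row stored as columns [5, 2, 9] with values [a, b, c]
// becomes columns [2, 5, 9] with values [b, a, c].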
template <typename TSize, typename Col, typename TIndexType, typename ValueType>
static inline void SortRows(
const TIndexType* CPtr,
const TSize NRows,
const TSize NCols,
Col* Columns,
ValueType* Values
)
{
#pragma omp parallel
{
#pragma omp for
for (int i_row=0; i_row<static_cast<int>(NRows); i_row++) {
const TIndexType row_beg = CPtr[i_row];
const TIndexType row_end = CPtr[i_row + 1];
for(IndexType j = 1; j < row_end - row_beg; ++j) {
const IndexType c = Columns[j + row_beg];
const double v = Values[j + row_beg];
SignedIndexType i = j - 1;
while(i >= 0 && Columns[i + row_beg] > c) {
KRATOS_DEBUG_ERROR_IF(Columns[i + row_beg] > static_cast<Col>(NCols)) << " Index for column: " << i + row_beg << ". Index " << Columns[i + row_beg] <<" is greater than the number of columns " << NCols << std::endl;
Columns[i + 1 + row_beg] = Columns[i + row_beg];
Values[i + 1 + row_beg] = Values[i + row_beg];
i--;
}
Columns[i + 1 + row_beg] = c;
Values[i + 1 + row_beg] = v;
}
}
}
}
/**
* @brief This method assembles several sparse matrix blocks into one large sparse matrix
* @param rMatrix The resulting assembled matrix
* @param rMatricespBlocks The pointers to the matrices we are interested in assembling
* @param ContributionCoefficients The matrix of coefficients applied to each block (passed by value, so it can be omitted)
* @param TransposeBlocks The matrix of flags telling us which blocks to transpose (passed by value, so it can be omitted)
*/
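// Hypothetical usage sketch (names assumed): assemble a 2x2 block system where
// the lower-left block is the transpose of G:
//   DenseMatrix<CompressedMatrix*> blocks(2, 2);  // fill with &K, &G, &G, &Z (all valid matrices)
//   DenseMatrix<bool> transpose_flags(2, 2);      // all false except transpose_flags(1, 0) = true
//   AssembleSparseMatrixByBlocks(rMatrix, blocks, DenseMatrix<double>(0, 0), transpose_flags);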
static inline void AssembleSparseMatrixByBlocks(
CompressedMatrix& rMatrix,
const DenseMatrix<CompressedMatrix*>& rMatricespBlocks,
DenseMatrix<double> ContributionCoefficients = DenseMatrix<double>(0,0),
DenseMatrix<bool> TransposeBlocks = DenseMatrix<bool>(0,0)
)
{
const SizeType number_of_rows_blocks = rMatricespBlocks.size1();
const SizeType number_of_columns_blocks = rMatricespBlocks.size2();
// Fill the matrices if they are empty
if (ContributionCoefficients.size1() == 0 && ContributionCoefficients.size2() == 0) {
ContributionCoefficients.resize(number_of_rows_blocks, number_of_columns_blocks);
for (IndexType i = 0; i < number_of_rows_blocks; ++i) {
for (IndexType j = 0; j < number_of_columns_blocks; ++j) {
ContributionCoefficients(i, j) = 1.0;
}
}
} else {
KRATOS_ERROR_IF(ContributionCoefficients.size1() != number_of_rows_blocks || ContributionCoefficients.size2() != number_of_columns_blocks) << "The ContributionCoefficients dimensions " << ContributionCoefficients.size1() << " and " << ContributionCoefficients.size2() << " do not coincide with the dimensions of rMatricespBlocks " << number_of_rows_blocks << " and " << number_of_columns_blocks << std::endl;
}
if (TransposeBlocks.size1() == 0 && TransposeBlocks.size2() == 0) {
TransposeBlocks.resize(number_of_rows_blocks, number_of_columns_blocks);
for (IndexType i = 0; i < number_of_rows_blocks; ++i) {
for (IndexType j = 0; j < number_of_columns_blocks; ++j) {
TransposeBlocks(i, j) = false;
}
}
} else {
KRATOS_ERROR_IF(TransposeBlocks.size1() != number_of_rows_blocks || TransposeBlocks.size2() != number_of_columns_blocks) << "The TransposeBlocks dimensions " << TransposeBlocks.size1() << " and " << TransposeBlocks.size2() << " do not coincide with the dimensions of rMatricespBlocks " << number_of_rows_blocks << " and " << number_of_columns_blocks << std::endl;
}
// Compute total size and check consistency of the different blocks
SizeType nrows = 0, ncols = 0;
std::vector<SizeType> row_sizes(number_of_rows_blocks);
std::vector<SizeType> column_sizes(number_of_columns_blocks);
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
if (TransposeBlocks(i, 0)) {
row_sizes[i] = (*rMatricespBlocks(i, 0)).size2();
} else {
row_sizes[i] = (*rMatricespBlocks(i, 0)).size1();
}
nrows += row_sizes[i];
}
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
if (TransposeBlocks(0, j)) {
column_sizes[j] = (*rMatricespBlocks(0, j)).size1();
} else {
column_sizes[j] = (*rMatricespBlocks(0, j)).size2();
}
ncols += column_sizes[j];
}
// Check consistency of all blocks
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
if (TransposeBlocks(i, j)) {
KRATOS_ERROR_IF((*rMatricespBlocks(i, j)).size2() != row_sizes[i] || (*rMatricespBlocks(i, j)).size1() != column_sizes[j]) << " Not consistent size in block " << i << ", " << j << ".\t" << (*rMatricespBlocks(i, j)).size2() << ", " << (*rMatricespBlocks(i, j)).size1() << " vs " << row_sizes[i] << ", " << column_sizes[j] << std::endl;
} else {
KRATOS_ERROR_IF((*rMatricespBlocks(i, j)).size1() != row_sizes[i] || (*rMatricespBlocks(i, j)).size2() != column_sizes[j]) << " Not consistent size in block " << i << ", " << j << ".\t" << (*rMatricespBlocks(i, j)).size1() << ", " << (*rMatricespBlocks(i, j)).size2() << " vs " << row_sizes[i] << ", " << column_sizes[j] << std::endl;
}
}
}
// Exit early in the case of an empty matrix
if ((nrows == 0) || (ncols == 0))
return;
// We will compute nonzero terms
IndexType* matrix_ptr = new IndexType[nrows + 1];
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(nrows + 1); ++i)
matrix_ptr[i] = 0;
#ifdef KRATOS_DEBUG
IndexType check_non_zero = 0;
DenseMatrix<IndexType> check_non_zero_blocks(number_of_rows_blocks, number_of_columns_blocks);
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
check_non_zero_blocks(i, j) = 0;
}
}
#endif
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int k=0; k<static_cast<int>(row_sizes[i]); ++k) {
IndexType matrix_cols_aux = 0;
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
#ifdef KRATOS_DEBUG
IndexType partial_matrix_cols_aux = 0;
#endif
// Skip if empty matrix
CompressedMatrix& r_matrix = *rMatricespBlocks(i, j);
if (r_matrix.nnz() > 0) {
if (TransposeBlocks(i, j)) {
// We compute the transposed matrix
const SizeType size_system_1 = r_matrix.size1();
const SizeType size_system_2 = r_matrix.size2();
CompressedMatrix transpose(size_system_2, size_system_1);
TransposeMatrix<CompressedMatrix, CompressedMatrix>(transpose, r_matrix);
ComputeNonZeroBlocks(transpose, k, matrix_cols_aux);
#ifdef KRATOS_DEBUG
ComputeNonZeroBlocks(transpose, k, partial_matrix_cols_aux);
#endif
} else {
ComputeNonZeroBlocks(r_matrix, k, matrix_cols_aux);
#ifdef KRATOS_DEBUG
ComputeNonZeroBlocks(r_matrix, k, partial_matrix_cols_aux);
#endif
}
}
#ifdef KRATOS_DEBUG
check_non_zero_blocks(i, j) += partial_matrix_cols_aux;
#endif
}
IndexType& r_matrix_ptr_value = matrix_ptr[std::accumulate(row_sizes.begin(), row_sizes.begin() + i, 0) + k + 1];
#pragma omp atomic
r_matrix_ptr_value += matrix_cols_aux;
#ifdef KRATOS_DEBUG
#pragma omp atomic
check_non_zero += matrix_cols_aux;
#endif
}
}
}
// Auxiliary values
std::partial_sum(matrix_ptr, matrix_ptr + nrows + 1, matrix_ptr);
const SizeType nonzero_values = matrix_ptr[nrows];
#ifdef KRATOS_DEBUG
SizeType total_nnz = 0;
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
const SizeType block_nnz = rMatricespBlocks(i, j)->nnz();
KRATOS_ERROR_IF_NOT(check_non_zero_blocks(i, j) == block_nnz) << "Inconsistent number of non-zero values. Check 0: " << block_nnz << " vs " << check_non_zero_blocks(i, j) << ". Block: " << i << ", " << j << std::endl;
total_nnz += block_nnz;
}
}
KRATOS_ERROR_IF_NOT(check_non_zero == total_nnz) << "Inconsistent number of non-zero values. Check 1: " << total_nnz << " vs " << check_non_zero << std::endl;
KRATOS_ERROR_IF_NOT(nonzero_values == total_nnz) << "Inconsistent number of non-zero values. Check 2: " << total_nnz << " vs " << nonzero_values << std::endl;
#endif
// Initialize matrix with the corresponding non-zero values
rMatrix = CompressedMatrix(nrows, ncols, nonzero_values);
// Fill the new matrix
double* Matrix_values = rMatrix.value_data().begin();
IndexType* Matrix_index1 = rMatrix.index1_data().begin();
IndexType* Matrix_index2 = rMatrix.index2_data().begin();
Matrix_index1[0] = 0;
for (IndexType i = 0; i < nrows; ++i)
Matrix_index1[i+1] = Matrix_index1[i] + (matrix_ptr[i + 1] - matrix_ptr[i]);
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int k=0; k<static_cast<int>(row_sizes[i]); ++k) {
const IndexType row_beg = matrix_ptr[std::accumulate(row_sizes.begin(), row_sizes.begin() + i, 0) + k];
IndexType row_end = row_beg;
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
const SizeType initial_index_column = std::accumulate(column_sizes.begin(), column_sizes.begin() + j, 0);
// Skip if empty matrix
CompressedMatrix& r_matrix = *rMatricespBlocks(i, j);
if (r_matrix.nnz() > 0) {
if (TransposeBlocks(i, j)) {
// We compute the transposed matrix
const SizeType size_system_1 = r_matrix.size1();
const SizeType size_system_2 = r_matrix.size2();
CompressedMatrix transpose(size_system_2, size_system_1);
TransposeMatrix<CompressedMatrix, CompressedMatrix>(transpose, r_matrix);
ComputeAuxiliarValuesBlocks(transpose, Matrix_index2, Matrix_values, k, row_end, initial_index_column, ContributionCoefficients(i, j));
} else {
ComputeAuxiliarValuesBlocks(r_matrix, Matrix_index2, Matrix_values, k, row_end, initial_index_column, ContributionCoefficients(i, j));
}
}
}
}
}
}
// Close the matrix
rMatrix.set_filled(nrows+1, nonzero_values);
// Release memory
delete[] matrix_ptr;
}
/**
* @brief This is a method to count the nonzero entries of a given row of a block
* @param rMatrix The auxiliary block
* @param CurrentRow The row currently being computed
* @param rNonZeroColsAux2 The running counter of nonzero columns
*/
static inline void ComputeNonZeroBlocks(
const CompressedMatrix& rMatrix,
const int CurrentRow,
IndexType& rNonZeroColsAux2
)
{
// Get access to aux_K data
const IndexType* aux_matrix_index1 = rMatrix.index1_data().begin();
const IndexType row_begin = aux_matrix_index1[CurrentRow];
const IndexType row_end = aux_matrix_index1[CurrentRow + 1];
rNonZeroColsAux2 += row_end - row_begin;
}
/**
* @brief This is a method to compute the contribution of the auxiliary blocks
* @param rMatrix The auxiliary block
* @param AuxIndex2 The indexes of the nonzero columns
* @param AuxVals The values of the final matrix
* @param CurrentRow The row currently being computed
* @param RowEnd The last column computed
* @param InitialIndexColumn The initial column index of the auxiliary block in the final matrix
* @param ContributionCoefficient The coefficient applied to the block values (defaults to 1.0)
*/
static inline void ComputeAuxiliarValuesBlocks(
const CompressedMatrix& rMatrix,
IndexType* AuxIndex2,
double* AuxVals,
const int CurrentRow,
IndexType& RowEnd,
const SizeType InitialIndexColumn,
const double ContributionCoefficient = 1.0
)
{
// Get access to aux_K data
const double* aux_values = rMatrix.value_data().begin();
const IndexType* aux_Matrix_index1 = rMatrix.index1_data().begin();
const IndexType* aux_Matrix_index2 = rMatrix.index2_data().begin();
const IndexType aux_Matrix_row_begin = aux_Matrix_index1[CurrentRow];
const IndexType aux_Matrix_row_end = aux_Matrix_index1[CurrentRow + 1];
for (IndexType j=aux_Matrix_row_begin; j<aux_Matrix_row_end; j++) {
const IndexType col_index = InitialIndexColumn + aux_Matrix_index2[j];
AuxIndex2[RowEnd] = col_index;
AuxVals[RowEnd] = ContributionCoefficient * aux_values[j];
++RowEnd;
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const
{
return "SparseMatrixMultiplicationUtility";
}
/// Print information about this object.
void PrintInfo (std::ostream& rOStream) const
{
rOStream << "SparseMatrixMultiplicationUtility";
}
/// Print object's data.
void PrintData (std::ostream& rOStream) const
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method merges two sorted rows of column indices
* @param Column1 Pointer to the beginning of the first row's column indices
* @param Column1End Pointer past the end of the first row's column indices
* @param Column2 Pointer to the beginning of the second row's column indices
* @param Column2End Pointer past the end of the second row's column indices
* @param Column3 Pointer to the output buffer for the merged indices
* @return Pointer past the end of the merged row
*/
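// Example: Column1 = [1, 3, 5] and Column2 = [2, 3, 7] merge (without
// duplicating the shared index 3) into Column3 = [1, 2, 3, 5, 7].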
template <bool TNeedOut, class TIndex>
static TIndex* MergeRows(
const TIndex* Column1,
const TIndex* Column1End,
const TIndex* Column2,
const TIndex* Column2End,
TIndex* Column3
)
{
while(Column1 != Column1End && Column2 != Column2End) {
TIndex c1 = *Column1;
TIndex c2 = *Column2;
if (c1 < c2) {
if (TNeedOut) *Column3 = c1;
++Column1;
} else if (c1 == c2) {
if (TNeedOut) *Column3 = c1;
++Column1;
++Column2;
} else {
if (TNeedOut) *Column3 = c2;
++Column2;
}
++Column3;
}
if (TNeedOut) {
if (Column1 < Column1End) {
return std::copy(Column1, Column1End, Column3);
} else if (Column2 < Column2End) {
return std::copy(Column2, Column2End, Column3);
} else {
return Column3;
}
} else {
return Column3 + (Column1End - Column1) + (Column2End - Column2);
}
}
/**
* @brief This method merges two sorted rows, scaling and summing their values
* @param rAlpha1 The coefficient of the first matrix
* @param Column1 Pointer to the beginning of the first row's column indices
* @param Column1End Pointer past the end of the first row's column indices
* @param Value1 The values of the first matrix
* @param rAlpha2 The coefficient of the second matrix
* @param Column2 Pointer to the beginning of the second row's column indices
* @param Column2End Pointer past the end of the second row's column indices
* @param Value2 The values of the second matrix
* @param Column3 Pointer to the output buffer for the merged indices
* @param Value3 Pointer to the output buffer for the merged values
* @return Pointer past the end of the merged row
*/
template <class TIndex, class TValueType>
static TIndex* MergeRows(
const TValueType &rAlpha1,
const TIndex* Column1,
const TIndex* Column1End,
const TValueType *Value1,
const TValueType &rAlpha2,
const TIndex* Column2,
const TIndex* Column2End,
const TValueType *Value2,
TIndex* Column3,
TValueType *Value3
)
{
while(Column1 != Column1End && Column2 != Column2End) {
TIndex c1 = *Column1;
TIndex c2 = *Column2;
if (c1 < c2) {
++Column1;
*Column3 = c1;
*Value3 = rAlpha1 * (*Value1++);
} else if (c1 == c2) {
++Column1;
++Column2;
*Column3 = c1;
*Value3 = rAlpha1 * (*Value1++) + rAlpha2 * (*Value2++);
} else {
++Column2;
*Column3 = c2;
*Value3 = rAlpha2 * (*Value2++);
}
++Column3;
++Value3;
}
while(Column1 < Column1End) {
*Column3++ = *Column1++;
*Value3++ = rAlpha1 * (*Value1++);
}
while(Column2 < Column2End) {
*Column3++ = *Column2++;
*Value3++ = rAlpha2 * (*Value2++);
}
return Column3;
}
/**
* @brief This method computes the width (number of nonzeros) of a row of the product matrix
* @param AColumn Pointer to the beginning of the A row's column indices
* @param AColumnEnd Pointer past the end of the A row's column indices
* @param BPtr The array containing the row pointers of the second matrix
* @param BColumn The column indices of the second matrix
* @param Tmp1Column First temporary buffer of column indices
* @param Tmp2Column Second temporary buffer of column indices
* @param Tmp3Column Third temporary buffer of column indices
* @return The width of the resulting row
*/
template <class TIndex>
static TIndex ProdRowWidth(
const TIndex* AColumn,
const TIndex* AColumnEnd,
const TIndex* BPtr,
const TIndex* BColumn,
TIndex* Tmp1Column,
TIndex* Tmp2Column,
TIndex* Tmp3Column
)
{
const TIndex nrow = AColumnEnd - AColumn;
/* No rows to merge, nothing to do */
if (nrow == 0) return 0;
/* Single row, the width is that of the corresponding row of B */
if (nrow == 1) return BPtr[*AColumn + 1] - BPtr[*AColumn];
/* Two rows, merge them */
if (nrow == 2) {
TIndex a1 = AColumn[0];
TIndex a2 = AColumn[1];
return MergeRows<false>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column) - Tmp1Column;
}
/* Generic case (more than two rows).
*
* Merge rows by pairs, then merge the results together.
* When merging two rows, the result is always wider (or equal).
* Merging by pairs allows us to work with short rows as often as possible.
*/
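// e.g. for five rows r1..r5: merge(r1, r2) -> Tmp1, merge(r3, r4) -> Tmp2,
// merge(Tmp1, Tmp2) -> Tmp3 (swapped back into Tmp1), and the tail r5 is
// merged last, so the widest intermediate rows appear as late as possible.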
// Merge first two.
TIndex a1 = *AColumn++;
TIndex a2 = *AColumn++;
TIndex c_col1 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column ) - Tmp1Column;
// Go by pairs.
while(AColumn + 1 < AColumnEnd) {
a1 = *AColumn++;
a2 = *AColumn++;
TIndex c_col2 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column;
if (AColumn == AColumnEnd) {
return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column;
} else {
c_col1 = MergeRows<true>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column;
std::swap(Tmp1Column, Tmp3Column);
}
}
// Merge the tail.
a2 = *AColumn;
return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column;
}
/**
* @brief This method computes a row of the product matrix
* @param AColumn Pointer to the beginning of the A row's column indices
* @param AColumnEnd Pointer past the end of the A row's column indices
* @param AValue The values of the first matrix
* @param BPtr The array containing the row pointers of the second matrix
* @param BColumn The column indices of the second matrix
* @param BValue The values of the second matrix
* @param OutColumn Indexes of the columns of the output matrix
* @param OutValue Values of the columns of the output matrix
* @param Tmp2Column Temporary buffer of column indices
* @param Tmp2Value Temporary buffer of values
* @param Tmp3Column Temporary buffer of column indices
* @param Tmp3Value Temporary buffer of values
*/
template <class TIndex, class TValueType>
static void ProdRow(
const TIndex* AColumn,
const TIndex* AColumnEnd,
const TValueType *AValue,
const TIndex* BPtr,
const TIndex* BColumn,
const TValueType *BValue,
TIndex* OutColumn,
TValueType *OutValue,
TIndex* Tmp2Column,
TValueType *Tmp2Value,
TIndex* Tmp3Column,
TValueType *Tmp3Value
)
{
const TIndex nrow = AColumnEnd - AColumn;
/* No rows to merge, nothing to do */
if (nrow == 0) return;
/* Single row, just copy it to output */
if (nrow == 1) {
TIndex ac = *AColumn;
TValueType av = *AValue;
const TValueType *bv = BValue + BPtr[ac];
const TIndex* bc = BColumn + BPtr[ac];
const TIndex* be = BColumn + BPtr[ac+1];
while(bc != be) {
*OutColumn++ = *bc++;
*OutValue++ = av * (*bv++);
}
return;
}
/* Two rows, merge them */
if (nrow == 2) {
TIndex ac1 = AColumn[0];
TIndex ac2 = AColumn[1];
TValueType av1 = AValue[0];
TValueType av2 = AValue[1];
MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], OutColumn, OutValue );
return;
}
/* Generic case (more than two rows).
*
* Merge rows by pairs, then merge the results together.
* When merging two rows, the result is always wider (or equal).
* Merging by pairs allows us to work with short rows as often as possible.
*/
// Merge first two.
TIndex ac1 = *AColumn++;
TIndex ac2 = *AColumn++;
TValueType av1 = *AValue++;
TValueType av2 = *AValue++;
TIndex* tm1_col = OutColumn;
TValueType *tm1_val = OutValue;
TIndex c_col1 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], tm1_col, tm1_val ) - tm1_col;
// Go by pairs.
while(AColumn + 1 < AColumnEnd) {
ac1 = *AColumn++;
ac2 = *AColumn++;
av1 = *AValue++;
av2 = *AValue++;
TIndex c_col2 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp2Column, Tmp2Value ) - Tmp2Column;
c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, amgcl::math::identity<TValueType>(), Tmp2Column, Tmp2Column + c_col2, Tmp2Value, Tmp3Column, Tmp3Value ) - Tmp3Column;
std::swap(Tmp3Column, tm1_col);
std::swap(Tmp3Value, tm1_val);
}
// Merge the tail if there is one.
if (AColumn < AColumnEnd) {
ac2 = *AColumn++;
av2 = *AValue++;
c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp3Column, Tmp3Value ) - Tmp3Column;
std::swap(Tmp3Column, tm1_col);
std::swap(Tmp3Value, tm1_val);
}
// If we are lucky, tm1 now points to out.
// Otherwise, copy the results.
if (tm1_col != OutColumn) {
std::copy(tm1_col, tm1_col + c_col1, OutColumn);
std::copy(tm1_val, tm1_val + c_col1, OutValue);
}
return;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class SparseMatrixMultiplicationUtility
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
// /****************************** INPUT STREAM FUNCTION ******************************/
// /***********************************************************************************/
//
// template<class TPointType, class TPointerType>
// inline std::istream& operator >> (std::istream& rIStream,
// SparseMatrixMultiplicationUtility& rThis);
//
// /***************************** OUTPUT STREAM FUNCTION ******************************/
// /***********************************************************************************/
//
// template<class TPointType, class TPointerType>
// inline std::ostream& operator << (std::ostream& rOStream,
// const SparseMatrixMultiplicationUtility& rThis)
// {
// return rOStream;
// }
///@}
} // namespace Kratos.
#endif // KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED defined
|
algebraic_flux_corrected_steady_scalar_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Suneth Warnakulasuriya
//
#if !defined(KRATOS_ALGEBRAIC_FLUX_CORRECTED_SCALAR_STEADY_SCHEME)
#define KRATOS_ALGEBRAIC_FLUX_CORRECTED_SCALAR_STEADY_SCHEME
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "utilities/openmp_utils.h"
#include "utilities/parallel_utilities.h"
// Application includes
#include "custom_strategies/relaxed_dof_updater.h"
#include "rans_application_variables.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/**
* @brief Algebraic flux corrected scalar steady transport scheme.
*
* This scheme is based on the following publication:
*
* D. Kuzmin, Algebraic flux correction for finite element discretizations of coupled systems,
* Computational Methods for Coupled Problems in Science and Engineering II, CIMNE,
* Barcelona, (2007), pp. 653–656.
*
* This scheme can only be used to solve steady state problems with elements derived
* from ConvectionDiffusionReactionElement.
*
* @tparam TSparseSpace Sparse space type
* @tparam TDenseSpace Dense space type
*
* @see ConvectionDiffusionReactionElement
*/
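// In short: each local LHS is augmented with an artificial diffusion matrix
// (CalculateArtificialDiffusionMatrix) and the excess diffusion is restored
// through limited anti-diffusive fluxes (AddAntiDiffusiveFluxes).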
template <class TSparseSpace, class TDenseSpace>
class AlgebraicFluxCorrectedSteadyScalarScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(AlgebraicFluxCorrectedSteadyScalarScheme);
using BaseType = Scheme<TSparseSpace, TDenseSpace>;
using DofsArrayType = typename BaseType::DofsArrayType;
using TSystemMatrixType = typename BaseType::TSystemMatrixType;
using TSystemVectorType = typename BaseType::TSystemVectorType;
using LocalSystemVectorType = typename BaseType::LocalSystemVectorType;
using LocalSystemMatrixType = typename BaseType::LocalSystemMatrixType;
///@}
///@name Life Cycle
///@{
AlgebraicFluxCorrectedSteadyScalarScheme(
const double RelaxationFactor,
const Flags BoundaryFlags)
: BaseType(),
mRelaxationFactor(RelaxationFactor),
mBoundaryFlags(BoundaryFlags),
mrPeriodicIdVar(Variable<int>::StaticObject())
{
KRATOS_INFO("AlgebraicFluxCorrectedSteadyScalarScheme")
<< " Using residual based algebraic flux corrected scheme with "
"relaxation "
"factor = "
<< std::scientific << mRelaxationFactor << "\n";
mpDofUpdater = Kratos::make_unique<DofUpdaterType>(mRelaxationFactor);
}
AlgebraicFluxCorrectedSteadyScalarScheme(
const double RelaxationFactor,
const Flags BoundaryFlags,
const Variable<int>& rPeriodicIdVar)
: BaseType(),
mRelaxationFactor(RelaxationFactor),
mBoundaryFlags(BoundaryFlags),
mrPeriodicIdVar(rPeriodicIdVar)
{
KRATOS_INFO("AlgebraicFluxCorrectedSteadyScalarScheme")
<< " Using periodic residual based algebraic flux corrected scheme "
"with relaxation "
"factor = "
<< std::scientific << mRelaxationFactor << "\n";
mpDofUpdater = Kratos::make_unique<DofUpdaterType>(mRelaxationFactor);
}
~AlgebraicFluxCorrectedSteadyScalarScheme() override = default;
///@}
///@name Operators
///@{
void Initialize(ModelPart& rModelPart) override
{
KRATOS_TRY
BaseType::Initialize(rModelPart);
block_for_each(rModelPart.Nodes(), [&](ModelPart::NodeType& rNode) {
rNode.SetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX, 0.0);
rNode.SetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX, 0.0);
rNode.SetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT, 0.0);
rNode.SetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT, 0.0);
});
if (mrPeriodicIdVar != Variable<int>::StaticObject()) {
block_for_each(rModelPart.Conditions(), [&](const ModelPart::ConditionType& rCondition) {
if (rCondition.Is(PERIODIC)) {
// this only supports 2 noded periodic conditions
KRATOS_ERROR_IF(rCondition.GetGeometry().PointsNumber() != 2)
<< this->Info() << " only supports two noded periodic conditions. Found "
<< rCondition.Info() << " with "
<< rCondition.GetGeometry().PointsNumber() << " nodes.\n";
const auto& r_node_0 = rCondition.GetGeometry()[0];
const std::size_t r_node_0_pair_id =
r_node_0.FastGetSolutionStepValue(mrPeriodicIdVar);
const auto& r_node_1 = rCondition.GetGeometry()[1];
const std::size_t r_node_1_pair_id =
r_node_1.FastGetSolutionStepValue(mrPeriodicIdVar);
KRATOS_ERROR_IF(r_node_0_pair_id != r_node_1.Id())
<< "Periodic condition pair id mismatch in "
<< mrPeriodicIdVar.Name() << ". [ " << r_node_0_pair_id
<< " != " << r_node_1.Id() << " ].\n";
KRATOS_ERROR_IF(r_node_1_pair_id != r_node_0.Id())
<< "Periodic condition pair id mismatch in "
<< mrPeriodicIdVar.Name() << ". [ " << r_node_1_pair_id
<< " != " << r_node_0.Id() << " ].\n";
}
});
}
// Allocate auxiliary memory.
const auto num_threads = OpenMPUtils::GetNumThreads();
mAntiDiffusiveFlux.resize(num_threads);
mAntiDiffusiveFluxCoefficients.resize(num_threads);
mValues.resize(num_threads);
mAuxMatrix.resize(num_threads);
KRATOS_CATCH("");
}
void InitializeNonLinIteration(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
auto& r_nodes = rModelPart.Nodes();
block_for_each(r_nodes, [&](ModelPart::NodeType& rNode) {
rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) = 0.0;
rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) = 0.0;
rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = 0.0;
rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = 0.0;
});
auto& r_elements = rModelPart.Elements();
const int number_of_elements = r_elements.size();
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
#pragma omp parallel
{
Matrix left_hand_side, artificial_diffusion, aux_matrix;
Vector right_hand_side, values;
std::vector<IndexType> equation_ids;
#pragma omp for
for (int i = 0; i < number_of_elements; ++i) {
auto& r_element = *(r_elements.begin() + i);
this->CalculateSystemMatrix<Element>(r_element, left_hand_side,
right_hand_side, aux_matrix,
r_current_process_info);
this->CalculateArtificialDiffusionMatrix(artificial_diffusion, left_hand_side);
r_element.EquationIdVector(equation_ids, r_current_process_info);
r_element.GetValuesVector(values);
const int size = artificial_diffusion.size1();
Vector p_plus = ZeroVector(size);
Vector p_minus = ZeroVector(size);
Vector q_plus = ZeroVector(size);
Vector q_minus = ZeroVector(size);
auto& r_geometry = r_element.GetGeometry();
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; j++) {
if (i != j) {
const double f_ij = artificial_diffusion(i, j) *
(values[j] - values[i]);
if (left_hand_side(j, i) <= left_hand_side(i, j)) {
p_plus[i] += std::max(0.0, f_ij);
p_minus[i] -= std::max(0.0, -f_ij);
}
if (equation_ids[i] < equation_ids[j]) {
q_plus[i] += std::max(0.0, -f_ij);
q_minus[i] -= std::max(0.0, f_ij);
q_plus[j] += std::max(0.0, f_ij);
q_minus[j] -= std::max(0.0, -f_ij);
}
}
}
}
for (int i = 0; i < size; ++i) {
auto& r_node = r_geometry[i];
r_node.SetLock();
r_node.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) += p_plus[i];
r_node.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) += q_plus[i];
r_node.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) += p_minus[i];
r_node.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) += q_minus[i];
r_node.UnSetLock();
}
}
}
if (mrPeriodicIdVar != Variable<int>::StaticObject()) {
block_for_each(rModelPart.Conditions(), [&](ModelPart::ConditionType& rCondition) {
if (rCondition.Is(PERIODIC)) {
auto& r_node_0 = rCondition.GetGeometry()[0];
auto& r_node_1 = rCondition.GetGeometry()[1];
double p_plus = r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX);
double q_plus = r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
double p_minus = r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX);
double q_minus = r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
p_plus += r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX);
q_plus += r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
p_minus += r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX);
q_minus += r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
r_node_0.SetLock();
r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) = p_plus;
r_node_0.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_plus;
r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) = p_minus;
r_node_0.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_minus;
r_node_0.UnSetLock();
r_node_1.SetLock();
r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX) = p_plus;
r_node_1.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_plus;
r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX) = p_minus;
r_node_1.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT) = q_minus;
r_node_1.UnSetLock();
}
});
}
Communicator& r_communicator = rModelPart.GetCommunicator();
r_communicator.AssembleNonHistoricalData(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX);
r_communicator.AssembleNonHistoricalData(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
r_communicator.AssembleNonHistoricalData(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX);
r_communicator.AssembleNonHistoricalData(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
KRATOS_CATCH("")
}
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb) override
{
KRATOS_TRY;
mpDofUpdater->UpdateDofs(rDofSet, rDx);
KRATOS_CATCH("");
}
void Clear() override
{
this->mpDofUpdater->Clear();
}
void CalculateSystemContributions(
Element& rElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
const auto k = OpenMPUtils::ThisThread();
this->CalculateSystemMatrix<Element>(rElement, rLHS_Contribution, rRHS_Contribution,
mAuxMatrix[k], rCurrentProcessInfo);
rElement.EquationIdVector(rEquationIdVector, rCurrentProcessInfo);
this->CalculateArtificialDiffusionMatrix(mAuxMatrix[k], rLHS_Contribution);
AddAntiDiffusiveFluxes(rRHS_Contribution, rLHS_Contribution, rElement,
mAuxMatrix[k]);
noalias(rLHS_Contribution) += mAuxMatrix[k];
rElement.GetValuesVector(mValues[k]);
noalias(rRHS_Contribution) -= prod(rLHS_Contribution, mValues[k]);
KRATOS_CATCH("");
}
void CalculateSystemContributions(
Condition& rCondition,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
const auto k = OpenMPUtils::ThisThread();
this->CalculateSystemMatrix<Condition>(rCondition, rLHS_Contribution, rRHS_Contribution,
mAuxMatrix[k], rCurrentProcessInfo);
rCondition.EquationIdVector(rEquationIdVector, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void CalculateRHSContribution(
Element& rElement,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
const auto k = OpenMPUtils::ThisThread();
CalculateSystemContributions(rElement, mAuxMatrix[k], rRHS_Contribution,
rEquationIdVector, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void CalculateRHSContribution(
Condition& rCondition,
LocalSystemVectorType& rRHS_Contribution,
Condition::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
const auto k = OpenMPUtils::ThisThread();
CalculateSystemContributions(rCondition, mAuxMatrix[k], rRHS_Contribution,
rEquationIdVector, rCurrentProcessInfo);
KRATOS_CATCH("");
}
///@}
protected:
///@name Protected Operators
///@{
///@}
private:
///@name Member Variables
///@{
using DofUpdaterType = RelaxedDofUpdater<TSparseSpace>;
using DofUpdaterPointerType = typename DofUpdaterType::UniquePointer;
DofUpdaterPointerType mpDofUpdater;
double mRelaxationFactor;
const Flags mBoundaryFlags;
const Variable<int>& mrPeriodicIdVar;
std::vector<LocalSystemMatrixType> mAuxMatrix;
std::vector<LocalSystemMatrixType> mAntiDiffusiveFluxCoefficients;
std::vector<LocalSystemMatrixType> mAntiDiffusiveFlux;
std::vector<LocalSystemVectorType> mValues;
/**
* @brief Common method to calculate Element and Condition system matrices
*
* @tparam TItem Type of item (can be ElementType or ConditionType)
* @param rItem Item instance
* @param rLeftHandSide Lefthandside matrix
* @param rRightHandSide Righthandside vector
* @param rAuxMatrix Auxiliary matrix
* @param rCurrentProcessInfo Current process info
*/
template <typename TItem>
void CalculateSystemMatrix(
TItem& rItem,
LocalSystemMatrixType& rLeftHandSide,
LocalSystemVectorType& rRightHandSide,
LocalSystemMatrixType& rAuxMatrix,
const ProcessInfo& rCurrentProcessInfo)
{
KRATOS_TRY
rItem.InitializeNonLinearIteration(rCurrentProcessInfo);
rItem.CalculateLocalSystem(rLeftHandSide, rRightHandSide, rCurrentProcessInfo);
rItem.CalculateLocalVelocityContribution(rAuxMatrix, rRightHandSide, rCurrentProcessInfo);
if (rAuxMatrix.size1() != 0) {
noalias(rLeftHandSide) += rAuxMatrix;
}
KRATOS_CATCH("");
}
/**
* @brief Calculates artificial diffusion matrix for given discretized matrix
*
* @param rOutput Diffusion matrix
* @param rInput Input matrix
*/
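// Example of the construction below: for rInput = [[2, -3], [1, 4]],
// rOutput(0, 1) = -max(max(-3, 1), 0) = -1, so rOutput = [[1, -1], [-1, 1]]:
// symmetric non-positive off-diagonals, with each row summing to zero.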
void CalculateArtificialDiffusionMatrix(
Matrix& rOutput,
const Matrix& rInput)
{
const IndexType size = rInput.size1();
if (rOutput.size1() != size || rOutput.size2() != size) {
rOutput.resize(size, size, false);
}
rOutput = ZeroMatrix(size, size);
for (IndexType i = 0; i < size; ++i) {
for (IndexType j = i + 1; j < size; ++j) {
rOutput(i, j) = -std::max(std::max(rInput(i, j), rInput(j, i)), 0.0);
rOutput(j, i) = rOutput(i, j);
}
}
for (IndexType i = 0; i < size; ++i) {
double value = 0.0;
for (IndexType j = 0; j < size; ++j) {
value -= rOutput(i, j);
}
rOutput(i, i) = value;
}
}
/**
* @brief Calculates anti-diffusive terms
*
* The diffusion added by CalculateArtificialDiffusionMatrix alters the original problem. Therefore
* anti-diffusion terms are calculated to cancel the diffusion where it is not necessary for
* the stabilization of the Convection-Diffusion-Reaction scalar equation.
*
* @tparam TItem Item type (can be ElementType or ConditionType)
* @param rRHS Righthandside vector
* @param rLHS Lefthandside matrix
* @param rItem Item instance
* @param rArtificialDiffusion Calculated artificial diffusion
*/
template <typename TItem>
void AddAntiDiffusiveFluxes(
Vector& rRHS,
const Matrix& rLHS,
TItem& rItem,
const Matrix& rArtificialDiffusion)
{
KRATOS_TRY
const auto k = OpenMPUtils::ThisThread();
const auto size = rRHS.size();
auto& r_anti_diffusive_flux_coefficients = mAntiDiffusiveFluxCoefficients[k];
auto& r_anti_diffusive_flux = mAntiDiffusiveFlux[k];
auto& r_values = mValues[k];
rItem.GetValuesVector(r_values);
if (r_anti_diffusive_flux_coefficients.size1() != size ||
r_anti_diffusive_flux_coefficients.size2() != size) {
r_anti_diffusive_flux_coefficients.resize(size, size, false);
}
if (r_anti_diffusive_flux.size1() != size || r_anti_diffusive_flux.size2() != size) {
r_anti_diffusive_flux.resize(size, size, false);
}
noalias(r_anti_diffusive_flux_coefficients) = ZeroMatrix(size, size);
noalias(r_anti_diffusive_flux) = ZeroMatrix(size, size);
for (IndexType i = 0; i < size; ++i) {
const auto& r_node_i = rItem.GetGeometry()[i];
double r_plus_i{0.0}, r_minus_i{0.0};
CalculateAntiDiffusiveFluxR(r_plus_i, r_minus_i, r_node_i);
for (IndexType j = 0; j < size; ++j) {
if (i != j) {
r_anti_diffusive_flux(i, j) =
rArtificialDiffusion(i, j) * (r_values[j] - r_values[i]);
if (rLHS(j, i) <= rLHS(i, j)) {
if (r_anti_diffusive_flux(i, j) > 0.0) {
r_anti_diffusive_flux_coefficients(i, j) = r_plus_i;
} else if (r_anti_diffusive_flux(i, j) < 0.0) {
r_anti_diffusive_flux_coefficients(i, j) = r_minus_i;
} else {
r_anti_diffusive_flux_coefficients(i, j) = 1.0;
}
r_anti_diffusive_flux_coefficients(j, i) =
r_anti_diffusive_flux_coefficients(i, j);
}
}
}
}
for (IndexType i = 0; i < size; ++i) {
for (IndexType j = 0; j < size; ++j) {
rRHS[i] += r_anti_diffusive_flux_coefficients(i, j) *
r_anti_diffusive_flux(i, j);
}
}
KRATOS_CATCH("");
}
/**
* @brief Calculates the allowed anti-diffusive flux ratios
*
* @param rRPlus Allowed positive flux ratio
* @param rRMinus Allowed negative flux ratio
* @param rNode Node
*/
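// Zalesak-type limiter ratios, as computed below: R+ = min(1, Q+/P+) and
// R- = min(1, Q-/P-), with nodes flagged as boundary left unlimited (R = 1).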
void CalculateAntiDiffusiveFluxR(
double& rRPlus,
double& rRMinus,
const ModelPart::NodeType& rNode) const
{
if (rNode.Is(mBoundaryFlags)) {
rRMinus = 1.0;
rRPlus = 1.0;
} else {
const double q_plus = rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
const double p_plus = rNode.GetValue(AFC_POSITIVE_ANTI_DIFFUSIVE_FLUX);
const double q_minus = rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX_LIMIT);
const double p_minus = rNode.GetValue(AFC_NEGATIVE_ANTI_DIFFUSIVE_FLUX);
rRPlus = 1.0;
if (p_plus > 0.0) {
rRPlus = std::min(1.0, q_plus / p_plus);
}
rRMinus = 1.0;
if (p_minus < 0.0) {
rRMinus = std::min(1.0, q_minus / p_minus);
}
}
}
///@}
}; // Class AlgebraicFluxCorrectedSteadyScalarScheme
///@}
} // namespace Kratos
#endif /* KRATOS_ALGEBRAIC_FLUX_CORRECTED_SCALAR_STEADY_SCHEME defined */
|
symgs.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void smooth(level_type * level, int phi_id, int rhs_id, double a, double b){
fprintf(stderr, "symgs smooth\n");
int box,s;
for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps (forward/backward) per GS smooth
exchange_boundary(level,phi_id,stencil_is_star_shaped());
apply_BCs(level,phi_id,stencil_is_star_shaped());
uint64_t _timeStart = CycleTime();
// #pragma omp parallel for private(box)
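// HClib port of the OpenMP loop above: forasync_nb spawns one non-blocking
// task per box and the enclosing hclib::finish waits for all of them.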
hclib::finish([&] {
hclib::loop_domain_1d loop(level->num_my_boxes);
hclib::forasync_nb(&loop, [=] (int box) {
int i,j,k;
const int ghosts = level->box_ghosts;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int dim = level->my_boxes[box].dim;
const double h2inv = 1.0/(level->h*level->h);
double * __restrict__ phi = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);
const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride);
const double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain
if( (s&0x1)==0 ){ // forward sweep... hard to thread
for(k=0;k<dim;k++){
for(j=0;j<dim;j++){
for(i=0;i<dim;i++){
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(phi);
phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
}}}
}else{ // backward sweep... hard to thread
for(k=dim-1;k>=0;k--){
for(j=dim-1;j>=0;j--){
for(i=dim-1;i>=0;i--){
int ijk = i + j*jStride + k*kStride;
double Ax = apply_op_ijk(phi);
phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
}}}
}
}, false, FORASYNC_MODE_FLAT);
});
level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart);
} // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
drupal7_fmt_plug.c | /*
* Drupal 7 phpass variant using SHA-512 and hashes cut at 258 bits.
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* These are 8 byte salted hashes with a loop count that defines the number
* of loops to compute. Drupal uses 258 bits of the hash; this is a multiple of
* 6 but not 8. I presume this is for getting unpadded base64. Anyway we store
* an extra byte but for now we will only compare 256 bits. I doubt that will
* pose any problems. Actually I'm not quite sure the last bits end up correct
* from the current version of get_binary().
*
* Based on [old thick] phpass-md5.
*/
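/*
 * Ciphertext layout, derived from the constants and tests below (a sketch):
 *   $S$ C wkjgAKeS x2imSiN3SyBEg8e0sgE2QOx4a/VIfCHN0BZUNAWCr1X
 *   tag (3) + log2(loop count) char (1) + salt (8) + base64 hash (43) = 55.
 */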
#if FMT_EXTERNS_H
extern struct fmt_main fmt_drupal7;
#elif FMT_REGISTERS_H
john_register_one(&fmt_drupal7);
#else
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Drupal7"
#define FORMAT_NAME "$S$"
#define FORMAT_TAG "$S$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (x16385)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 47
#define CIPHERTEXT_LENGTH 55
#define DIGEST_SIZE (512/8)
#define BINARY_SIZE (258/8) // ((258+7)/8)
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"$S$CwkjgAKeSx2imSiN3SyBEg8e0sgE2QOx4a/VIfCHN0BZUNAWCr1X", "virtualabc"},
{"$S$CFURCPa.k6FAEbJPgejaW4nijv7rYgGc4dUJtChQtV4KLJTPTC/u", "password"},
{"$S$C6x2r.aW5Nkg7st6/u.IKWjTerHXscjPtu4spwhCVZlP89UKcbb/", "NEW_TEMP_PASSWORD"},
{NULL}
};
/*
* NOTE, due to the 0x4000 iteration count, I am not wasting time pre-loading
* keys/salts. We will simply add SIMD code to the crypt_all. We could only
* gain < .1% worrying about all the extra stuff from set_key, get_key, the
* hashes, etc needed to split out SIMD. We just keep all input data in 'flat'
* format, switch to SIMD, do the 0x4000 loops, and put output back into 'flat'
* layout again. So we have no 'static' SIMD objects.
*/
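/*
 * The underlying scheme, matching the flat (non-SIMD) path in crypt_all:
 *   h = SHA512(salt . password)
 *   repeat 2^count_log2 times: h = SHA512(h . password)
 * For the common "C" setting that is 1 + 2^14 = 16385 SHA-512 calls,
 * which is where the benchmark comment above comes from.
 */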
static unsigned char *cursalt;
static unsigned loopCnt;
static unsigned char (*EncKey)[PLAINTEXT_LENGTH + 1];
static unsigned int *EncKeyLen;
static char (*crypt_key)[DIGEST_SIZE];
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
EncKey = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*EncKey));
EncKeyLen = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*EncKeyLen));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
}
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(EncKeyLen);
MEM_FREE(EncKey);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
int i;
unsigned count_log2;
if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
return 0;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
for (i = FORMAT_TAG_LEN; i < CIPHERTEXT_LENGTH; ++i)
if (atoi64[ARCH_INDEX(ciphertext[i])] == 0x7F)
return 0;
count_log2 = atoi64[ARCH_INDEX(ciphertext[3])];
if (count_log2 < 7 || count_log2 > 31)
return 0;
return 1;
}
static void set_salt(void *salt)
{
loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
cursalt = salt;
}
static void set_key(char *key, int index)
{
int len;
len = strlen(key);
EncKeyLen[index] = len;
memcpy(((char*)EncKey[index]), key, len + 1);
}
static char *get_key(int index)
{
return (char*)EncKey[index];
}
static int cmp_all(void *binary, int count)
{
int index;
for(index = 0; index < count; index++)
if (!memcmp(binary, crypt_key[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT)
{
#ifdef SIMD_COEF_64
unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;
ARCH_WORD_64 *keys64;
unsigned i, j, len, Lcount = loopCnt;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys64 = (ARCH_WORD_64*)keys;
memset(keys, 0, 128*MAX_KEYS_PER_CRYPT);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
len = EncKeyLen[index+i];
for (j = 0; j < 8; ++j)
keys[GETPOS(j, i)] = cursalt[j];
for (j = 0; j < len; ++j)
keys[GETPOS(j+8, i)] = EncKey[index+i][j];
keys[GETPOS(j+8, i)] = 0x80;
keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+8) << 3;
}
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
len = EncKeyLen[index+i];
for (j = 0; j < len; ++j)
keys[GETPOS(j+64, i)] = EncKey[index+i][j];
keys[GETPOS(j+64, i)] = 0x80;
keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+64) << 3;
}
while (--Lcount)
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// Last one with FLAT_OUT
SIMDSHA512body(keys, (ARCH_WORD_64*)crypt_key[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
SHA512_CTX ctx;
unsigned char tmp[DIGEST_SIZE + PLAINTEXT_LENGTH];
int len = EncKeyLen[index];
unsigned Lcount = loopCnt - 1;
SHA512_Init( &ctx );
SHA512_Update( &ctx, cursalt, 8 );
SHA512_Update( &ctx, EncKey[index], len );
memcpy(&tmp[DIGEST_SIZE], (char *)EncKey[index], len);
SHA512_Final( tmp, &ctx);
len += DIGEST_SIZE;
do {
SHA512_Init( &ctx );
SHA512_Update( &ctx, tmp, len);
SHA512_Final( tmp, &ctx);
} while (--Lcount);
SHA512_Init( &ctx );
SHA512_Update( &ctx, tmp, len);
SHA512_Final( (unsigned char *) crypt_key[index], &ctx);
#endif
}
return count;
}
static void * get_binary(char *ciphertext)
{
int i;
unsigned sixbits;
static union {
unsigned char u8[BINARY_SIZE + 1];
ARCH_WORD_32 u32;
} out;
int bidx=0;
char *pos;
pos = &ciphertext[FORMAT_TAG_LEN + 1 + 8];
for (i = 0; i < 10; ++i) {
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<6);
sixbits >>= 2;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<4);
sixbits >>= 4;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<2);
}
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<6);
sixbits >>= 2;
out.u8[bidx] = sixbits;
sixbits = atoi64[ARCH_INDEX(*pos++)];
out.u8[bidx++] |= (sixbits<<4);
return out.u8;
}
static void * get_salt(char *ciphertext)
{
static union {
unsigned char u8[SALT_SIZE + 1];
ARCH_WORD_32 u32;
} salt;
// store off the 'real' 8 bytes of salt
memcpy(salt.u8, &ciphertext[FORMAT_TAG_LEN+1], 8);
// append the 1 byte of loop count information.
salt.u8[8] = ciphertext[FORMAT_TAG_LEN];
return salt.u8;
}
static int get_hash_0(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_0; }
static int get_hash_1(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_1; }
static int get_hash_2(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_2; }
static int get_hash_3(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_3; }
static int get_hash_4(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_4; }
static int get_hash_5(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_5; }
static int get_hash_6(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_6; }
static int salt_hash(void *salt)
{
return *((ARCH_WORD_32 *)salt) & 0x3FF;
}
static unsigned int iteration_count(void *salt)
{
return (unsigned int) 1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]);
}
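/*
 * Worked example (editorial, assuming the usual crypt64 alphabet
 * "./0-9A-Za-z" behind atoi64): a hash of the form "$S$D..." stores 'D'
 * as salt byte 8; atoi64['D'] == 15, so iteration_count() reports
 * 1 << 15 == 32768 SHA-512 iterations.
 */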
struct fmt_main fmt_drupal7 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
// true salt is SALT_SIZE but we add the loop count
SALT_SIZE + 1,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
schedule-simd-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -O2" } */
/* { dg-additional-options "-mavx512f" { target { x86_64-*-* i?86-*-* } } } */
#define N 1024
int a[N], b[N], c[N];
void
f1 (void)
{
int i;
#pragma omp parallel for simd schedule (simd:static)
for (i = 0; i < N; i++)
a[i] = b[i] + c[i];
}
void
f2 (void)
{
int i;
#pragma omp parallel for simd schedule (simd: static, 7)
for (i = 0; i < N; i++)
a[i] = b[i] + c[i];
}
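/* Editorial note: the "simd" schedule modifier (OpenMP 4.5) lets the
   implementation round the chunk size up to a multiple of the loop's
   simd width. With the chunk of 7 requested above, a target with
   8-wide vectors may effectively use chunks of 8 (illustrative figure,
   not something this test checks). */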
void
f3 (void)
{
int i;
#pragma omp parallel for simd schedule (simd : dynamic, 7)
for (i = 0; i < N; i++)
a[i] = b[i] + c[i];
}
void
f4 (void)
{
int i;
#pragma omp parallel for simd schedule ( simd:runtime)
for (i = 0; i < N; i++)
a[i] = b[i] + c[i];
}
void
f5 (void)
{
int i;
#pragma omp parallel for simd schedule (simd:auto)
for (i = 0; i < N; i++)
a[i] = b[i] + c[i];
}
|
GB_unaryop__lnot_fp64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_int16
// op(A') function: GB_tran__lnot_fp64_int16
// C type: double
// A type: int16_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
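// Editorial illustration (not generated code): for one entry p,
// GB_CAST_OP (p, p) expands to roughly
//
//      int16_t aij = Ax [p] ;      // GB_GETA
//      double x = (double) aij ;   // GB_CASTING
//      Cx [p] = !(x != 0) ;        // GB_OP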
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_fp64_int16
(
double *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_fp64_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc32)
// A*D function (colscale): GB (_AxD__rdiv_fc32)
// D*A function (rowscale): GB (_DxB__rdiv_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc32)
// C=scalar+B GB (_bind1st__rdiv_fc32)
// C=scalar+B' GB (_bind1st_tran__rdiv_fc32)
// C=A+scalar GB (_bind2nd__rdiv_fc32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_div (bij, aij)
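// Editorial note: RDIV is division with its operands flipped; for
// scalars, rdiv(aij,bij) computes bij/aij, which is why GB_BINOP below
// calls GB_FC32_div (y, x) rather than GB_FC32_div (x, y).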
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_div (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC32 || GxB_NO_RDIV_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC32_div (bij, x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC32_div (y, aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (aij, x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (y, aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
imginputfileconn.h | /**
* DeepDetect
* Copyright (c) 2014 Emmanuel Benazera
* Author: Emmanuel Benazera <beniz@droidnik.fr>
*
* This file is part of deepdetect.
*
* deepdetect is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* deepdetect is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with deepdetect. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef IMGINPUTFILECONN_H
#define IMGINPUTFILECONN_H
#include "inputconnectorstrategy.h"
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "ext/base64/base64.h"
#include "utils/apitools.h"
#include <random>
namespace dd
{
class DDImg
{
public:
DDImg() {}
~DDImg() {}
// base64 detection
bool is_within_base64_range(char c) const
{
if ((c >= 'A' && c <= 'Z')
|| (c >= 'a' && c <= 'z')
|| (c >= '0' && c <= '9')
|| (c == '+' || c=='/' || c=='='))
return true;
else return false;
}
bool possibly_base64(const std::string &s) const
{
bool ism = is_multiple_four(s);
if (!ism)
return false;
for (char c: s)
{
bool within_64 = is_within_base64_range(c);
if (!within_64)
return false;
}
return true;
}
bool is_multiple_four(const std::string &s) const
{
if (s.length() % 4 == 0)
return true;
else return false;
}
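// Editorial note: this is only a heuristic. A plain string whose length
// is a multiple of four and that uses only base64 characters is
// indistinguishable from base64 here, e.g. (hypothetical values):
//   possibly_base64("abcd")  -> true  ("abcd" would be decoded as base64)
//   possibly_base64("a.jpg") -> false ('.' is outside the alphabet)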
void scale(const cv::Mat &src, cv::Mat &dst) const {
float coef = std::min(static_cast<float>(_scale_max) / std::max(src.rows, src.cols),
static_cast<float>(_scale_min) / std::min(src.rows, src.cols));
cv::resize(src, dst, cv::Size(), coef, coef, CV_INTER_CUBIC);
}
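// Worked example (editorial): with the defaults _scale_min = 600 and
// _scale_max = 1000, an 800x1200 image gives
//   coef = min(1000/1200, 600/800) = min(0.833, 0.75) = 0.75,
// i.e. the image is resized to 600x900 (short side pinned to _scale_min).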
// decode image
void decode(const std::string &str)
{
std::vector<unsigned char> vdat(str.begin(),str.end());
cv::Mat img = cv::Mat(cv::imdecode(cv::Mat(vdat,true),
_unchanged_data ? CV_LOAD_IMAGE_UNCHANGED :
(_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)));
_imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
cv::Mat rimg;
if (_scaled)
scale(img, rimg);
else if (_width == 0 || _height == 0) {
if (_width == 0 && _height == 0) {
// XXX - Do nothing and keep native resolution. May cause issues if batched images are different resolutions
rimg = img;
} else {
// Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio
// XXX - This may cause issues if batch images are different resolutions
size_t currMaxDim = std::max(img.rows, img.cols);
double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim);
cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC);
}
} else {
// Resize normally to the specified width and height
cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC);
}
if (_crop_width != 0 && _crop_height != 0) {
int widthBorder = (_width - _crop_width)/2;
int heightBorder = (_height - _crop_height)/2;
rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height));
}
_imgs.push_back(rimg);
}
// deserialize image, independent of format
void deserialize(std::stringstream &input)
{
size_t size = 0;
input.seekg(0,input.end);
size = input.tellg();
input.seekg(0,input.beg);
char* data = new char[size];
input.read(data, size);
std::string str(data,data+size);
delete[] data;
decode(str);
}
// data acquisition
int read_file(const std::string &fname)
{
cv::Mat img = cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED :
(_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR));
if (img.empty())
{
_logger->error("empty image {}",fname);
return -1;
}
_imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
cv::Mat rimg;
try
{
if (_scaled)
scale(img, rimg);
else if (_width == 0 || _height == 0) {
if (_width == 0 && _height == 0) {
// Do nothing and keep native resolution. May cause issues if batched images are different resolutions
rimg = img;
} else {
// Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio
// XXX - This may cause issues if batch images are different resolutions
size_t currMaxDim = std::max(img.rows, img.cols);
double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim);
cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC);
}
} else {
// Resize normally to the specified width and height
cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC);
}
}
catch(...)
{
throw InputConnectorBadParamException("failed resizing image " + fname);
}
if (_crop_width != 0 && _crop_height != 0) {
int widthBorder = (_width - _crop_width)/2;
int heightBorder = (_height - _crop_height)/2;
try {
rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height));
} catch(...) {
throw InputConnectorBadParamException("failed cropping image " + fname);
}
}
_imgs.push_back(rimg);
return 0;
}
int read_db(const std::string &fname)
{
_db_fname = fname;
return 0;
}
int read_mem(const std::string &content)
{
cv::Mat timg;
_b64 = possibly_base64(content);
if (_b64)
{
std::string ccontent;
Base64::Decode(content,&ccontent);
std::stringstream sstr;
sstr << ccontent;
deserialize(sstr);
}
else
{
decode(content);
}
if (_imgs.at(0).empty())
return -1;
return 0;
}
int read_dir(const std::string &dir)
{
// list directories in dir
std::unordered_set<std::string> subdirs;
if (fileops::list_directory(dir,false,true,false,subdirs))
throw InputConnectorBadParamException("failed reading text subdirectories in data directory " + dir);
_logger->info("imginputfileconn: list subdirs size={}",subdirs.size());
// list files and classes
std::vector<std::pair<std::string,int>> lfiles; // labeled files
std::unordered_map<int,std::string> hcorresp; // correspondence class number / class name
if (!subdirs.empty())
{
int cl = 0;
auto uit = subdirs.begin();
while(uit!=subdirs.end())
{
std::unordered_set<std::string> subdir_files;
if (fileops::list_directory((*uit),true,false,true,subdir_files))
throw InputConnectorBadParamException("failed reading image data sub-directory " + (*uit));
auto fit = subdir_files.begin();
while(fit!=subdir_files.end()) // XXX: re-iterating the files is not optimal
{
lfiles.push_back(std::pair<std::string,int>((*fit),cl));
++fit;
}
++cl;
++uit;
}
}
else
{
std::unordered_set<std::string> test_files;
fileops::list_directory(dir,true,false,false,test_files);
auto fit = test_files.begin();
while(fit!=test_files.end())
{
lfiles.push_back(std::pair<std::string,int>((*fit),-1)); // -1 for no class
++fit;
}
}
// read images
_imgs.reserve(lfiles.size());
_img_files.reserve(lfiles.size());
_labels.reserve(lfiles.size());
for (std::pair<std::string,int> &p: lfiles)
{
cv::Mat img = cv::imread(p.first, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED :
(_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR));
_imgs_size.push_back(std::pair<int,int>(img.rows,img.cols));
cv::Mat rimg;
try
{
if (_scaled)
scale(img, rimg);
else if (_width == 0 || _height == 0) {
if (_width == 0 && _height == 0) {
// Do nothing and keep native resolution. May cause issues if batched images are different resolutions
rimg = img;
} else {
// Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio
// XXX - This may cause issues if batch images are different resolutions
size_t currMaxDim = std::max(img.rows, img.cols);
double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim);
cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC);
}
} else {
// Resize normally to the specified width and height
cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC);
}
}
catch(...)
{
throw InputConnectorBadParamException("failed resizing image " + p.first);
}
if (_crop_width != 0 && _crop_height != 0) {
int widthBorder = (_width - _crop_width)/2;
int heightBorder = (_height - _crop_height)/2;
try {
rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height));
} catch(...) {
throw InputConnectorBadParamException("failed cropping image " + p.first);
}
}
_imgs.push_back(rimg);
_img_files.push_back(p.first);
if (p.second >= 0)
_labels.push_back(p.second);
if (_imgs.size() % 1000 == 0)
_logger->info("read {} images",_imgs.size());
}
return 0;
}
std::vector<cv::Mat> _imgs;
std::vector<std::string> _img_files;
std::vector<std::pair<int,int>> _imgs_size;
bool _bw = false;
bool _b64 = false;
bool _unchanged_data = false;
std::vector<int> _labels;
int _width = 224;
int _height = 224;
int _crop_width = 0;
int _crop_height = 0;
bool _scaled = false;
int _scale_min = 600;
int _scale_max = 1000;
std::string _db_fname;
std::shared_ptr<spdlog::logger> _logger;
};
class ImgInputFileConn : public InputConnectorStrategy
{
public:
ImgInputFileConn()
:InputConnectorStrategy(){}
ImgInputFileConn(const ImgInputFileConn &i)
:InputConnectorStrategy(i),
_width(i._width),_height(i._height),
_crop_width(i._crop_width),_crop_height(i._crop_height),
_bw(i._bw),_unchanged_data(i._unchanged_data),
_mean(i._mean),_has_mean_scalar(i._has_mean_scalar),
_scaled(i._scaled), _scale_min(i._scale_min), _scale_max(i._scale_max) {}
~ImgInputFileConn() {}
void init(const APIData &ad)
{
fillup_parameters(ad);
}
void fillup_parameters(const APIData &ad)
{
// optional parameters.
if (ad.has("width"))
_width = ad.get("width").get<int>();
if (ad.has("height"))
_height = ad.get("height").get<int>();
if (ad.has("crop_width")) {
_crop_width = ad.get("crop_width").get<int>();
if (_crop_width > _width) {
_logger->error("Crop width must be less than or equal to width");
throw InputConnectorBadParamException("Crop width must be less than or equal to width");
}
}
if (ad.has("crop_height")) {
_crop_height = ad.get("crop_height").get<int>();
if (_crop_height > _height) {
_logger->error("Crop height must be less than or equal to height");
throw InputConnectorBadParamException("Crop height must be less than or equal to height");
}
}
if (ad.has("bw"))
_bw = ad.get("bw").get<bool>();
if (ad.has("unchanged_data"))
_unchanged_data = ad.get("unchanged_data").get<bool>();
if (ad.has("shuffle"))
_shuffle = ad.get("shuffle").get<bool>();
if (ad.has("seed"))
_seed = ad.get("seed").get<int>();
if (ad.has("test_split"))
_test_split = ad.get("test_split").get<double>();
if (ad.has("mean"))
{
apitools::get_floats(ad, "mean", _mean);
_has_mean_scalar = true;
}
// Variable size
if (ad.has("scaled") || ad.has("scale_min") || ad.has("scale_max"))
_scaled = true;
if (ad.has("scale_min"))
_scale_min = ad.get("scale_min").get<int>();
if (ad.has("scale_max"))
_scale_max = ad.get("scale_max").get<int>();
}
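// Example (editorial, hypothetical request): an "input" object such as
//   {"width":300,"height":300,"bw":true,"test_split":0.1}
// would set _width = 300, _height = 300, _bw = true and
// _test_split = 0.1 through the accessors above.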
int feature_size() const
{
if (_bw || _unchanged_data) {
// XXX: only valid for single channels
if (_crop_width != 0 && _crop_height != 0) return _crop_width*_crop_height;
else return _width*_height;
}
else {
// RGB
if (_crop_width != 0 && _crop_height != 0) return _crop_width*_crop_height*3;
else return _width*_height*3;
}
}
int batch_size() const
{
return _images.size();
}
int test_batch_size() const
{
return _test_images.size();
}
void transform(const APIData &ad)
{
get_data(ad);
if (ad.has("parameters")) // hotplug of parameters, overriding the defaults
{
APIData ad_param = ad.getobj("parameters");
if (ad_param.has("input"))
{
fillup_parameters(ad_param.getobj("input"));
}
}
int catch_read = 0;
std::string catch_msg;
std::vector<std::string> uris;
std::vector<std::string> failed_uris;
#pragma omp parallel for
for (size_t i=0;i<_uris.size();i++)
{
bool no_img = false;
std::string u = _uris.at(i);
DataEl<DDImg> dimg;
dimg._ctype._bw = _bw;
dimg._ctype._unchanged_data = _unchanged_data;
dimg._ctype._width = _width;
dimg._ctype._height = _height;
dimg._ctype._crop_width = _crop_width;
dimg._ctype._crop_height = _crop_height;
dimg._ctype._scaled = _scaled;
dimg._ctype._scale_min = _scale_min;
dimg._ctype._scale_max = _scale_max;
try
{
if (dimg.read_element(u,this->_logger))
{
_logger->error("no data for image {}",u);
no_img = true;
}
if (!dimg._ctype._db_fname.empty())
_db_fname = dimg._ctype._db_fname;
}
catch(std::exception &e)
{
#pragma omp critical
{
++catch_read;
catch_msg = e.what();
failed_uris.push_back(u);
no_img = true;
}
}
if (no_img)
continue;
if (!_db_fname.empty())
continue;
#pragma omp critical
{
_images.insert(_images.end(),
std::make_move_iterator(dimg._ctype._imgs.begin()),
std::make_move_iterator(dimg._ctype._imgs.end()));
_images_size.insert(_images_size.end(),
std::make_move_iterator(dimg._ctype._imgs_size.begin()),
std::make_move_iterator(dimg._ctype._imgs_size.end()));
if (!dimg._ctype._labels.empty())
_test_labels.insert(_test_labels.end(),
std::make_move_iterator(dimg._ctype._labels.begin()),
std::make_move_iterator(dimg._ctype._labels.end()));
if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1)
uris.push_back(u);
else if (!dimg._ctype._img_files.empty())
uris.insert(uris.end(),
std::make_move_iterator(dimg._ctype._img_files.begin()),
std::make_move_iterator(dimg._ctype._img_files.end()));
else uris.push_back(std::to_string(i));
}
}
if (catch_read)
{
for (auto s: failed_uris)
_logger->error("failed reading image {}",s);
throw InputConnectorBadParamException(catch_msg);
}
_uris = uris;
if (!_db_fname.empty())
return; // db filename is passed to backend
// shuffle before possible split
if (_shuffle)
{
std::mt19937 g;
if (_seed >= 0)
g = std::mt19937(_seed);
else
{
std::random_device rd;
g = std::mt19937(rd());
}
std::shuffle(_images.begin(),_images.end(),g); //XXX beware: labels are not shuffled, i.e. let's not shuffle while testing
}
// split as required
if (_test_split > 0)
{
int split_size = std::floor(_images.size() * (1.0-_test_split));
auto chit = _images.begin();
auto dchit = chit;
int cpos = 0;
while(chit!=_images.end())
{
if (cpos == split_size)
{
if (dchit == _images.begin())
dchit = chit;
_test_images.push_back((*chit));
}
else ++cpos;
++chit;
}
_images.erase(dchit,_images.end());
_logger->info("data split test size={} / remaining data size={}",_test_images.size(),_images.size());
}
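// Worked example (editorial): with 10 images and _test_split = 0.2,
// split_size = floor(10 * 0.8) = 8, so images [0,8) stay in _images
// and images [8,10) are moved to _test_images.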
if (_images.empty())
throw InputConnectorBadParamException("no image could be found");
}
// data
std::vector<cv::Mat> _images;
std::vector<cv::Mat> _test_images;
std::vector<int> _test_labels;
std::vector<std::pair<int,int>> _images_size;
// image parameters
int _width = 224;
int _height = 224;
int _crop_width = 0;
int _crop_height = 0;
bool _bw = false; /**< whether to convert to black & white. */
bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */
double _test_split = 0.0; /**< auto-split of the dataset. */
int _seed = -1; /**< shuffling seed. */
std::vector<float> _mean; /**< mean image pixels, to be subtracted from images. */
bool _has_mean_scalar = false; /**< whether scalar is set. */
std::string _db_fname;
bool _scaled = false;
int _scale_min = 600;
int _scale_max = 1000;
};
}
#ifdef USE_CAFFE
#include "caffeinputconns.h"
#endif
#ifdef USE_TF
#include "backends/tf/tfinputconns.h"
#endif
#ifdef USE_DLIB
#include "backends/dlib/dlibinputconns.h"
#endif
#ifdef USE_CAFFE2
#include "backends/caffe2/caffe2inputconns.h"
#endif
#endif
|
triMeshAcceleratorBVH.h | #pragma once
#ifndef _TRIMESH_ACCELERATOR_BVH_H_
#define _TRIMESH_ACCELERATOR_BVH_H_
namespace ml {
template <class FloatType>
struct TriangleBVHNode {
TriangleBVHNode() : rChild(0), lChild(0), leafTri(0) {}
~TriangleBVHNode() {
SAFE_DELETE(rChild);
SAFE_DELETE(lChild);
}
//wait for vs 2013
//template<class T>
//using Triangle = TriMesh::Triangle<T>;
BoundingBox3<FloatType> boundingBox;
const typename TriMesh<FloatType>::Triangle* leafTri;
TriangleBVHNode<FloatType> *lChild;
TriangleBVHNode<FloatType> *rChild;
void computeBoundingBox() {
boundingBox.reset();
if (!lChild && !rChild) {
leafTri->includeInBoundingBox(boundingBox);
} else {
if (lChild) {
lChild->computeBoundingBox();
boundingBox.include(lChild->boundingBox);
}
if (rChild) {
rChild->computeBoundingBox();
boundingBox.include(rChild->boundingBox);
}
}
}
void splitMidPoint(typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& begin, typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& end) {
if (end - begin > 1) {
//determine longest axis
BoundingBox3<FloatType> bbox;
for (auto iter = begin; iter != end; iter++) {
bbox.include((*iter)->getCenter());
}
FloatType maxExtent = bbox.getMaxExtent();
typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator midIter = begin + 1;
if (bbox.getExtentX() > bbox.getExtentY() && bbox.getExtentX() > bbox.getExtentZ()) {
//x
std::stable_sort(begin, end, cmpX);
FloatType middle = bbox.getMinX() + maxExtent / 2;
for (; midIter != end - 1; midIter++) {
if ((*midIter)->getCenter().x >= middle) break;
}
}
else if (bbox.getExtentY() > bbox.getExtentX() && bbox.getExtentY() > bbox.getExtentZ()) {
//y
std::stable_sort(begin, end, cmpY);
FloatType middle = bbox.getMinY() + maxExtent / 2;
for (; midIter != end - 1; midIter++) {
if ((*midIter)->getCenter().y >= middle) break;
}
}
else {
//z
std::stable_sort(begin, end, cmpZ);
FloatType middle = bbox.getMinZ() + maxExtent / 2;
for (; midIter != end - 1; midIter++) {
if ((*midIter)->getCenter().z >= middle) break;
}
}
lChild = new TriangleBVHNode;
rChild = new TriangleBVHNode;
lChild->splitMidPoint(begin, midIter);
rChild->splitMidPoint(midIter, end);
}
else {
assert(end - begin == 1);
leafTri = *begin; //found a leaf
}
}
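// Editorial note on the strategy above: triangles are stably sorted
// along the longest axis of their centroid bounding box and split at
// the spatial midpoint of that axis; starting midIter at begin+1 and
// stopping at end-1 guarantees both children receive at least one
// triangle even when every centroid falls on one side of the middle.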
void splitMedian(typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& begin, typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& end, unsigned int lastSortAxis) {
if (end - begin > 1) {
if (lastSortAxis == 0) std::stable_sort(begin, end, cmpX);
else if (lastSortAxis == 1) std::stable_sort(begin, end, cmpY);
else std::stable_sort(begin, end, cmpZ);
lChild = new TriangleBVHNode;
rChild = new TriangleBVHNode;
const unsigned int newSortAxis = (lastSortAxis + 1) % 3;
lChild->splitMedian(begin, begin + ((end - begin) / 2), newSortAxis);
rChild->splitMedian(begin + ((end - begin) / 2), end, newSortAxis);
}
else {
assert(end - begin == 1);
leafTri = *begin; //found a leaf
}
}
inline bool isLeaf() const {
return !(lChild || rChild);
}
const typename TriMesh<FloatType>::Triangle* intersect(const Ray<FloatType> &r, FloatType& t, FloatType& u, FloatType& v, FloatType& tmin, FloatType& tmax, bool onlyFrontFaces = false) const {
if (t < tmin || t > tmax) return nullptr; //early out (warning t must be initialized)
if (boundingBox.intersect(r, tmin, tmax)) {
if (isLeaf()) {
if (leafTri->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces)) {
tmax = t;
return leafTri;
}
} else {
const typename TriMesh<FloatType>::Triangle* t0 = lChild->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces);
const typename TriMesh<FloatType>::Triangle* t1 = rChild->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces);
if (t1) return t1;
if (t0) return t0;
}
}
return nullptr;
}
// collisions with other Triangles
bool intersects(const typename TriMesh<FloatType>::Triangle* tri) const {
if (boundingBox.intersects(tri->getV0().position, tri->getV1().position, tri->getV2().position)) {
if (isLeaf()) {
return tri->intersects(*leafTri);
} else {
return lChild->intersects(tri) || rChild->intersects(tri);
}
} else {
return false;
}
}
bool intersects(const typename TriMesh<FloatType>::Triangle* tri, const Matrix4x4<FloatType>& transform) const {
typename TriMesh<FloatType>::Vertex v0(transform * tri->getV0().position);
typename TriMesh<FloatType>::Vertex v1(transform * tri->getV1().position);
typename TriMesh<FloatType>::Vertex v2(transform * tri->getV2().position);
typename TriMesh<FloatType>::Triangle triTrans(&v0,&v1,&v2);
if (boundingBox.intersects(triTrans.getV0().position, triTrans.getV1().position, triTrans.getV2().position)) {
if (isLeaf()) {
return triTrans.intersects(*leafTri);
}
else {
return lChild->intersects(&triTrans) || rChild->intersects(&triTrans);
}
}
else {
return false;
}
}
// collisions with other TriangleBVHNodes
bool intersects(const TriangleBVHNode& other) const {
if (boundingBox.intersects(other.boundingBox)) {
if (isLeaf()) {
return other.intersects(leafTri);
} else {
return lChild->intersects(other) || rChild->intersects(other);
}
} else {
return false;
}
}
bool intersects(const TriangleBVHNode& other, const Matrix4x4<FloatType>& transform) const {
if (boundingBox.intersects(other.boundingBox * transform)) { //TODO fix OBB
if (isLeaf()) {
return other.intersects(leafTri, transform.getInverse());
}
else {
return lChild->intersects(other, transform) || rChild->intersects(other, transform);
}
}
else {
return false;
}
}
bool collisionBBoxOnly(const TriangleBVHNode& other, const Matrix4x4<FloatType>& transform) const {
if (boundingBox.intersects(other.boundingBox * transform)) { //TODO fix OBB
if (isLeaf()) {
return true;
}
else {
return lChild->collisionBBoxOnly(other, transform) || rChild->collisionBBoxOnly(other, transform);
}
}
else {
return false;
}
}
unsigned int getTreeDepthRec() const {
unsigned int maxDepth = 0;
if (lChild) maxDepth = std::max(maxDepth, lChild->getTreeDepthRec());
if (rChild) maxDepth = std::max(maxDepth, rChild->getTreeDepthRec());
return maxDepth+1;
}
unsigned int getNumNodesRec() const {
unsigned int numNodes = 1;
if (lChild) numNodes += lChild->getNumNodesRec();
if (rChild) numNodes += rChild->getNumNodesRec();
return numNodes;
}
unsigned int getNumLeaves() const {
unsigned int numLeaves = 0;
if (lChild) numLeaves += lChild->getNumLeaves();
if (rChild) numLeaves += rChild->getNumLeaves();
if (!lChild && !rChild) {
assert(leafTri);
numLeaves++;
}
return numLeaves;
}
static bool cmpX(typename TriMesh<FloatType>::Triangle *t0, typename TriMesh<FloatType>::Triangle *t1) {
return t0->getCenter().x < t1->getCenter().x;
}
static bool cmpY(typename TriMesh<FloatType>::Triangle *t0, typename TriMesh<FloatType>::Triangle *t1) {
return t0->getCenter().y < t1->getCenter().y;
}
static bool cmpZ(typename TriMesh<FloatType>::Triangle *t0, typename TriMesh<FloatType>::Triangle *t1) {
return t0->getCenter().z < t1->getCenter().z;
}
};
template <class FloatType>
class TriMeshAcceleratorBVH : public TriMeshRayAccelerator<FloatType>, public TriMeshCollisionAccelerator<FloatType, TriMeshAcceleratorBVH<FloatType>>
{
public:
TriMeshAcceleratorBVH() {
m_Root = nullptr;
}
TriMeshAcceleratorBVH(const TriMesh<FloatType>& triMesh, bool storeLocalCopy = false) {
m_Root = nullptr;
build(triMesh, storeLocalCopy);
//std::vector<const TriMesh<FloatType>*> meshes;
//meshes.push_back(&triMesh);
//build(meshes, true);
//std::vector<std::pair<const TriMesh<FloatType>*, Matrix4x4<FloatType>>> meshes;
//meshes.push_back(std::make_pair(&triMesh, Matrix4x4<FloatType>::identity()));
//build(meshes);
}
~TriMeshAcceleratorBVH() {
SAFE_DELETE(m_Root);
}
void printInfo() const {
std::cout << "Info: TriangleBVHAccelerator build done ( " << TriMeshRayAccelerator<FloatType>::m_TrianglePointers.size() << " tris )" << std::endl;
std::cout << "Info: Tree depth " << m_Root->getTreeDepthRec() << std::endl;
std::cout << "Info: NumNodes " << m_Root->getNumNodesRec() << std::endl;
std::cout << "Info: NumLeaves " << m_Root->getNumLeaves() << std::endl;
}
private:
//! defined by the interface
bool collisionInternal(const TriMeshAcceleratorBVH<FloatType>& other) const {
return m_Root->intersects(*other.m_Root);
}
bool collisionTransformInternal(const TriMeshAcceleratorBVH<FloatType>& other, const Matrix4x4<FloatType>& transform) const {
return m_Root->intersects(*other.m_Root, transform);
}
bool collisionTransformBBoxOnlyInternal(const TriMeshAcceleratorBVH<FloatType>& other, const Matrix4x4<FloatType>& transform) const {
return m_Root->collisionBBoxOnly(*other.m_Root, transform);
}
//! defined by the interface
const typename TriMesh<FloatType>::Triangle* intersectInternal(const Ray<FloatType>& r, FloatType& t, FloatType& u, FloatType& v, FloatType tmin = (FloatType)0, FloatType tmax = std::numeric_limits<FloatType>::max(), bool onlyFrontFaces = false) const {
u = v = std::numeric_limits<FloatType>::max();
t = tmax; //TODO MATTHIAS: probably we don't have to track tmax since t must always be smaller than the prev
return m_Root->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces);
}
//! defined by the interface
void buildInternal() {
SAFE_DELETE(m_Root);
bool useParallelBuild = true;
if (useParallelBuild) {
buildParallel(TriMeshRayAccelerator<FloatType>::m_TrianglePointers);
} else {
buildRecursive(TriMeshRayAccelerator<FloatType>::m_TrianglePointers);
}
}
void buildParallel(std::vector<typename TriMesh<FloatType>::Triangle*>& tris) {
struct NodeEntry {
size_t begin;
size_t end;
TriangleBVHNode<FloatType> *node;
};
std::vector<NodeEntry> currLevel(1);
m_Root = new TriangleBVHNode<FloatType>;
currLevel[0].node = m_Root;
currLevel[0].begin = 0;
currLevel[0].end = tris.size();
unsigned int lastSortAxis = 0;
bool needFurtherSplitting = true;
while(needFurtherSplitting) {
needFurtherSplitting = false;
std::vector<NodeEntry> nextLevel(currLevel.size()*2);
#pragma omp parallel for
for (int i = 0; i < (int)std::min(currLevel.size(),tris.size()); i++) {
const size_t begin = currLevel[i].begin;
const size_t end = currLevel[i].end;
if (end - begin > 1) {
if (lastSortAxis == 0) std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpX);
else if (lastSortAxis == 1) std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpY);
else std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpZ);
TriangleBVHNode<FloatType>* node = currLevel[i].node;
TriangleBVHNode<FloatType>* lChild = new TriangleBVHNode<FloatType>;
TriangleBVHNode<FloatType>* rChild = new TriangleBVHNode<FloatType>;
node->lChild = lChild;
node->rChild = rChild;
nextLevel[2*i+0].begin = begin;
nextLevel[2*i+0].end = begin + ((end-begin)/2);
nextLevel[2*i+1].begin = begin + ((end-begin)/2);
nextLevel[2*i+1].end = end;
nextLevel[2*i+0].node = currLevel[i].node->lChild;
nextLevel[2*i+1].node = currLevel[i].node->rChild;
if (nextLevel[2*i+0].end - nextLevel[2*i+0].begin < 2) lChild->leafTri = tris[nextLevel[2*i+0].begin];
else needFurtherSplitting = true;
if (nextLevel[2*i+1].end - nextLevel[2*i+1].begin < 2) rChild->leafTri = tris[nextLevel[2*i+1].begin];
else needFurtherSplitting = true;
}
}
if (needFurtherSplitting) {
currLevel = nextLevel;
lastSortAxis = (lastSortAxis+1)%3;
}
}
m_Root->computeBoundingBox();
}
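// Editorial note: the parallel build proceeds one tree level at a time,
// and each level re-sorts its ranges with std::stable_sort, so the
// whole build is roughly O(n log^2 n); the per-level sorts run in
// parallel across nodes via the omp loop above.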
void buildRecursive(std::vector<typename TriMesh<FloatType>::Triangle*>& tris) {
assert(tris.size() > 2);
m_Root = new TriangleBVHNode<FloatType>;
//m_Root->splitMedian(tris.begin(), tris.end(), 0);
m_Root->splitMidPoint(tris.begin(), tris.end());
m_Root->computeBoundingBox();
}
//! private data
TriangleBVHNode<FloatType>* m_Root;
};
typedef TriMeshAcceleratorBVH<float> TriMeshAcceleratorBVHf;
typedef TriMeshAcceleratorBVH<double> TriMeshAcceleratorBVHd;
} // namespace ml
#endif
|
libperf_int.h | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2016. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifndef LIBPERF_INT_H_
#define LIBPERF_INT_H_
#include <tools/perf/api/libperf.h>
BEGIN_C_DECLS
/** @file libperf_int.h */
#include <ucs/async/async.h>
#include <ucs/time/time.h>
#include <ucs/sys/math.h>
#if _OPENMP
#include <omp.h>
#endif
#define TIMING_QUEUE_SIZE 2048
#define UCT_PERF_TEST_AM_ID 5
#define ADDR_BUF_SIZE 2048
#define UCX_PERF_TEST_FOREACH(perf) \
while (!ucx_perf_context_done(perf))
#define rte_call(_perf, _func, ...) \
((_perf)->params.rte->_func((_perf)->params.rte_group, ## __VA_ARGS__))
typedef struct ucx_perf_context ucx_perf_context_t;
typedef struct uct_peer uct_peer_t;
typedef struct ucp_perf_request ucp_perf_request_t;
typedef struct ucx_perf_thread_context ucx_perf_thread_context_t;
struct ucx_perf_allocator {
ucs_memory_type_t mem_type;
ucs_status_t (*init)(ucx_perf_context_t *perf);
ucs_status_t (*ucp_alloc)(const ucx_perf_context_t *perf, size_t length,
void **address_p, ucp_mem_h *memh, int non_blk_flag);
void (*ucp_free)(const ucx_perf_context_t *perf, void *address,
ucp_mem_h memh);
ucs_status_t (*uct_alloc)(const ucx_perf_context_t *perf, size_t length,
unsigned flags, uct_allocated_memory_t *alloc_mem);
void (*uct_free)(const ucx_perf_context_t *perf,
uct_allocated_memory_t *alloc_mem);
void (*memcpy)(void *dst, ucs_memory_type_t dst_mem_type,
const void *src, ucs_memory_type_t src_mem_type,
size_t count);
void* (*memset)(void *dst, int value, size_t count);
};
struct ucx_perf_context {
ucx_perf_params_t params;
/* Buffers */
void *send_buffer;
void *recv_buffer;
/* Measurements */
double start_time_acc; /* accurate start time */
ucs_time_t end_time; /* inaccurate end time (upper bound) */
ucs_time_t prev_time; /* time of previous iteration */
ucs_time_t report_interval; /* interval of showing report */
ucx_perf_counter_t max_iter;
/* Measurements of current/previous **report** */
struct {
ucx_perf_counter_t msgs; /* number of messages */
ucx_perf_counter_t bytes; /* number of bytes */
ucx_perf_counter_t iters; /* number of iterations */
ucs_time_t time; /* inaccurate time (for median and report interval) */
double time_acc; /* accurate time (for avg latency/bw/msgrate) */
} current, prev;
ucs_time_t timing_queue[TIMING_QUEUE_SIZE];
unsigned timing_queue_head;
const ucx_perf_allocator_t *allocator;
union {
struct {
ucs_async_context_t async;
uct_component_h cmpt;
uct_md_h md;
uct_worker_h worker;
uct_iface_h iface;
uct_peer_t *peers;
uct_allocated_memory_t send_mem;
uct_allocated_memory_t recv_mem;
uct_iov_t *iov;
} uct;
struct {
ucp_context_h context;
ucx_perf_thread_context_t* tctx;
ucp_worker_h worker;
ucp_ep_h ep;
ucp_rkey_h rkey;
unsigned long remote_addr;
ucp_mem_h send_memh;
ucp_mem_h recv_memh;
ucp_dt_iov_t *send_iov;
ucp_dt_iov_t *recv_iov;
void *am_hdr;
} ucp;
};
};
struct ucx_perf_thread_context {
pthread_t pt;
int tid;
ucs_status_t status;
ucx_perf_context_t perf;
ucx_perf_result_t result;
};
struct uct_peer {
uct_ep_h ep;
unsigned long remote_addr;
uct_rkey_bundle_t rkey;
};
struct ucp_perf_request {
void *context;
};
typedef struct {
ucs_status_t (*setup)(ucx_perf_context_t *perf);
void (*cleanup)(ucx_perf_context_t *perf);
ucs_status_t (*run)(ucx_perf_context_t *perf);
void (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs_t;
extern ucx_perf_funcs_t ucx_perf_funcs[];
void ucx_perf_test_start_clock(ucx_perf_context_t *perf);
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index);
void uct_perf_iface_flush_b(ucx_perf_context_t *perf);
ucs_status_t uct_perf_test_dispatch(ucx_perf_context_t *perf);
ucs_status_t ucp_perf_test_dispatch(ucx_perf_context_t *perf);
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result);
void uct_perf_barrier(ucx_perf_context_t *perf);
void ucp_perf_barrier(ucx_perf_context_t *perf);
ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf);
void ucp_perf_test_free_mem(ucx_perf_context_t *perf);
ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf);
void uct_perf_test_free_mem(ucx_perf_context_t *perf);
ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
const ucx_perf_params_t *params);
void ucx_perf_set_warmup(ucx_perf_context_t* perf,
const ucx_perf_params_t* params);
/**
* Get the total length of the message size given by parameters
*/
size_t ucx_perf_get_message_size(const ucx_perf_params_t *params);
static UCS_F_ALWAYS_INLINE int ucx_perf_context_done(ucx_perf_context_t *perf)
{
return ucs_unlikely((perf->current.iters >= perf->max_iter) ||
(perf->current.time > perf->end_time));
}
static inline void ucx_perf_get_time(ucx_perf_context_t *perf)
{
perf->current.time_acc = ucs_get_accurate_time();
}
static inline void ucx_perf_omp_barrier(ucx_perf_context_t *perf)
{
#if _OPENMP
if (perf->params.thread_count > 1) {
#pragma omp barrier
}
#endif
}
static inline void ucx_perf_update(ucx_perf_context_t *perf,
ucx_perf_counter_t iters, size_t bytes)
{
ucx_perf_result_t result;
perf->current.time = ucs_get_time();
perf->current.iters += iters;
perf->current.bytes += bytes;
perf->current.msgs += 1;
perf->timing_queue[perf->timing_queue_head] =
perf->current.time - perf->prev_time;
++perf->timing_queue_head;
if (perf->timing_queue_head == TIMING_QUEUE_SIZE) {
perf->timing_queue_head = 0;
}
perf->prev_time = perf->current.time;
if (perf->current.time - perf->prev.time >= perf->report_interval) {
ucx_perf_get_time(perf);
ucx_perf_calc_result(perf, &result);
rte_call(perf, report, &result, perf->params.report_arg, 0, 0);
perf->prev = perf->current;
}
}
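/* Editorial note: timing_queue is a fixed-size ring buffer holding the
 * last TIMING_QUEUE_SIZE per-iteration latencies; presumably
 * ucx_perf_calc_result() uses it for percentile/median figures, since
 * the accumulated counters alone only yield averages. */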
END_C_DECLS
#endif
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
struct _ResizeFilter
{
double
(*filter)(const double,const ResizeFilter *),
(*window)(const double,const ResizeFilter *),
support, /* filter region of support - the filter support limit */
window_support, /* window support, usually equal to support (expert only) */
scale, /* dimension scaling to fit window support (usually 1.0) */
blur, /* x-scale (blur-sharpen) */
coefficient[7]; /* cubic coefficients for BC-cubic filters */
ResizeWeightingFunctionType
filterWeightingType,
windowWeightingType;
size_t
signature;
};
/*
Forward declarations.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static double FilterName(const double x,const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. GetResizeFilterWeight() ensures this is a positive value.
%
% o resize_filter: current filter information. This allows the function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Blackman: 2nd-order cosine windowing function:
0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)
Using cos(2pi x) = 2 cos(pi x)^2 - 1 this equals
0.34 + 0.5 cos(pi x) + 0.16 cos(pi x)^2, which is how Chantal Racette
and Nicolas Robidoux refactored it to one trig call and five flops.
*/
const double cosine=cos((double) (MagickPI*x));
return(0.34+cosine*(0.5+cosine*0.16));
}
static double Bohman(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Bohman: 2nd-order cosine windowing function:
(1-x) cos(pi x) + sin(pi x) / pi.
Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7 flops,
taking advantage of the fact that the support of Bohman is 1.0 (so that we
know that sin(pi x) >= 0).
*/
const double cosine=cos((double) (MagickPI*x));
const double sine=sqrt(1.0-cosine*cosine);
return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}
static double Box(const double magick_unused(x),
const ResizeFilter *magick_unused(resize_filter))
{
/*
A Box filter is an equal weighting function (all weights equal).
DO NOT LIMIT results by support, or resize point sampling will not work,
as it requests points beyond its normal 0.0 support size.
*/
return(1.0);
}
static double Cosine(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Cosine window function:
cos((pi/2)*x).
*/
return((double)cos((double) (MagickPI2*x)));
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
/*
Cubic Filters using B,C determined values:
Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter
Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears
Spline B = 1 C = 0 B-Spline Gaussian approximation
Hermite B = 0 C = 0 B-Spline interpolator
See paper by Mitchell and Netravali, Reconstruction Filters in Computer
Graphics Computer Graphics, Volume 22, Number 4, August 1988
http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
Mitchell.pdf.
Coefficents are determined from B,C values:
P0 = ( 6 - 2*B )/6 = coeff[0]
P1 = 0
P2 = (-18 +12*B + 6*C )/6 = coeff[1]
P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
Q0 = ( 8*B +24*C )/6 = coeff[3]
Q1 = ( -12*B -48*C )/6 = coeff[4]
Q2 = ( 6*B +30*C )/6 = coeff[5]
Q3 = ( - 1*B - 6*C )/6 = coeff[6]
which are used to define the filter:
P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1
Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2
which ensures function is continuous in value and derivative (slope).
*/
if (x < 1.0)
return(resize_filter->coefficient[0]+x*(x*
(resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
if (x < 2.0)
return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
(resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
return(0.0);
}
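/*
Continuity check at x=1 (informative only): summing the P coefficients,
P0+P2+P3 = ((6-2B) + (-18+12B+6C) + (12-9B-6C))/6 = B/6,
and summing the Q coefficients,
Q0+Q1+Q2+Q3 = ((8B+24C) + (-12B-48C) + (6B+30C) + (-B-6C))/6 = B/6,
so the two cubic pieces meet with the same value at x=1 for any B,C.
*/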
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
/*
Gaussian with a sigma = 1/2 (or as user specified)
Gaussian Formula (1D) ...
exp( -(x^2)/(2.0*sigma^2) ) / (sqrt(2*PI)*sigma)
Gaussian Formula (2D) ...
exp( -(x^2+y^2)/(2.0*sigma^2) ) / (2*PI*sigma^2)
or for radius
exp( -(r^2)/(2.0*sigma^2) ) / (2*PI*sigma^2)
Note that the only change from the 1-D to the radial form is in the
normalization multiplier, which is not needed or used when the Gaussian is
used as a filter.
The constants are pre-calculated...
coeff[0]=sigma;
coeff[1]=1.0/(2.0*sigma^2);
coeff[2]=1.0/(2.0*PI*sigma^2);
weight = exp(-coeff[1]*(x^2)) * coeff[2];
However only the multiplier coeff[1] is needed; the others are informative
only. This separates the gaussian 'sigma' value from the 'blur/support'
settings, allowing for its use in special 'small sigma' gaussians, without
the filter 'missing' pixels because the support becomes too small.
*/
return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}
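/*
Worked example (informative only): with the default sigma = 1/2,
coefficient[1] = 1/(2*(1/2)^2) = 2, so the weight is exp(-2*x*x); at the
default support of 2.0 this has already decayed to exp(-8) ~= 3.4e-4.
*/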
static double Hann(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Cosine window function:
0.5+0.5*cos(pi*x).
*/
const double cosine=cos((double) (MagickPI*x));
return(0.5+0.5*cosine);
}
static double Hamming(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Offset cosine window function:
.54 + .46 cos(pi x).
*/
const double cosine=cos((double) (MagickPI*x));
return(0.54+0.46*cosine);
}
static double Jinc(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
http://mathworld.wolfram.com/JincFunction.html and page 11 of
http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
The original "zoom" program by Paul Heckbert called this "Bessel". But
really it is more accurately named "Jinc".
*/
if (x == 0.0)
return(0.5*MagickPI);
return(BesselOrderOne(MagickPI*x)/x);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
/*
Kaiser Windowing Function (Bessel windowing):
I0( beta * sqrt( 1-x^2 ) ) / I0( beta )
Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
However it is typically defined in terms of Alpha*PI.
The normalization factor (coeff[1]) is not actually needed,
but without it the filter has a large value at x=0, making it
difficult to compare the function with other windowing functions.
*/
return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
sqrt((double) (1.0-x*x))));
}
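/*
Worked example (informative only): with the default beta = 6.5,
coefficient[1] = 1/I0(6.5), so the window is I0(6.5)/I0(6.5) = 1 at x=0
and falls to I0(0)/I0(6.5) = 1/I0(6.5) at the support edge x=1.
*/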
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
double
value;
register ssize_t
i;
ssize_t
n,
order;
/*
Lagrange piecewise polynomial fit of sinc: N is the 'order' of the lagrange
function and depends on the overall support window size of the filter. That
is: for a support of 2, it gives a lagrange-4 (piecewise cubic function).
"n" identifies the piece of the piecewise polynomial.
See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
*/
if (x > resize_filter->support)
return(0.0);
order=(ssize_t) (2.0*resize_filter->window_support); /* number of pieces */
n=(ssize_t) (resize_filter->window_support+x);
value=1.0f;
for (i=0; i < order; i++)
if (i != n)
value*=(n-i-x)/(n-i);
return(value);
}
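/*
Worked example (informative only): for the default support of 2.0 the order
is 4 and, for 0 <= x < 1, n = 2, giving
value = ((2-x)/2) * ((1-x)/1) * ((-1-x)/(-1)) = (2-x)*(1-x*x)/2,
a cubic which is 1 at x=0 and 0 at x=1, as a sinc approximation should be.
*/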
static double Quadratic(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
2nd order (quadratic) B-Spline approximation of Gaussian.
*/
if (x < 0.5)
return(0.75-x*x);
if (x < 1.5)
return(0.5*(x-1.5)*(x-1.5));
return(0.0);
}
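/*
Continuity check (informative only): at x = 1/2 both pieces give
3/4 - 1/4 = 1/2 and 0.5*(1/2-3/2)^2 = 1/2; at x = 3/2 the second piece
reaches 0, matching the zero tail.
*/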
static double Sinc(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Scaled sinc(x) function using a trig call:
sinc(x) == sin(pi x)/(pi x).
*/
if (x != 0.0)
{
const double alpha=(double) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
return((double) 1.0);
}
static double SincFast(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Approximations of the sinc function sin(pi x)/(pi x) over the interval
[-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
from the Natural Sciences and Engineering Research Council of Canada.
Although the approximations are polynomials (for low order of
approximation) and quotients of polynomials (for higher order of
approximation) and consequently are similar in form to Taylor polynomials /
Pade approximants, the approximations are computed with a completely
different technique.
Summary: These approximations are "the best" in terms of bang (accuracy)
for the buck (flops). More specifically: Among the polynomial quotients
that can be computed using a fixed number of flops (with a given "+ - * /
budget"), the chosen polynomial quotient is the one closest to the
approximated function with respect to maximum absolute relative error over
the given interval.
The Remez algorithm, as implemented in the boost library's minimax package,
is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html
If outside of the interval of approximation, use the standard trig formula.
*/
if (x > 4.0)
{
const double alpha=(double) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
{
/*
The approximations only depend on x^2 (sinc is an even function).
*/
const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
/*
Maximum absolute relative error 6.3e-6 < 1/2^17.
*/
const double c0 = 0.173610016489197553621906385078711564924e-2L;
const double c1 = -0.384186115075660162081071290162149315834e-3L;
const double c2 = 0.393684603287860108352720146121813443561e-4L;
const double c3 = -0.248947210682259168029030370205389323899e-5L;
const double c4 = 0.107791837839662283066379987646635416692e-6L;
const double c5 = -0.324874073895735800961260474028013982211e-8L;
const double c6 = 0.628155216606695311524920882748052490116e-10L;
const double c7 = -0.586110644039348333520104379959307242711e-12L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
/*
Max. abs. rel. error 2.2e-8 < 1/2^25.
*/
const double c0 = 0.173611107357320220183368594093166520811e-2L;
const double c1 = -0.384240921114946632192116762889211361285e-3L;
const double c2 = 0.394201182359318128221229891724947048771e-4L;
const double c3 = -0.250963301609117217660068889165550534856e-5L;
const double c4 = 0.111902032818095784414237782071368805120e-6L;
const double c5 = -0.372895101408779549368465614321137048875e-8L;
const double c6 = 0.957694196677572570319816780188718518330e-10L;
const double c7 = -0.187208577776590710853865174371617338991e-11L;
const double c8 = 0.253524321426864752676094495396308636823e-13L;
const double c9 = -0.177084805010701112639035485248501049364e-15L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
/*
Max. abs. rel. error 1.2e-12 < 1/2^39.
*/
const double c0 = 0.173611111110910715186413700076827593074e-2L;
const double c1 = -0.289105544717893415815859968653611245425e-3L;
const double c2 = 0.206952161241815727624413291940849294025e-4L;
const double c3 = -0.834446180169727178193268528095341741698e-6L;
const double c4 = 0.207010104171026718629622453275917944941e-7L;
const double c5 = -0.319724784938507108101517564300855542655e-9L;
const double c6 = 0.288101675249103266147006509214934493930e-11L;
const double c7 = -0.118218971804934245819960233886876537953e-13L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
const double d0 = 1.0L;
const double d1 = 0.547981619622284827495856984100563583948e-1L;
const double d2 = 0.134226268835357312626304688047086921806e-2L;
const double d3 = 0.178994697503371051002463656833597608689e-4L;
const double d4 = 0.114633394140438168641246022557689759090e-6L;
const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
}
}
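/*
Note on the factored forms above (informative only): the common prefactor
(xx-1)*(xx-4)*(xx-9)*(xx-16) forces exact zeros at x = 1, 2, 3, 4, the zeros
of sinc on [-4,4]. As a sanity check at x=0 for the low-precision branch:
the prefactor is (-1)(-4)(-9)(-16) = 576 and c0 ~= 1/576, so the
approximation returns ~1.0 = sinc(0).
*/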
static double Triangle(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
a Bartlett 2D Cone filter. Also used as a Bartlett Windowing function
for Sinc().
*/
if (x < 1.0)
return(1.0-x);
return(0.0);
}
static double Welch(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Welch parabolic windowing filter.
*/
if (x < 1.0)
return(1.0-x*x);
return(0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The user's "-filter" selection is used to look up the default 'expert'
% settings for that filter from an internal table. However, any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as-is, and are limited to that filter's support window
% (unless overridden). 'Gaussian', while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5, or approximately 3*sigma
% as recommended by many references).
%
% The special 'cylindrical' filter flag promotes the default 4-lobed Windowed
% Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better suited
% to this style of image resampling. This typically happens when using such a
% filter for image distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting the 'Sinc' or 'Jinc' function as a filter will force
% the use of that function without any windowing, or promotion for
% cylindrical usage. This is not recommended, except by image processing
% experts, especially as part of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically requests a Sinc filter. SincFast uses highly accurate (and
% fast) polynomial (low Q) and rational (high Q) approximations, and will be
% used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing a
% 'no-op' with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux, which some believe
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly, Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter defaults, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisible. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act is if
% the support window it much much larger than what is actually supplied
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these are given it is assumes to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
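% A minimal C usage sketch (illustrative only; these are module-private
% functions, and the image/exception variables and error handling are
% assumed):
%
% ResizeFilter
% *resize_filter;
%
% double
% weight;
%
% resize_filter=AcquireResizeFilter(image,LanczosFilter,MagickFalse,
% exception);
% weight=GetResizeFilterWeight(resize_filter,0.5);
% resize_filter=DestroyResizeFilter(resize_filter);
%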
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterTypes filter,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o cylindrical: use a 1D orthogonal filter (Sinc) or a 2D cylindrical
% (radial) filter (Jinc). Note that the image artifact "filter:blur"
% overrides the filter's default blur factor, including any internal
% change (such as for cylindrical usage).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterTypes filter,const MagickBooleanType cylindrical,
ExceptionInfo *exception)
{
const char
*artifact;
FilterTypes
filter_type,
window_type;
double
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table mapping a given Filter to its Weighting and Windowing functions.
A 'Box' windowing function means it is a simple non-windowed filter.
A 'SincFast' filter function could be upgraded to a 'Jinc' filter if
"cylindrical" usage is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterTypes
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterTypes
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
};
/*
Table mapping the filter/window from the above table to an actual function,
the default support size for that filter as a weighting function, and the
scale to use when that function is used as a sinc windowing function
(typically 1.0, the location of the first zero crossing).
Note that the filter_type -> function mapping is 1 to 1, except for the
Sinc(), SincFast(), and CubicBC() functions, which may have multiple
filter-to-function associations.
See the "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
double
(*function)(const double,const ResizeFilter*),
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0 }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0 }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0 }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0 }, /* Hermite (cubic B=C=0) */
{ Hann, 1.0, 1.0, 0.0, 0.0 }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0 }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0 }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0 }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0 }, /* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5 }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3. }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0 }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0 }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0 }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0 }, /* Kaiser (square root window) */
{ Welch, 1.0, 1.0, 0.0, 0.0 }, /* Welch (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0 }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0 }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067 },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929 },
{ Cosine, 1.0, 1.0, 0.0, 0.0 }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, Integer Radius */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
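/*
Worked example (informative only): "-define filter:lobes=3" sets the
support to 3, which the Jinc conversion below maps to
jinc_zeros[2] = 3.2383154841662362, the radius of the third Jinc zero.
*/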
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
if (resize_filter == (ResizeFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur=1.0;
/* Promote 1D Windowed Sinc filters to 2D Windowed Jinc filters */
if ( IfMagickTrue(cylindrical) && (filter_type == SincFastFilter) &&
(filter != SincFastFilter))
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterTypes) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterTypes) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type= IfMagickTrue(cylindrical) ? JincFilter
: SincFastFilter;
window_type=(FilterTypes) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->window=filters[window_type].function;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(double) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remains unchanged */
break;
default:
break;
}
/* Global Sharpening (regardless of orthogonal/cylindrical) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* gaussian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficients for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= 2*value; /* increase support linearly */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL)*MagickPI;
/* Define coefficients for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
/* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(double) lobes;
}
if (resize_filter->filter == Jinc)
{
/*
Convert a Jinc function lobes value to a real support value.
*/
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
/*
Blur this filter so support is an integer value (lobes dependent).
*/
if (filter_type == LanczosRadiusFilter)
resize_filter->blur*=floor(resize_filter->support)/
resize_filter->support;
}
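/*
Worked example (informative only): for a 3-lobe LanczosRadius filter the
support is 3.2383...; the blur is scaled by floor(3.2383)/3.2383 ~= 0.9264,
pulling the effective support back to the integer radius 3.
*/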
/*
Expert blur override.
*/
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(double) MagickEpsilon;
/*
Expert override of the support setting.
*/
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale the windowing function separately to the support 'clipping' window
that the calling operator is planning to actually use. (Expert override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust window function scaling to match windowing support for weighting
function. This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
Set Cubic Spline B,C values and calculate the cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
{
const double
twoB = B+B;
/*
Convert B,C values into cubic coefficients. See CubicBC().
*/
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
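/*
Worked example (informative only): for Mitchell (B=C=1/3) the coefficients
evaluate to
P: 8/9, -2, 7/6 and Q: 16/9, -10/3, 2, -7/18,
so CubicBC(0) = 8/9 and both pieces give B/6 = 1/18 at x=1.
*/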
/*
Expert Option Request for verbose details of the resulting filter.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
if (IfStringTrue(GetImageArtifact(image,"filter:verbose")))
{
double
support,
x;
/*
Set the weighting function properly when the weighting function
may not exactly match the filter of the same name. E.g., a Point
filter really uses a Box weighting function, with a different
support than is typically used.
*/
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,
"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(),(double)resize_filter->blur);
if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double)support);
if ( filter_type == CubicFilter || window_type == CubicFilter )
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
GetMagickPrecision(),(double)
GetResizeFilterWeight(resize_filter,x));
/*
A final value so gnuplot can graph the 'stop' properly.
*/
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resizes an image with pixel resampling.
%
% This is a shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing of images, a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
Image
*resize_image;
resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
exception);
return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*J1(x), where J1() is a rational polynomial approximation;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
double
sum,
t,
y;
register ssize_t
i;
/*
Modified Bessel function of the first kind, order 0.
*/
sum=1.0;
y=x*x/4.0;
t=y;
for (i=2; t > MagickEpsilon; i++)
{
sum+=t;
t*=y/((double) i*i);
}
return(sum);
}
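/*
Series used above (informative only): with y = x*x/4,
I0(x) = sum_{k>=0} y^k/(k!)^2,
and the loop exploits the term ratio t_{k+1}/t_k = y/(k+1)^2, stopping once
a term drops below MagickEpsilon.
*/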
#undef J1
static double J1(double x)
{
double
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.581199354001606143928050809e+21,
-0.6672106568924916298020941484e+20,
0.2316433580634002297931815435e+19,
-0.3588817569910106050743641413e+17,
0.2908795263834775409737601689e+15,
-0.1322983480332126453125473247e+13,
0.3413234182301700539091292655e+10,
-0.4695753530642995859767162166e+7,
0.270112271089232341485679099e+4
},
Qone[] =
{
0.11623987080032122878585294e+22,
0.1185770712190320999837113348e+20,
0.6092061398917521746105196863e+17,
0.2081661221307607351240184229e+15,
0.5243710262167649715406728642e+12,
0.1013863514358673989967045588e+10,
0.1501793594998585505921097578e+7,
0.1606931573481487801970916749e+4,
0.1e+1
};
p=Pone[8];
q=Qone[8];
for (i=7; i >= 0; i--)
{
p=p*x*x+Pone[i];
q=q*x*x+Qone[i];
}
return(p/q);
}
#undef P1
static double P1(double x)
{
double
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.352246649133679798341724373e+5,
0.62758845247161281269005675e+5,
0.313539631109159574238669888e+5,
0.49854832060594338434500455e+4,
0.2111529182853962382105718e+3,
0.12571716929145341558495e+1
},
Qone[] =
{
0.352246649133679798068390431e+5,
0.626943469593560511888833731e+5,
0.312404063819041039923015703e+5,
0.4930396490181088979386097e+4,
0.2030775189134759322293574e+3,
0.1e+1
};
p=Pone[5];
q=Qone[5];
for (i=4; i >= 0; i--)
{
p=p*(8.0/x)*(8.0/x)+Pone[i];
q=q*(8.0/x)*(8.0/x)+Qone[i];
}
return(p/q);
}
#undef Q1
static double Q1(double x)
{
double
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.3511751914303552822533318e+3,
0.7210391804904475039280863e+3,
0.4259873011654442389886993e+3,
0.831898957673850827325226e+2,
0.45681716295512267064405e+1,
0.3532840052740123642735e-1
},
Qone[] =
{
0.74917374171809127714519505e+4,
0.154141773392650970499848051e+5,
0.91522317015169922705904727e+4,
0.18111867005523513506724158e+4,
0.1038187585462133728776636e+3,
0.1e+1
};
p=Pone[5];
q=Qone[5];
for (i=4; i >= 0; i--)
{
p=p*(8.0/x)*(8.0/x)+Pone[i];
q=q*(8.0/x)*(8.0/x)+Qone[i];
}
return(p/q);
}
static double BesselOrderOne(double x)
{
double
p,
q;
if (x == 0.0)
return(0.0);
p=x;
if (x < 0.0)
x=(-x);
if (x < 8.0)
return(p*J1(x));
q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
cos((double) x))));
if (p < 0.0)
q=(-q);
return(q);
}
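/*
Worked check (informative only): for small x, j1(x) ~= x/2, so
Jinc(x) = BesselOrderOne(MagickPI*x)/x -> MagickPI/2 as x -> 0, matching
the special-cased return value of 0.5*MagickPI in Jinc() above.
*/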
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroys the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
resize_filter->signature=(~MagickSignature);
resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() returns the current support window size for this
% filter. Note that this may have been enlarged by the filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
MagickPrivate double *GetResizeFilterCoefficient(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return((double *) resize_filter->coefficient);
}
MagickPrivate double GetResizeFilterBlur(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->blur);
}
MagickPrivate double GetResizeFilterScale(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->scale);
}
MagickPrivate double GetResizeFilterWindowSupport(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->window_support);
}
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->filterWeightingType);
}
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->windowWeightingType);
}
MagickPrivate double GetResizeFilterSupport(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight() evaluates the specified resize filter at the point
% x, which usually lies between zero and the filter's current 'support', and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
const double x)
{
double
scale,
weight,
x_blur;
/*
Windowing function - scale the weighting filter by this amount.
*/
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickSignature);
x_blur=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */
if ((resize_filter->window_support < MagickEpsilon) ||
(resize_filter->window == Box))
scale=1.0; /* Point or Box Filter -- avoid division by zero */
else
{
scale=resize_filter->scale;
scale=resize_filter->window(x_blur*scale,resize_filter);
}
weight=scale*resize_filter->filter(x_blur,resize_filter);
return(weight);
}
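/*
Worked example (informative only): for the default 3-lobe Lanczos filter,
filter() and window() are both SincFast() and scale = 1/3 (first sinc
crossing divided by a window support of 3), so the weight reduces to the
classic
Lanczos3(x) = sinc(x)*sinc(x/3), |x| < 3,
with x first divided by any "filter:blur" factor.
*/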
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
const size_t columns,const size_t rows,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"
CacheView
*image_view,
*resize_view;
Image
*resize_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
scale;
ssize_t
y;
/*
Interpolatively resize image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
{
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
scale.x=(double) image->columns/resize_image->columns;
scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,resize_image,resize_image->rows,1)
#endif
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
PointInfo
offset;
register Quantum
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if (q == (Quantum *) NULL)
continue;
offset.y=((double) y+0.5)*scale.y-0.5;
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(resize_image,q) == 0)
{
q+=GetPixelChannels(resize_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(resize_image); i++)
{
PixelChannel
channel;
PixelTrait
resize_traits,
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
resize_traits=GetPixelChannelTraits(resize_image,channel);
if ((traits == UndefinedPixelTrait) ||
(resize_traits == UndefinedPixelTrait))
continue;
offset.x=((double) x+0.5)*scale.x-0.5;
status=InterpolatePixelChannels(image,image_view,resize_image,method,
offset.x,offset.y,q,exception);
}
q+=GetPixelChannels(resize_image);
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
resize_image=DestroyImage(resize_image);
return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
const size_t rows,const double delta_x,const double rigidity,
ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"
CacheView
*image_view,
*rescale_view;
gfloat
*packet,
*pixels;
Image
*rescale_image;
int
x_offset,
y_offset;
LqrCarver
*carver;
LqrRetVal
lqr_status;
MagickBooleanType
status;
MemoryInfo
*pixel_info;
register gfloat
*q;
ssize_t
y;
/*
Liquid rescale image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
if ((columns <= 2) || (rows <= 2))
return(ResizeImage(image,columns,rows,image->filter,exception));
if ((columns >= (2*image->columns)) || (rows >= (2*image->rows)))
{
Image
*resize_image;
size_t
height,
width;
/*
Honor liquid resize size limitations.
*/
for (width=image->columns; columns >= (2*width-1); width*=2) ;
for (height=image->rows; rows >= (2*height-1); height*=2) ;
resize_image=ResizeImage(image,width,height,image->filter,exception);
if (resize_image == (Image *) NULL)
return((Image *) NULL);
rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x,
rigidity,exception);
resize_image=DestroyImage(resize_image);
return(rescale_image);
}
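/*
Worked example (informative only): enlarging a 100-pixel dimension to 450
first doubles the width 100 -> 200 -> 400 (stopping when 450 < 2*400-1),
resizes to 400 with a conventional filter, and only then seam-carves
400 -> 450, keeping the carver within its 2x limit.
*/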
pixel_info=AcquireVirtualMemory(image->columns,image->rows*
GetPixelChannels(image)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
return((Image *) NULL);
pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
status=MagickTrue;
q=pixels;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
*q++=QuantumScale*p[i];
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
(int) GetPixelChannels(image),LQR_COLDEPTH_32F);
if (carver == (LqrCarver *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
lqr_carver_set_preserve_input_image(carver);
lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
(void) lqr_status;
rescale_image=CloneImage(image,lqr_carver_get_width(carver),
lqr_carver_get_height(carver),MagickTrue,exception);
if (rescale_image == (Image *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
return((Image *) NULL);
}
if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
rescale_image=DestroyImage(rescale_image);
return((Image *) NULL);
}
rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
(void) lqr_carver_scan_reset(carver);
while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
{
register Quantum
*restrict q;
register ssize_t
i;
q=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
exception);
if (q == (Quantum *) NULL)
break;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
rescale_traits,
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
rescale_traits=GetPixelChannelTraits(rescale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(rescale_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
packet[i]),q);
}
if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
break;
}
rescale_view=DestroyCacheView(rescale_view);
pixel_info=RelinquishVirtualMemory(pixel_info);
lqr_carver_destroy(carver);
return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
const size_t magick_unused(columns),const size_t magick_unused(rows),
const double magick_unused(delta_x),const double magick_unused(rigidity),
ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
(void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
"DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"
CacheView
*image_view,
*magnify_view;
Image
*magnify_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize magnified image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
exception);
if (magnify_image == (Image *) NULL)
return((Image *) NULL);
/*
Magnify image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,magnify_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/*
Magnify this row of pixels.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity[9];
register const Quantum
*restrict p;
register Quantum
*restrict r;
register ssize_t
i;
size_t
channels;
p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
channels=GetPixelChannels(image);
for (i=0; i < 9; i++)
intensity[i]=GetPixelIntensity(image,p+i*channels);
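/*
The 3x3 neighborhood is indexed p[0..8] row-major: intensity[1] is the
pixel above (N), intensity[3] the left (W), intensity[4] the center,
intensity[5] the right (E), and intensity[7] the pixel below (S). The
selection rules below match the EPX/Scale2x pixel-art scaling rule.
*/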
r=q;
if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
(fabs(intensity[3]-intensity[5]) < MagickEpsilon))
{
/*
Clone center pixel.
*/
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=(magnify_image->columns-1)*GetPixelChannels(magnify_image);
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
}
else
{
/*
Selectively clone pixel.
*/
if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[3*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[5*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=(magnify_image->columns-1)*GetPixelChannels(magnify_image);
if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[3*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[5*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
}
q+=2*GetPixelChannels(magnify_image);
}
if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MagnifyImage)
#endif
proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
magnify_view=DestroyCacheView(magnify_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
magnify_image=DestroyImage(magnify_image);
return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
Image
*minify_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
exception);
return(minify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resizes an image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size, in real-world
% units, as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(const Image *image,const double x_resolution,
% const double y_resolution,const FilterTypes filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
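% For example (a sketch): a 600x600 pixel image whose stored resolution is
% 300 DPI, resampled to 72 DPI, yields a width of
%
%   (size_t) (72.0*600/300.0+0.5) = 144
%
% pixels, so it still measures two inches when displayed at 72 DPI.
%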
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
const double y_resolution,const FilterTypes filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"
Image
*resample_image;
size_t
height,
width;
/*
Initialize sampled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ?
72.0 : image->resolution.x)+0.5);
height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ?
72.0 : image->resolution.y)+0.5);
resample_image=ResizeImage(image,width,height,filter,exception);
if (resample_image != (Image *) NULL)
{
resample_image->resolution.x=x_resolution;
resample_image->resolution.y=y_resolution;
}
return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(const Image *image,const size_t columns,const size_t rows,
% const FilterTypes filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
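% A minimal usage sketch (hypothetical caller; halve the image with the
% Lanczos filter):
%
% Image *resize_image=ResizeImage(image,image->columns/2,image->rows/2,
%   LanczosFilter,exception);
% if (resize_image != (Image *) NULL)
%   resize_image=DestroyImage(resize_image);
%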
*/
typedef struct _ContributionInfo
{
double
weight;
ssize_t
pixel;
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
ContributionInfo **contribution)
{
register ssize_t
i;
assert(contribution != (ContributionInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (contribution[i] != (ContributionInfo *) NULL)
contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
contribution[i]);
contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
return(contribution);
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
register ssize_t
i;
ContributionInfo
**contribution;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
sizeof(*contribution));
if (contribution == (ContributionInfo **) NULL)
return((ContributionInfo **) NULL);
(void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
for (i=0; i < (ssize_t) number_threads; i++)
{
contribution[i]=(ContributionInfo *) MagickAssumeAligned(
AcquireAlignedMemory(count,sizeof(**contribution)));
if (contribution[i] == (ContributionInfo *) NULL)
return(DestroyContributionThreadSet(contribution));
}
return(contribution);
}
static inline double MagickMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
static inline double MagickMin(const double x,const double y)
{
if (x < y)
return(x);
return(y);
}
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const double x_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**restrict contributions;
MagickBooleanType
status;
double
scale,
support;
ssize_t
x;
/*
Apply filter to resize horizontally from image to resize image.
*/
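/*
Weighting sketch (my reading of the loop below): each destination column x
pulls from a source window centered at bisect=(x+0.5)/x_factor; every
source pixel in [start,stop) is weighted by the filter kernel evaluated at
scale*(distance from the pixel's center to bisect), and the weights are
then normalized so they sum to 1.
*/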
scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
return(MagickFalse);
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point sampling.
*/
support=(double) 0.5;
scale=1.0;
}
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,resize_image,resize_image->columns,1)
#endif
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
const int
id = GetOpenMPThreadId();
double
bisect,
density;
register const Quantum
*restrict p;
register ContributionInfo
*restrict contribution;
register Quantum
*restrict q;
register ssize_t
y;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((double) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
(contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(resize_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
resize_traits,
traits;
register ssize_t
j;
ssize_t
k;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
resize_traits=GetPixelChannelTraits(resize_image,channel);
if ((traits == UndefinedPixelTrait) ||
(resize_traits == UndefinedPixelTrait))
continue;
if (((resize_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(resize_image,q) == 0))
{
j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
stop-1.0)+0.5);
k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[j-start].pixel-contribution[0].pixel);
SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
q);
continue;
}
pixel=0.0;
if ((resize_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (j=0; j < n; j++)
{
k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[j].pixel-contribution[0].pixel);
alpha=contribution[j].weight;
pixel+=alpha*p[k*GetPixelChannels(image)+i];
}
SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
continue;
}
/*
Alpha blending.
*/
gamma=0.0;
for (j=0; j < n; j++)
{
k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[j].pixel-contribution[0].pixel);
alpha=contribution[j].weight*QuantumScale*
GetPixelAlpha(image,p+k*GetPixelChannels(image));
pixel+=alpha*p[k*GetPixelChannels(image)+i];
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(resize_image);
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_HorizontalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const double y_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**restrict contributions;
double
scale,
support;
MagickBooleanType
status;
ssize_t
y;
/*
Apply filter to resize vertically from image to resize image.
*/
scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
return(MagickFalse);
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point sampling.
*/
support=(double) 0.5;
scale=1.0;
}
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,resize_image,resize_image->rows,1)
#endif
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
double
bisect,
density;
register const Quantum
*restrict p;
register ContributionInfo
*restrict contribution;
register Quantum
*restrict q;
register ssize_t
x;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((double) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
exception);
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(resize_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
resize_traits,
traits;
register ssize_t
j;
ssize_t
k;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
resize_traits=GetPixelChannelTraits(resize_image,channel);
if ((traits == UndefinedPixelTrait) ||
(resize_traits == UndefinedPixelTrait))
continue;
if (((resize_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(resize_image,q) == 0))
{
j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
stop-1.0)+0.5);
k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
image->columns+x);
SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
q);
continue;
}
pixel=0.0;
if ((resize_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (j=0; j < n; j++)
{
k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[j].weight;
pixel+=alpha*p[k*GetPixelChannels(image)+i];
}
SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
continue;
}
gamma=0.0;
for (j=0; j < n; j++)
{
k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
GetPixelChannels(image));
pixel+=alpha*p[k*GetPixelChannels(image)+i];
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(resize_image);
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_VerticalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
const size_t rows,const FilterTypes filter,ExceptionInfo *exception)
{
double
x_factor,
y_factor;
FilterTypes
filter_type;
Image
*filter_image,
*resize_image;
MagickOffsetType
offset;
MagickSizeType
span;
MagickStatusType
status;
ResizeFilter
*resize_filter;
/*
Acquire resize image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows) &&
(filter == UndefinedFilter))
return(CloneImage(image,0,0,MagickTrue,exception));
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
return(resize_image);
/*
Acquire resize filter.
*/
x_factor=(double) columns/(double) image->columns;
y_factor=(double) rows/(double) image->rows;
if (x_factor > y_factor)
filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
else
filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
if (filter_image == (Image *) NULL)
return(DestroyImage(resize_image));
filter_type=LanczosFilter;
if (filter != UndefinedFilter)
filter_type=filter;
else
if ((x_factor == 1.0) && (y_factor == 1.0))
filter_type=PointFilter;
else
if ((image->storage_class == PseudoClass) ||
(image->alpha_trait == BlendPixelTrait) ||
((x_factor*y_factor) > 1.0))
filter_type=MitchellFilter;
resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
/*
Resize image.
*/
offset=0;
if (x_factor > y_factor)
{
span=(MagickSizeType) (filter_image->columns+rows);
status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
&offset,exception);
status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
span,&offset,exception);
}
else
{
span=(MagickSizeType) (filter_image->rows+columns);
status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
&offset,exception);
status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
span,&offset,exception);
}
/*
Free resources.
*/
filter_image=DestroyImage(filter_image);
resize_filter=DestroyResizeFilter(resize_filter);
if (status == MagickFalse)
{
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
resize_image->type=image->type;
return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
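% The sampling point within each region can be moved with the
% "sample:offset" artifact, given as a percentage. For example (a sketch):
%
%   (void) SetImageArtifact(image,"sample:offset","10x90");
%
% samples 10% in from the left of each region in x and 90% down in y,
% instead of the default mid-point (50%).
%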
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"
CacheView
*image_view,
*sample_view;
Image
*sample_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
x;
ssize_t
*x_offset,
y;
PointInfo
sample_offset;
/*
Initialize sampled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (sample_image == (Image *) NULL)
return((Image *) NULL);
/*
Set the sampling offset; the default is the mid-point of each sample region.
*/
sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
{
const char
*value;
value=GetImageArtifact(image,"sample:offset");
if (value != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(value,&geometry_info);
sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
if ((flags & SigmaValue) != 0)
sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
}
}
/*
Allocate scan line buffer and column offset buffers.
*/
x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
sizeof(*x_offset));
if (x_offset == (ssize_t *) NULL)
{
sample_image=DestroyImage(sample_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (x=0; x < (ssize_t) sample_image->columns; x++)
x_offset[x]=(ssize_t) ((((double) x+sample_offset.x)*image->columns)/
sample_image->columns);
/*
Sample each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,sample_image,1,1)
#endif
for (y=0; y < (ssize_t) sample_image->rows; y++)
{
register const Quantum
*restrict p;
register Quantum
*restrict q;
register ssize_t
x;
ssize_t
y_offset;
if (status == MagickFalse)
continue;
y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
sample_image->rows);
p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/*
Sample each column.
*/
for (x=0; x < (ssize_t) sample_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(sample_image,q) == 0)
{
q+=GetPixelChannels(sample_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
{
PixelChannel
channel;
PixelTrait
sample_traits,
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
sample_traits=GetPixelChannelTraits(sample_image,channel);
if ((traits == UndefinedPixelTrait) ||
(sample_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
image)+i],q);
}
q+=GetPixelChannels(sample_image);
}
if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SampleImage)
#endif
proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
sample_view=DestroyCacheView(sample_view);
x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
sample_image->type=image->type;
if (status == MagickFalse)
sample_image=DestroyImage(sample_image);
return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"
CacheView
*image_view,
*scale_view;
double
alpha,
gamma,
pixel[CompositePixelChannel],
*scale_scanline,
*scanline,
*x_vector,
*y_vector;
Image
*scale_image;
MagickBooleanType
next_column,
next_row,
proceed,
status;
PixelChannel
channel;
PixelTrait
scale_traits,
traits;
PointInfo
scale,
span;
register ssize_t
i;
ssize_t
n,
number_rows,
y;
/*
Initialize scaled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (scale_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
{
scale_image=DestroyImage(scale_image);
return((Image *) NULL);
}
/*
Allocate memory.
*/
x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
GetPixelChannels(image)*sizeof(*x_vector));
scanline=x_vector;
if (image->rows != scale_image->rows)
scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
GetPixelChannels(image)*sizeof(*scanline));
scale_scanline=(double *) AcquireQuantumMemory((size_t)
scale_image->columns,MaxPixelChannels*sizeof(*scale_scanline));
y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
GetPixelChannels(image)*sizeof(*y_vector));
if ((scanline == (double *) NULL) ||
(scale_scanline == (double *) NULL) ||
(x_vector == (double *) NULL) ||
(y_vector == (double *) NULL))
{
scale_image=DestroyImage(scale_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Scale image.
*/
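/*
A sketch of the scheme below: scaling is a running area average. span.y
tracks how much of the current output row is still unfilled and scale.y
how much of the current input row is still unconsumed; input rows are
accumulated into y_vector weighted by their overlap with the output row,
and each finished row is emitted through scanline. The x direction then
repeats the same idea per column.
*/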
number_rows=0;
next_row=MagickTrue;
span.y=1.0;
scale.y=(double) scale_image->rows/(double) image->rows;
for (i=0; i < (ssize_t) (GetPixelChannels(image)*image->columns); i++)
y_vector[i]=0.0;
n=0;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
scale_view=AcquireAuthenticCacheView(scale_image,exception);
for (y=0; y < (ssize_t) scale_image->rows; y++)
{
register const Quantum
*restrict p;
register Quantum
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
break;
q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
alpha=1.0;
if (scale_image->rows == image->rows)
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
alpha=QuantumScale*GetPixelAlpha(image,p);
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
}
else
{
/*
Scale Y direction.
*/
while (scale.y < span.y)
{
if ((next_row != MagickFalse) &&
(number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double)
p[i];
continue;
}
alpha=QuantumScale*GetPixelAlpha(image,p);
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
}
for (x=0; x < (ssize_t) image->columns; x++)
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
y_vector[x*GetPixelChannels(image)+i]+=scale.y*
x_vector[x*GetPixelChannels(image)+i];
span.y-=scale.y;
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
alpha=QuantumScale*GetPixelAlpha(image,p);
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
next_row=MagickFalse;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
x_vector[x*GetPixelChannels(image)+i];
scanline[x*GetPixelChannels(image)+i]=pixel[i];
y_vector[x*GetPixelChannels(image)+i]=0.0;
}
}
scale.y-=span.y;
if (scale.y <= 0)
{
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
span.y=1.0;
}
if (scale_image->columns == image->columns)
{
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelReadMask(scale_image,q) == 0)
{
q+=GetPixelChannels(scale_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(scale_image); i++)
{
ssize_t
offset;
channel=GetPixelChannelChannel(scale_image,i);
traits=GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
offset=GetPixelChannelOffset(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scanline[x*GetPixelChannels(image)+offset]),q);
continue;
}
alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
GetPixelChannelChannel(image,AlphaPixelChannel)];
gamma=PerceptibleReciprocal(alpha);
SetPixelChannel(scale_image,channel,ClampToQuantum(gamma*scanline[
x*GetPixelChannels(image)+offset]),q);
}
q+=GetPixelChannels(scale_image);
}
}
else
{
ssize_t
n;
/*
Scale X direction.
*/
next_column=MagickFalse;
n=0;
span.x=1.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
scale.x=(double) scale_image->columns/(double) image->columns;
while (scale.x >= span.x)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
n++;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
scale_scanline[n*MaxPixelChannels+channel]=pixel[i];
}
scale.x-=span.x;
span.x=1.0;
next_column=MagickTrue;
}
if (scale.x > 0)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
n++;
next_column=MagickFalse;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
span.x-=scale.x;
}
}
if (span.x > 0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
}
if ((next_column == MagickFalse) &&
((ssize_t) n < (ssize_t) scale_image->columns))
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
channel=GetPixelChannelChannel(image,i);
scale_scanline[n*MaxPixelChannels+channel]=pixel[i];
}
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelReadMask(scale_image,q) == 0)
{
q+=GetPixelChannels(scale_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(scale_image); i++)
{
channel=GetPixelChannelChannel(scale_image,i);
traits=GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scale_scanline[x*MaxPixelChannels+channel]),q);
continue;
}
SetPixelChannel(scale_image,channel,ClampToQuantum(scale_scanline[
x*MaxPixelChannels+channel]),q);
}
q+=GetPixelChannels(scale_image);
}
}
if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
{
status=MagickFalse;
break;
}
proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
scale_view=DestroyCacheView(scale_view);
image_view=DestroyCacheView(image_view);
/*
Free allocated memory.
*/
y_vector=(double *) RelinquishMagickMemory(y_vector);
scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
if (scale_image->rows != image->rows)
scanline=(double *) RelinquishMagickMemory(scanline);
x_vector=(double *) RelinquishMagickMemory(x_vector);
scale_image->type=image->type;
if (status == MagickFalse)
scale_image=DestroyImage(scale_image);
return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small, low-cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
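% A minimal usage sketch (hypothetical caller; a 128x128 Web thumbnail):
%
% Image *thumbnail_image=ThumbnailImage(image,128,128,exception);
% if (thumbnail_image != (Image *) NULL)
%   thumbnail_image=DestroyImage(thumbnail_image);
%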
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5
char
value[MaxTextExtent];
const char
*name;
Image
*thumbnail_image;
double
x_factor,
y_factor;
size_t
version;
struct stat
attributes;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
x_factor=(double) columns/(double) image->columns;
y_factor=(double) rows/(double) image->rows;
if ((x_factor*y_factor) > 0.1)
thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
else
if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
else
{
Image
*sample_image;
sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
exception);
if (sample_image == (Image *) NULL)
return((Image *) NULL);
thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
exception);
sample_image=DestroyImage(sample_image);
}
if (thumbnail_image == (Image *) NULL)
return(thumbnail_image);
(void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
if (thumbnail_image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
thumbnail_image->depth=8;
thumbnail_image->interlace=NoInterlace;
/*
Strip all profiles except color profiles.
*/
ResetImageProfileIterator(thumbnail_image);
for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
{
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
{
(void) DeleteImageProfile(thumbnail_image,name);
ResetImageProfileIterator(thumbnail_image);
}
name=GetNextImageProfile(thumbnail_image);
}
(void) DeleteImageProperty(thumbnail_image,"comment");
(void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
if (strstr(image->magick_filename,"//") == (char *) NULL)
(void) FormatLocaleString(value,MaxTextExtent,"file://%s",
image->magick_filename);
(void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
(void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
if (IfMagickTrue(GetPathAttributes(image->filename,&attributes)))
{
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
attributes.st_mtime);
(void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
}
(void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
(void) ConcatenateMagickString(value,"B",MaxTextExtent);
(void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
(void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
LocaleLower(value);
(void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
(void) SetImageProperty(thumbnail_image,"software",GetMagickVersion(&version),
exception);
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
image->magick_columns);
(void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
exception);
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
image->magick_rows);
(void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
exception);
(void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
GetImageListLength(image));
(void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
exception);
return(thumbnail_image);
}
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <cinttypes>
#include <iostream>
#include <type_traits>
#include <algorithm>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
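// Usage sketch (assuming a CSRGraph<int32_t> g built elsewhere, e.g. by a
// Builder):
//   for (int32_t u : g.vertices())
//     for (int32_t v : g.out_neigh(u))
//       ;  // visit edge u -> v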
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
NodeID_ v;
WeightT_ w;
NodeWeight() {}
NodeWeight(NodeID_ v) : v(v), w(1) {}
NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
bool operator< (const NodeWeight& rhs) const {
return v == rhs.v ? w < rhs.w : v < rhs.v;
}
// doesn't check WeightT_s, needed to remove duplicate edges
bool operator== (const NodeWeight& rhs) const {
return v == rhs.v;
}
// doesn't check WeightT_s, needed to remove self edges
bool operator== (const NodeID_& rhs) const {
return v == rhs;
}
operator NodeID_() {
return v;
}
};
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
const NodeWeight<NodeID_, WeightT_>& nw) {
os << nw.v << " " << nw.w;
return os;
}
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
is >> nw.v >> nw.w;
return is;
}
// Syntactic sugar for an edge
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u;
DstT v;
EdgePair() {}
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
public:
Neighborhood(NodeID_ n, DestID_** g_index) : n_(n), g_index_(g_index) {}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_]; }
iterator end() { return g_index_[n_+1]; }
};
void ReleaseResources() {
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr)
delete[] in_index_;
if (in_neighbors_ != nullptr)
delete[] in_neighbors_;
}
}
public:
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr) {}
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs) {
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) {
if (out_index_ != nullptr)
num_edges_ = out_index_[num_nodes_] - out_index_[0];
else
num_edges_ = in_index_[num_nodes_] - in_index_[0];
}
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
~CSRGraph() {
ReleaseResources();
}
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
return *this;
}
void setGraphProperties(int64_t nodes, int64_t edges, bool isDirected)
{
num_nodes_ = nodes;
num_edges_ = edges;
directed_ = isDirected;
}
void setGraphDatastructures(DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs)
{
out_index_ = out_index;
out_neighbors_ = out_neighs;
in_index_ = in_index;
in_neighbors_ = in_neighs;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n) const {
return Neighborhood(n, out_index_);
}
// Is m a neighbor of n?
bool isNeighbor(NodeID_ n, NodeID_ m) const {
return std::binary_search(out_index_[n], out_index_[n+1], m);
}
Neighborhood in_neigh(NodeID_ n) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_);
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
#if 0
static DestID_** relabelIndex(const pvector<SGOffset> &offsets, DestID_* neighs, std::map<NodeID_, int64_t> reMap) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
#endif
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
DestID_** returnOffsetsArray()
{
//PageRank specific
return in_index_;
}
DestID_* returnCoordsArray()
{
//PageRank specific
return in_neighbors_;
}
DestID_** out_index()
{
return out_index_;
}
DestID_* out_neighbors()
{
return out_neighbors_;
}
DestID_** in_index()
{
return in_index_;
}
DestID_* in_neighbors()
{
return in_neighbors_;
}
private:
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
};
#endif // GRAPH_H_
|
scheduleg-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
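// Usage sketch (binary name hypothetical):
//   ./scheduleg-clause <iterations (capped at 20)> <chunk>
// e.g. ./scheduleg-clause 20 2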
int main(int argc, char **argv) {
int i, n = 20, chunk, a[n], suma = 0;
if(argc < 3) {
fprintf(stderr,"\nFalta iteraciones y/o chunk \n");
exit(-1);
}
n = atoi(argv[1]);
if (n > 20) n = 20;  /* a[] was declared while n == 20 */
chunk = atoi(argv[2]);
for (i=0; i<n; i++) a[i] = i;
#pragma omp parallel for firstprivate(suma) \
lastprivate(suma) schedule(guided,chunk)
//With schedule(guided,chunk), each idle thread grabs a chunk whose size
//is proportional to the iterations still remaining divided by the number
//of threads, so chunks start large and shrink as the loop progresses;
//the chunk argument sets the minimum chunk size (except possibly for the
//final chunk).
for (i=0; i<n; i++)
{
suma = suma + a[i];
printf(" thread %d adds a[%d]=%d suma=%d \n",
omp_get_thread_num(),i,a[i],suma);
}
printf("Fuera de 'parallel for' suma=%d\n",suma);
return(0);
}
|
convolution_3x3_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
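// Orientation note (my reading of the layout, not official ncnn docs):
// "pack1to4" means the input blob stores one channel per element while the
// output packs four output channels per 4-lane MSA vector; each iteration
// of the p loop below produces four output channels at once, broadcasting
// scalar input pixels (__msa_splati_w) against packed kernel vectors.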
static void conv3x3s1_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
int q = 0;
for (; q < inch; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(outptr0 + 4, 0);
v4f32 _sum2 = (v4f32)__msa_ld_w(outptr0 + 4 * 2, 0);
v4f32 _sum3 = (v4f32)__msa_ld_w(outptr0 + 4 * 3, 0);
v4f32 _sum4 = (v4f32)__msa_ld_w(outptr0 + 4 * 4, 0);
v4f32 _sum5 = (v4f32)__msa_ld_w(outptr0 + 4 * 5, 0);
v4f32 _sum6 = (v4f32)__msa_ld_w(outptr0 + 4 * 6, 0);
v4f32 _sum7 = (v4f32)__msa_ld_w(outptr0 + 4 * 7, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4i32 _r0n = __msa_ld_w(r0 + 4, 0);
v4i32 _r0nn = __msa_ld_w(r0 + 8, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
v4f32 _r03 = (v4f32)__msa_splati_w(_r0, 3);
v4f32 _r04 = (v4f32)__msa_splati_w(_r0n, 0);
v4f32 _r05 = (v4f32)__msa_splati_w(_r0n, 1);
v4f32 _r06 = (v4f32)__msa_splati_w(_r0n, 2);
v4f32 _r07 = (v4f32)__msa_splati_w(_r0n, 3);
v4f32 _r08 = (v4f32)__msa_splati_w(_r0nn, 0);
v4f32 _r09 = (v4f32)__msa_splati_w(_r0nn, 1);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum1 = __msa_fmadd_w(_sum1, _r01, _k00);
_sum2 = __msa_fmadd_w(_sum2, _r02, _k00);
_sum3 = __msa_fmadd_w(_sum3, _r03, _k00);
_sum4 = __msa_fmadd_w(_sum4, _r04, _k00);
_sum5 = __msa_fmadd_w(_sum5, _r05, _k00);
_sum6 = __msa_fmadd_w(_sum6, _r06, _k00);
_sum7 = __msa_fmadd_w(_sum7, _r07, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum1 = __msa_fmadd_w(_sum1, _r02, _k01);
_sum2 = __msa_fmadd_w(_sum2, _r03, _k01);
_sum3 = __msa_fmadd_w(_sum3, _r04, _k01);
_sum4 = __msa_fmadd_w(_sum4, _r05, _k01);
_sum5 = __msa_fmadd_w(_sum5, _r06, _k01);
_sum6 = __msa_fmadd_w(_sum6, _r07, _k01);
_sum7 = __msa_fmadd_w(_sum7, _r08, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
_sum1 = __msa_fmadd_w(_sum1, _r03, _k02);
_sum2 = __msa_fmadd_w(_sum2, _r04, _k02);
_sum3 = __msa_fmadd_w(_sum3, _r05, _k02);
_sum4 = __msa_fmadd_w(_sum4, _r06, _k02);
_sum5 = __msa_fmadd_w(_sum5, _r07, _k02);
_sum6 = __msa_fmadd_w(_sum6, _r08, _k02);
_sum7 = __msa_fmadd_w(_sum7, _r09, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4i32 _r1n = __msa_ld_w(r1 + 4, 0);
v4i32 _r1nn = __msa_ld_w(r1 + 8, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
v4f32 _r13 = (v4f32)__msa_splati_w(_r1, 3);
v4f32 _r14 = (v4f32)__msa_splati_w(_r1n, 0);
v4f32 _r15 = (v4f32)__msa_splati_w(_r1n, 1);
v4f32 _r16 = (v4f32)__msa_splati_w(_r1n, 2);
v4f32 _r17 = (v4f32)__msa_splati_w(_r1n, 3);
v4f32 _r18 = (v4f32)__msa_splati_w(_r1nn, 0);
v4f32 _r19 = (v4f32)__msa_splati_w(_r1nn, 1);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum1 = __msa_fmadd_w(_sum1, _r11, _k10);
_sum2 = __msa_fmadd_w(_sum2, _r12, _k10);
_sum3 = __msa_fmadd_w(_sum3, _r13, _k10);
_sum4 = __msa_fmadd_w(_sum4, _r14, _k10);
_sum5 = __msa_fmadd_w(_sum5, _r15, _k10);
_sum6 = __msa_fmadd_w(_sum6, _r16, _k10);
_sum7 = __msa_fmadd_w(_sum7, _r17, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum1 = __msa_fmadd_w(_sum1, _r12, _k11);
_sum2 = __msa_fmadd_w(_sum2, _r13, _k11);
_sum3 = __msa_fmadd_w(_sum3, _r14, _k11);
_sum4 = __msa_fmadd_w(_sum4, _r15, _k11);
_sum5 = __msa_fmadd_w(_sum5, _r16, _k11);
_sum6 = __msa_fmadd_w(_sum6, _r17, _k11);
_sum7 = __msa_fmadd_w(_sum7, _r18, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
_sum1 = __msa_fmadd_w(_sum1, _r13, _k12);
_sum2 = __msa_fmadd_w(_sum2, _r14, _k12);
_sum3 = __msa_fmadd_w(_sum3, _r15, _k12);
_sum4 = __msa_fmadd_w(_sum4, _r16, _k12);
_sum5 = __msa_fmadd_w(_sum5, _r17, _k12);
_sum6 = __msa_fmadd_w(_sum6, _r18, _k12);
_sum7 = __msa_fmadd_w(_sum7, _r19, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4i32 _r2n = __msa_ld_w(r2 + 4, 0);
v4i32 _r2nn = __msa_ld_w(r2 + 8, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
v4f32 _r23 = (v4f32)__msa_splati_w(_r2, 3);
v4f32 _r24 = (v4f32)__msa_splati_w(_r2n, 0);
v4f32 _r25 = (v4f32)__msa_splati_w(_r2n, 1);
v4f32 _r26 = (v4f32)__msa_splati_w(_r2n, 2);
v4f32 _r27 = (v4f32)__msa_splati_w(_r2n, 3);
v4f32 _r28 = (v4f32)__msa_splati_w(_r2nn, 0);
v4f32 _r29 = (v4f32)__msa_splati_w(_r2nn, 1);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum1 = __msa_fmadd_w(_sum1, _r21, _k20);
_sum2 = __msa_fmadd_w(_sum2, _r22, _k20);
_sum3 = __msa_fmadd_w(_sum3, _r23, _k20);
_sum4 = __msa_fmadd_w(_sum4, _r24, _k20);
_sum5 = __msa_fmadd_w(_sum5, _r25, _k20);
_sum6 = __msa_fmadd_w(_sum6, _r26, _k20);
_sum7 = __msa_fmadd_w(_sum7, _r27, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum1 = __msa_fmadd_w(_sum1, _r22, _k21);
_sum2 = __msa_fmadd_w(_sum2, _r23, _k21);
_sum3 = __msa_fmadd_w(_sum3, _r24, _k21);
_sum4 = __msa_fmadd_w(_sum4, _r25, _k21);
_sum5 = __msa_fmadd_w(_sum5, _r26, _k21);
_sum6 = __msa_fmadd_w(_sum6, _r27, _k21);
_sum7 = __msa_fmadd_w(_sum7, _r28, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
_sum1 = __msa_fmadd_w(_sum1, _r23, _k22);
_sum2 = __msa_fmadd_w(_sum2, _r24, _k22);
_sum3 = __msa_fmadd_w(_sum3, _r25, _k22);
_sum4 = __msa_fmadd_w(_sum4, _r26, _k22);
_sum5 = __msa_fmadd_w(_sum5, _r27, _k22);
_sum6 = __msa_fmadd_w(_sum6, _r28, _k22);
_sum7 = __msa_fmadd_w(_sum7, _r29, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0);
outptr0 += 4 * 8;
r0 += 8;
r1 += 8;
r2 += 8;
}
for (; j + 3 < outw; j += 4)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(outptr0 + 4, 0);
v4f32 _sum2 = (v4f32)__msa_ld_w(outptr0 + 4 * 2, 0);
v4f32 _sum3 = (v4f32)__msa_ld_w(outptr0 + 4 * 3, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4i32 _r0n = __msa_ld_w(r0 + 4, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
v4f32 _r03 = (v4f32)__msa_splati_w(_r0, 3);
v4f32 _r04 = (v4f32)__msa_splati_w(_r0n, 0);
v4f32 _r05 = (v4f32)__msa_splati_w(_r0n, 1);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum1 = __msa_fmadd_w(_sum1, _r01, _k00);
_sum2 = __msa_fmadd_w(_sum2, _r02, _k00);
_sum3 = __msa_fmadd_w(_sum3, _r03, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum1 = __msa_fmadd_w(_sum1, _r02, _k01);
_sum2 = __msa_fmadd_w(_sum2, _r03, _k01);
_sum3 = __msa_fmadd_w(_sum3, _r04, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
_sum1 = __msa_fmadd_w(_sum1, _r03, _k02);
_sum2 = __msa_fmadd_w(_sum2, _r04, _k02);
_sum3 = __msa_fmadd_w(_sum3, _r05, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4i32 _r1n = __msa_ld_w(r1 + 4, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
v4f32 _r13 = (v4f32)__msa_splati_w(_r1, 3);
v4f32 _r14 = (v4f32)__msa_splati_w(_r1n, 0);
v4f32 _r15 = (v4f32)__msa_splati_w(_r1n, 1);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum1 = __msa_fmadd_w(_sum1, _r11, _k10);
_sum2 = __msa_fmadd_w(_sum2, _r12, _k10);
_sum3 = __msa_fmadd_w(_sum3, _r13, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum1 = __msa_fmadd_w(_sum1, _r12, _k11);
_sum2 = __msa_fmadd_w(_sum2, _r13, _k11);
_sum3 = __msa_fmadd_w(_sum3, _r14, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
_sum1 = __msa_fmadd_w(_sum1, _r13, _k12);
_sum2 = __msa_fmadd_w(_sum2, _r14, _k12);
_sum3 = __msa_fmadd_w(_sum3, _r15, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4i32 _r2n = __msa_ld_w(r2 + 4, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
v4f32 _r23 = (v4f32)__msa_splati_w(_r2, 3);
v4f32 _r24 = (v4f32)__msa_splati_w(_r2n, 0);
v4f32 _r25 = (v4f32)__msa_splati_w(_r2n, 1);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum1 = __msa_fmadd_w(_sum1, _r21, _k20);
_sum2 = __msa_fmadd_w(_sum2, _r22, _k20);
_sum3 = __msa_fmadd_w(_sum3, _r23, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum1 = __msa_fmadd_w(_sum1, _r22, _k21);
_sum2 = __msa_fmadd_w(_sum2, _r23, _k21);
_sum3 = __msa_fmadd_w(_sum3, _r24, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
_sum1 = __msa_fmadd_w(_sum1, _r23, _k22);
_sum2 = __msa_fmadd_w(_sum2, _r24, _k22);
_sum3 = __msa_fmadd_w(_sum3, _r25, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
outptr0 += 4 * 4;
r0 += 4;
r1 += 4;
r2 += 4;
}
for (; j + 1 < outw; j += 2)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(outptr0 + 4, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
v4f32 _r03 = (v4f32)__msa_splati_w(_r0, 3);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum1 = __msa_fmadd_w(_sum1, _r01, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum1 = __msa_fmadd_w(_sum1, _r02, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
_sum1 = __msa_fmadd_w(_sum1, _r03, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
v4f32 _r13 = (v4f32)__msa_splati_w(_r1, 3);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum1 = __msa_fmadd_w(_sum1, _r11, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum1 = __msa_fmadd_w(_sum1, _r12, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
_sum1 = __msa_fmadd_w(_sum1, _r13, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
v4f32 _r23 = (v4f32)__msa_splati_w(_r2, 3);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum1 = __msa_fmadd_w(_sum1, _r21, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum1 = __msa_fmadd_w(_sum1, _r22, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
_sum1 = __msa_fmadd_w(_sum1, _r23, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 2;
r1 += 2;
r2 += 2;
}
for (; j < outw; j++)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 1;
r1 += 1;
r2 += 1;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
}
}
}
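// conv3x3s2_pack1to4: stride-2 3x3 convolution from 1-element-packed input to
// 4-element-packed output. Each input channel contributes nine v4f32 kernel
// columns (_k00.._k22); the inner loops below consume 8/4/2/1 output
// positions per iteration.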
static void conv3x3s2_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
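// after each output row the inner loops advance r0/r1/r2 by 2*outw floats
// (stride 2), so tailstep = 2*w - 2*outw skips to the start of the next
// input row pair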
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
int q = 0;
for (; q < inch; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(outptr0 + 4, 0);
v4f32 _sum2 = (v4f32)__msa_ld_w(outptr0 + 4 * 2, 0);
v4f32 _sum3 = (v4f32)__msa_ld_w(outptr0 + 4 * 3, 0);
v4f32 _sum4 = (v4f32)__msa_ld_w(outptr0 + 4 * 4, 0);
v4f32 _sum5 = (v4f32)__msa_ld_w(outptr0 + 4 * 5, 0);
v4f32 _sum6 = (v4f32)__msa_ld_w(outptr0 + 4 * 6, 0);
v4f32 _sum7 = (v4f32)__msa_ld_w(outptr0 + 4 * 7, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4i32 _r0n = __msa_ld_w(r0 + 4, 0);
v4i32 _r0nn = __msa_ld_w(r0 + 8, 0);
v4i32 _r0nnn = __msa_ld_w(r0 + 12, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
v4f32 _r03 = (v4f32)__msa_splati_w(_r0, 3);
v4f32 _r04 = (v4f32)__msa_splati_w(_r0n, 0);
v4f32 _r05 = (v4f32)__msa_splati_w(_r0n, 1);
v4f32 _r06 = (v4f32)__msa_splati_w(_r0n, 2);
v4f32 _r07 = (v4f32)__msa_splati_w(_r0n, 3);
v4f32 _r08 = (v4f32)__msa_splati_w(_r0nn, 0);
v4f32 _r09 = (v4f32)__msa_splati_w(_r0nn, 1);
v4f32 _r0a = (v4f32)__msa_splati_w(_r0nn, 2);
v4f32 _r0b = (v4f32)__msa_splati_w(_r0nn, 3);
v4f32 _r0c = (v4f32)__msa_splati_w(_r0nnn, 0);
v4f32 _r0d = (v4f32)__msa_splati_w(_r0nnn, 1);
v4f32 _r0e = (v4f32)__msa_splati_w(_r0nnn, 2);
v4f32 _r0f = (v4f32)__msa_splati_w(_r0nnn, 3);
v4f32 _r0g = __msa_fill_w_f32(r0[16]);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum1 = __msa_fmadd_w(_sum1, _r02, _k00);
_sum2 = __msa_fmadd_w(_sum2, _r04, _k00);
_sum3 = __msa_fmadd_w(_sum3, _r06, _k00);
_sum4 = __msa_fmadd_w(_sum4, _r08, _k00);
_sum5 = __msa_fmadd_w(_sum5, _r0a, _k00);
_sum6 = __msa_fmadd_w(_sum6, _r0c, _k00);
_sum7 = __msa_fmadd_w(_sum7, _r0e, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum1 = __msa_fmadd_w(_sum1, _r03, _k01);
_sum2 = __msa_fmadd_w(_sum2, _r05, _k01);
_sum3 = __msa_fmadd_w(_sum3, _r07, _k01);
_sum4 = __msa_fmadd_w(_sum4, _r09, _k01);
_sum5 = __msa_fmadd_w(_sum5, _r0b, _k01);
_sum6 = __msa_fmadd_w(_sum6, _r0d, _k01);
_sum7 = __msa_fmadd_w(_sum7, _r0f, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
_sum1 = __msa_fmadd_w(_sum1, _r04, _k02);
_sum2 = __msa_fmadd_w(_sum2, _r06, _k02);
_sum3 = __msa_fmadd_w(_sum3, _r08, _k02);
_sum4 = __msa_fmadd_w(_sum4, _r0a, _k02);
_sum5 = __msa_fmadd_w(_sum5, _r0c, _k02);
_sum6 = __msa_fmadd_w(_sum6, _r0e, _k02);
_sum7 = __msa_fmadd_w(_sum7, _r0g, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4i32 _r1n = __msa_ld_w(r1 + 4, 0);
v4i32 _r1nn = __msa_ld_w(r1 + 8, 0);
v4i32 _r1nnn = __msa_ld_w(r1 + 12, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
v4f32 _r13 = (v4f32)__msa_splati_w(_r1, 3);
v4f32 _r14 = (v4f32)__msa_splati_w(_r1n, 0);
v4f32 _r15 = (v4f32)__msa_splati_w(_r1n, 1);
v4f32 _r16 = (v4f32)__msa_splati_w(_r1n, 2);
v4f32 _r17 = (v4f32)__msa_splati_w(_r1n, 3);
v4f32 _r18 = (v4f32)__msa_splati_w(_r1nn, 0);
v4f32 _r19 = (v4f32)__msa_splati_w(_r1nn, 1);
v4f32 _r1a = (v4f32)__msa_splati_w(_r1nn, 2);
v4f32 _r1b = (v4f32)__msa_splati_w(_r1nn, 3);
v4f32 _r1c = (v4f32)__msa_splati_w(_r1nnn, 0);
v4f32 _r1d = (v4f32)__msa_splati_w(_r1nnn, 1);
v4f32 _r1e = (v4f32)__msa_splati_w(_r1nnn, 2);
v4f32 _r1f = (v4f32)__msa_splati_w(_r1nnn, 3);
v4f32 _r1g = __msa_fill_w_f32(r1[16]);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum1 = __msa_fmadd_w(_sum1, _r12, _k10);
_sum2 = __msa_fmadd_w(_sum2, _r14, _k10);
_sum3 = __msa_fmadd_w(_sum3, _r16, _k10);
_sum4 = __msa_fmadd_w(_sum4, _r18, _k10);
_sum5 = __msa_fmadd_w(_sum5, _r1a, _k10);
_sum6 = __msa_fmadd_w(_sum6, _r1c, _k10);
_sum7 = __msa_fmadd_w(_sum7, _r1e, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum1 = __msa_fmadd_w(_sum1, _r13, _k11);
_sum2 = __msa_fmadd_w(_sum2, _r15, _k11);
_sum3 = __msa_fmadd_w(_sum3, _r17, _k11);
_sum4 = __msa_fmadd_w(_sum4, _r19, _k11);
_sum5 = __msa_fmadd_w(_sum5, _r1b, _k11);
_sum6 = __msa_fmadd_w(_sum6, _r1d, _k11);
_sum7 = __msa_fmadd_w(_sum7, _r1f, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
_sum1 = __msa_fmadd_w(_sum1, _r14, _k12);
_sum2 = __msa_fmadd_w(_sum2, _r16, _k12);
_sum3 = __msa_fmadd_w(_sum3, _r18, _k12);
_sum4 = __msa_fmadd_w(_sum4, _r1a, _k12);
_sum5 = __msa_fmadd_w(_sum5, _r1c, _k12);
_sum6 = __msa_fmadd_w(_sum6, _r1e, _k12);
_sum7 = __msa_fmadd_w(_sum7, _r1g, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4i32 _r2n = __msa_ld_w(r2 + 4, 0);
v4i32 _r2nn = __msa_ld_w(r2 + 8, 0);
v4i32 _r2nnn = __msa_ld_w(r2 + 12, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
v4f32 _r23 = (v4f32)__msa_splati_w(_r2, 3);
v4f32 _r24 = (v4f32)__msa_splati_w(_r2n, 0);
v4f32 _r25 = (v4f32)__msa_splati_w(_r2n, 1);
v4f32 _r26 = (v4f32)__msa_splati_w(_r2n, 2);
v4f32 _r27 = (v4f32)__msa_splati_w(_r2n, 3);
v4f32 _r28 = (v4f32)__msa_splati_w(_r2nn, 0);
v4f32 _r29 = (v4f32)__msa_splati_w(_r2nn, 1);
v4f32 _r2a = (v4f32)__msa_splati_w(_r2nn, 2);
v4f32 _r2b = (v4f32)__msa_splati_w(_r2nn, 3);
v4f32 _r2c = (v4f32)__msa_splati_w(_r2nnn, 0);
v4f32 _r2d = (v4f32)__msa_splati_w(_r2nnn, 1);
v4f32 _r2e = (v4f32)__msa_splati_w(_r2nnn, 2);
v4f32 _r2f = (v4f32)__msa_splati_w(_r2nnn, 3);
v4f32 _r2g = __msa_fill_w_f32(r2[16]);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum1 = __msa_fmadd_w(_sum1, _r22, _k20);
_sum2 = __msa_fmadd_w(_sum2, _r24, _k20);
_sum3 = __msa_fmadd_w(_sum3, _r26, _k20);
_sum4 = __msa_fmadd_w(_sum4, _r28, _k20);
_sum5 = __msa_fmadd_w(_sum5, _r2a, _k20);
_sum6 = __msa_fmadd_w(_sum6, _r2c, _k20);
_sum7 = __msa_fmadd_w(_sum7, _r2e, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum1 = __msa_fmadd_w(_sum1, _r23, _k21);
_sum2 = __msa_fmadd_w(_sum2, _r25, _k21);
_sum3 = __msa_fmadd_w(_sum3, _r27, _k21);
_sum4 = __msa_fmadd_w(_sum4, _r29, _k21);
_sum5 = __msa_fmadd_w(_sum5, _r2b, _k21);
_sum6 = __msa_fmadd_w(_sum6, _r2d, _k21);
_sum7 = __msa_fmadd_w(_sum7, _r2f, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
_sum1 = __msa_fmadd_w(_sum1, _r24, _k22);
_sum2 = __msa_fmadd_w(_sum2, _r26, _k22);
_sum3 = __msa_fmadd_w(_sum3, _r28, _k22);
_sum4 = __msa_fmadd_w(_sum4, _r2a, _k22);
_sum5 = __msa_fmadd_w(_sum5, _r2c, _k22);
_sum6 = __msa_fmadd_w(_sum6, _r2e, _k22);
_sum7 = __msa_fmadd_w(_sum7, _r2g, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0);
outptr0 += 4 * 8;
r0 += 16;
r1 += 16;
r2 += 16;
}
for (; j + 3 < outw; j += 4)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(outptr0 + 4, 0);
v4f32 _sum2 = (v4f32)__msa_ld_w(outptr0 + 4 * 2, 0);
v4f32 _sum3 = (v4f32)__msa_ld_w(outptr0 + 4 * 3, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4i32 _r0n = __msa_ld_w(r0 + 4, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
v4f32 _r03 = (v4f32)__msa_splati_w(_r0, 3);
v4f32 _r04 = (v4f32)__msa_splati_w(_r0n, 0);
v4f32 _r05 = (v4f32)__msa_splati_w(_r0n, 1);
v4f32 _r06 = (v4f32)__msa_splati_w(_r0n, 2);
v4f32 _r07 = (v4f32)__msa_splati_w(_r0n, 3);
v4f32 _r08 = __msa_fill_w_f32(r0[8]);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum1 = __msa_fmadd_w(_sum1, _r02, _k00);
_sum2 = __msa_fmadd_w(_sum2, _r04, _k00);
_sum3 = __msa_fmadd_w(_sum3, _r06, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum1 = __msa_fmadd_w(_sum1, _r03, _k01);
_sum2 = __msa_fmadd_w(_sum2, _r05, _k01);
_sum3 = __msa_fmadd_w(_sum3, _r07, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
_sum1 = __msa_fmadd_w(_sum1, _r04, _k02);
_sum2 = __msa_fmadd_w(_sum2, _r06, _k02);
_sum3 = __msa_fmadd_w(_sum3, _r08, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4i32 _r1n = __msa_ld_w(r1 + 4, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
v4f32 _r13 = (v4f32)__msa_splati_w(_r1, 3);
v4f32 _r14 = (v4f32)__msa_splati_w(_r1n, 0);
v4f32 _r15 = (v4f32)__msa_splati_w(_r1n, 1);
v4f32 _r16 = (v4f32)__msa_splati_w(_r1n, 2);
v4f32 _r17 = (v4f32)__msa_splati_w(_r1n, 3);
v4f32 _r18 = __msa_fill_w_f32(r1[8]);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum1 = __msa_fmadd_w(_sum1, _r12, _k10);
_sum2 = __msa_fmadd_w(_sum2, _r14, _k10);
_sum3 = __msa_fmadd_w(_sum3, _r16, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum1 = __msa_fmadd_w(_sum1, _r13, _k11);
_sum2 = __msa_fmadd_w(_sum2, _r15, _k11);
_sum3 = __msa_fmadd_w(_sum3, _r17, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
_sum1 = __msa_fmadd_w(_sum1, _r14, _k12);
_sum2 = __msa_fmadd_w(_sum2, _r16, _k12);
_sum3 = __msa_fmadd_w(_sum3, _r18, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4i32 _r2n = __msa_ld_w(r2 + 4, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
v4f32 _r23 = (v4f32)__msa_splati_w(_r2, 3);
v4f32 _r24 = (v4f32)__msa_splati_w(_r2n, 0);
v4f32 _r25 = (v4f32)__msa_splati_w(_r2n, 1);
v4f32 _r26 = (v4f32)__msa_splati_w(_r2n, 2);
v4f32 _r27 = (v4f32)__msa_splati_w(_r2n, 3);
v4f32 _r28 = __msa_fill_w_f32(r2[8]);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum1 = __msa_fmadd_w(_sum1, _r22, _k20);
_sum2 = __msa_fmadd_w(_sum2, _r24, _k20);
_sum3 = __msa_fmadd_w(_sum3, _r26, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum1 = __msa_fmadd_w(_sum1, _r23, _k21);
_sum2 = __msa_fmadd_w(_sum2, _r25, _k21);
_sum3 = __msa_fmadd_w(_sum3, _r27, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
_sum1 = __msa_fmadd_w(_sum1, _r24, _k22);
_sum2 = __msa_fmadd_w(_sum2, _r26, _k22);
_sum3 = __msa_fmadd_w(_sum3, _r28, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
__msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
outptr0 += 4 * 4;
r0 += 8;
r1 += 8;
r2 += 8;
}
for (; j + 1 < outw; j += 2)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4f32 _sum1 = (v4f32)__msa_ld_w(outptr0 + 4, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
v4f32 _r03 = (v4f32)__msa_splati_w(_r0, 3);
v4f32 _r04 = __msa_fill_w_f32(r0[4]);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum1 = __msa_fmadd_w(_sum1, _r02, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum1 = __msa_fmadd_w(_sum1, _r03, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
_sum1 = __msa_fmadd_w(_sum1, _r04, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
v4f32 _r13 = (v4f32)__msa_splati_w(_r1, 3);
v4f32 _r14 = __msa_fill_w_f32(r1[4]);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum1 = __msa_fmadd_w(_sum1, _r12, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum1 = __msa_fmadd_w(_sum1, _r13, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
_sum1 = __msa_fmadd_w(_sum1, _r14, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
v4f32 _r23 = (v4f32)__msa_splati_w(_r2, 3);
v4f32 _r24 = __msa_fill_w_f32(r2[4]);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum1 = __msa_fmadd_w(_sum1, _r22, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum1 = __msa_fmadd_w(_sum1, _r23, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
_sum1 = __msa_fmadd_w(_sum1, _r24, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
outptr0 += 4 * 2;
r0 += 4;
r1 += 4;
r2 += 4;
}
for (; j < outw; j++)
{
v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0);
v4i32 _r0 = __msa_ld_w(r0, 0);
v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0);
v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1);
v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2);
_sum0 = __msa_fmadd_w(_sum0, _r00, _k00);
_sum0 = __msa_fmadd_w(_sum0, _r01, _k01);
_sum0 = __msa_fmadd_w(_sum0, _r02, _k02);
v4i32 _r1 = __msa_ld_w(r1, 0);
v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0);
v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1);
v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2);
_sum0 = __msa_fmadd_w(_sum0, _r10, _k10);
_sum0 = __msa_fmadd_w(_sum0, _r11, _k11);
_sum0 = __msa_fmadd_w(_sum0, _r12, _k12);
v4i32 _r2 = __msa_ld_w(r2, 0);
v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0);
v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1);
v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2);
_sum0 = __msa_fmadd_w(_sum0, _r20, _k20);
_sum0 = __msa_fmadd_w(_sum0, _r21, _k21);
_sum0 = __msa_fmadd_w(_sum0, _r22, _k22);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 2;
r1 += 2;
r2 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
}
}
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/imperative.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_ONEDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
inline size_t current_process_id() {
return ::GetCurrentProcessId();
}
#else
inline size_t current_process_id() {
return getpid();
}
#endif
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0,
* and end with a value equal to the size of indices.
*/
struct csr_indptr_check {
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i,
DType* out,
const IType* indptr,
const nnvm::dim_t end,
const nnvm::dim_t idx_size) {
if (indptr[i + 1] < 0 || indptr[i + 1] < indptr[i] || (i == 0 && indptr[i] != 0) ||
(i == end - 1 && indptr[end] != idx_size))
*out = kCSRIndPtrErr;
}
};
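// Illustrative example: for a CSR matrix with 3 rows and 5 stored indices,
// indptr = {0, 2, 2, 5} passes this check, while {0, 3, 2, 5} fails because
// it decreases.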
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
template <typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i,
DType* out,
const IType* idx,
const RType* indptr,
const nnvm::dim_t ncols) {
for (RType j = indptr[i]; j < indptr[i + 1]; j++) {
if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i + 1] - 1 && idx[j] >= idx[j + 1])) {
*out = kCSRIdxErr;
break;
}
}
}
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
template <typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i,
DType* out,
const IType* idx,
const nnvm::dim_t end,
const nnvm::dim_t nrows) {
if ((i < end && idx[i + 1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
*out = kRSPIdxErr;
}
};
template <typename xpu>
void CheckFormatWrapper(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatCSRImpl(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray";
const mxnet::TShape shape = input.shape();
const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
const mxnet::TShape storage_shape = input.storage_shape();
if ((shape.ndim() != 2) ||
(idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
(indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kCSRShapeErr;
});
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<csr_indptr_check, xpu>::Launch(s,
indptr_shape[0] - 1,
val_xpu.dptr<DType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
indptr_shape[0] - 1,
idx_shape[0]);
// no need to check indices if indices are empty
if (idx_shape[0] != 0) {
Kernel<csr_idx_check, xpu>::Launch(s,
indptr_shape[0] - 1,
val_xpu.dptr<DType>(),
input.aux_data(csr::kIdx).dptr<IType>(),
input.aux_data(csr::kIndPtr).dptr<RType>(),
shape[1]);
}
mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
});
});
});
}
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatRSPImpl(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray";
const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
if (idx_shape[0] == 0) {
return;
}
if (full_check) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
Kernel<rsp_idx_check, xpu>::Launch(s,
idx_shape[0],
val_xpu.dptr<DType>(),
input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1,
input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
template <typename xpu>
void CheckFormatImpl(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check) {
int stype = input.storage_type();
if (stype == kCSRStorage) {
CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kRowSparseStorage) {
CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
} else if (stype == kDefaultStorage) {
// no-op for default storage
} else {
LOG(FATAL) << "Unknown storage type " << stype;
}
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template <typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu>* s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/*! \brief Casts tensor storage type to the new type.
*/
template <typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype)
return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2`. Sets *has_both if both are found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool* has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool* has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) {
if (!ndstypes.empty()) {
for (const auto& ndstype : ndstypes) {
if (ndstype == stype) {
return true;
}
}
}
return false;
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
switch (x) {
case kDefaultStorage:
return "default";
case kCSRStorage:
return "csr";
case kRowSparseStorage:
return "row_sparse";
}
return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
switch (dev_type) {
case Context::kCPU:
return "cpu";
case Context::kGPU:
return "gpu";
case Context::kCPUPinned:
return "cpu_pinned";
case Context::kCPUShared:
return "cpu_shared";
}
return "unknown";
}
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
const std::string& attr_name,
std::string default_val = "") {
if (attrs.dict.find(attr_name) == attrs.dict.end()) {
return default_val;
}
return attrs.dict.at(attr_name);
}
/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>& in_attrs,
const std::vector<int>& out_attrs) {
std::ostringstream os;
os << "operator = " << attrs.op->name << "\ninput storage types = [";
for (const int attr : in_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "output storage types = [";
for (const int attr : out_attrs) {
os << stype_string(attr) << ", ";
}
os << "]\n"
<< "params = {";
for (const auto& kv : attrs.dict) {
os << "\"" << kv.first << "\" : " << kv.second << ", ";
}
os << "}\n"
<< "context.dev_mask = " << dev_type_string(dev_mask);
return os.str();
}
/*! \brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
std::string result = "";
std::vector<int> in_stypes;
std::vector<int> out_stypes;
in_stypes.reserve(inputs.size());
out_stypes.reserve(outputs.size());
auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
return result;
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
auto log_store = LogStore::Get();
if (log_store->find(message) == log_store->end()) {
LOG(INFO) << message;
log_store->insert(message);
}
}
/*! \brief log storage fallback event
*/
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log)
return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning =
"\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"This does not affect the correctness of the programme. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
LogOnce(os.str());
#if MXNET_USE_ONEDNN == 1
if (!MKLDNNEnvSet())
common::LogOnce(
"MXNET_ONEDNN_ENABLED flag is off. "
"You can re-enable by setting MXNET_ONEDNN_ENABLED=1");
if (GetMKLDNNCacheSize() != -1)
common::LogOnce(
"MXNET_ONEDNN_CACHE_NUM is set."
"Should only be set if "
"your model has variable input shapes, "
"as cache size may grow unbounded");
#endif
}
// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
// This is the resource-efficient option.
return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
// This is the resource-efficient option.
int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
return std::min(num_match_color, GetNumThreadsPerGPU());
}
template <typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
V sum = start;
#pragma omp parallel for reduction(+ : sum)
for (int i = 0; i < n; ++i) {
sum += a[i];
}
return sum;
}
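// Usage sketch (illustrative; `data` is hypothetical):
//   std::vector<float> data(1000, 1.0f);
//   double total = ParallelAccumulate(data.data(), static_cast<int>(data.size()), 0.0);
//   // total == 1000.0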
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) {
if (len < grainsize) {
std::sort(first, first + len, comp);
} else {
std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len / 2, grainsize, comp);
ParallelSortHelper(first + len / 2, len - len / 2, grainsize, comp);
thr.join();
std::inplace_merge(first, first + len / 2, first + len, comp);
}
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range is recursively divided in two, with a separate thread
* assigned to sort each half.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
const auto num = std::distance(first, last);
size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024 * 16));
ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range is recursively divided in two, with a separate thread
* assigned to sort each half.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
ParallelSort(
first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
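// Usage sketch (illustrative):
//   std::vector<int> v = {3, 1, 2};
//   ParallelSort(v.begin(), v.end(), 4);  // ascending order via operator<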
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
* \brief Helper for non-array type `T`.
*/
template <class T>
struct UniqueIf {
/*!
* \brief Type of `T`.
*/
using SingleObject = std::unique_ptr<T>;
};
/*!
* \brief Helper for an array of unknown bound `T`.
*/
template <class T>
struct UniqueIf<T[]> {
/*!
* \brief Type of `T`.
*/
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
* \brief Helper for an array of known bound `T`.
*/
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
* \brief Type of `T`.
*/
using KnownBound = void;
};
} // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
using U = typename std::remove_extent<T>::type;
return std::unique_ptr<T>(new U[n]{});
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
* Constructing an array of known bound is disallowed.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
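// Usage sketch (illustrative):
//   auto s = MakeUnique<std::string>(3, 'x');  // single object, "xxx"
//   auto a = MakeUnique<int[]>(16);            // value-initialized array of 16 ints
// The known-bound form MakeUnique<int[16]>(...) is deliberately deleted above.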
template <typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) {
static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
if (ctx.dev_mask() == cpu::kDevMask) {
return fcompute_cpu.get(op, nullptr);
} else if (ctx.dev_mask() == gpu::kDevMask) {
return fcompute_gpu.get(op, nullptr);
} else {
LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
return nullptr;
}
}
/*!
* \brief Return the max integer value representable in the type `T` without loss of precision.
*/
template <typename T>
constexpr size_t MaxIntegerValue() {
return std::is_integral<T>::value ? std::numeric_limits<T>::max()
: size_t(2) << (std::numeric_limits<T>::digits - 1);
}
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
return size_t(2) << 10;
}
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
return size_t(2) << 14;
}
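// Both helpers below return the number of bits needed to represent `a`, i.e.
// floor(log2(a)) + 1 for a >= 1 (e.g. ilog2ul(8) == 4); they return 1 for a == 0.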
MSHADOW_XINLINE int ilog2ul(size_t a) {
int k = 1;
while (a >>= 1)
++k;
return k;
}
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
int k = 1;
while (a >>= 1)
++k;
return k;
}
/*!
* \brief Return an NDArray of all zeros.
*/
inline NDArray InitZeros(const NDArrayStorageType stype,
const mxnet::TShape& shape,
const Context& ctx,
const int dtype) {
// NDArray with default storage
if (stype == kDefaultStorage) {
NDArray ret(shape, ctx, false, dtype);
ret = 0;
return ret;
}
// NDArray with non-default storage. Storage allocation is always delayed.
return NDArray(stype, shape, ctx, true, dtype);
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*/
inline void EmplaceBackZeros(const NDArrayStorageType stype,
const mxnet::TShape& shape,
const Context& ctx,
const int dtype,
std::vector<NDArray>* vec) {
// NDArray with default storage
if (stype == kDefaultStorage) {
vec->emplace_back(shape, ctx, false, dtype);
vec->back() = 0;
} else {
// NDArray with non-default storage. Storage allocation is always delayed.
vec->emplace_back(stype, shape, ctx, true, dtype);
}
}
/*!
* \brief Parallelize copy by OpenMP.
*/
template <typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] = src[i];
}
} else {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
}
}
/*!
* \brief Parallelize add by OpenMP.
*/
template <typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
} else {
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
}
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
if (shape->ndim() == 0) { // legacy shape ndim = 0 means unknown
*shape = mxnet::TShape(); // unknown shape ndim = -1
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if ((*shape)[j] == 0) { // legacy shape dim_size = 0 means unknown
(*shape)[j] = -1; // unknown dim size = -1
}
}
}
}
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
for (size_t i = 0; i < shapes->size(); ++i) {
ConvertToNumpyShape(&(shapes->at(i)));
}
}
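// Example: a legacy shape (2, 0, 3) becomes (2, -1, 3), and a legacy 0-ndim
// (completely unknown) shape becomes the ndim = -1 unknown shape.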
/*!
* \brief This function is used to convert shapes returned by
* the infer shape functions/pass to the legacy shape definition.
*/
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
if (!mxnet::ndim_is_known(*shape)) {
*shape = mxnet::TShape(0, -1);
} else {
for (int j = 0; j < shape->ndim(); ++j) {
if (!mxnet::dim_size_is_known(*shape, j)) {
(*shape)[j] = 0;
}
}
}
}
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
for (size_t i = 0; i < shapes->size(); ++i) {
ConvertToLegacyShape(&(shapes->at(i)));
}
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
// convert negative axes to positive values
const int ndim = src.ndim();
mxnet::TShape axes = src;
for (int i = 0; i < ndim; ++i) {
if (axes[i] < 0) {
axes[i] += ndim;
}
CHECK(axes[i] >= 0 && axes[i] < ndim)
<< "axes[" << i << "]=" << axes[i] << " exceeds the range [" << 0 << ", " << ndim << ")";
}
return axes;
}
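// Example: src = (0, -1, 1) has ndim 3, so the -1 maps to 3 + (-1) = 2 and
// the result is (0, 2, 1).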
inline bool is_float(const int dtype) {
return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16;
}
inline bool is_int(const int dtype) {
return dtype == mshadow::kUint8 || dtype == mshadow::kInt8 || dtype == mshadow::kInt32 ||
dtype == mshadow::kInt64;
}
inline int get_more_precise_type(const int type1, const int type2) {
if (type1 == type2)
return type1;
if (is_float(type1) && is_float(type2)) {
if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
return mshadow::kFloat64;
}
if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
return mshadow::kFloat32;
}
return mshadow::kFloat16;
} else if (is_float(type1) || is_float(type2)) {
return is_float(type1) ? type1 : type2;
}
if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
return mshadow::kInt64;
}
if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
return mshadow::kInt32;
}
CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
<< "1 is UInt8 and 1 is Int8 should not get here";
if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
return mshadow::kUint8;
}
return mshadow::kInt8;
}
inline int np_binary_out_infer_type(const int type1, const int type2) {
if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) {
return mshadow::kInt32;
}
return get_more_precise_type(type1, type2);
}
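// Examples: (kUint8, kInt8) -> kInt32 (no common 8-bit type holds both ranges);
// (kFloat16, kInt64) -> kFloat16 (floating point wins over integral types).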
inline const std::string NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
// obtain the profiler scope name, if assigned previously
std::string profiler_scope = MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
const std::unordered_map<std::string, std::string>& node_attrs_dict = attrs.dict;
const std::unordered_map<std::string, std::string>::const_iterator profiler_scope_iter =
node_attrs_dict.find("__profiler_scope__");
if (profiler_scope_iter != node_attrs_dict.end()) {
profiler_scope = profiler_scope_iter->second;
}
return profiler_scope;
}
inline int GetDefaultDtype() {
return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32;
}
inline int GetDefaultDtype(int dtype) {
if (dtype != -1)
return dtype;
return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32;
}
struct MShadowTypeInfo {
std::string name;
int size;
int acc_size;
MShadowTypeInfo(std::string name, const int size, const int acc_size)
: name(std::move(name)), size(size), acc_size(acc_size) {}  // by value so std::move can actually move
MShadowTypeInfo(std::string name, const int size) : MShadowTypeInfo(std::move(name), size, size) {}
};
MShadowTypeInfo mshadow_type_info(const int type_flag);
inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) {
#if _MSC_VER
*ptr = _aligned_malloc(size, alignment);
if (*ptr == nullptr)
return false;
#else
int res = posix_memalign(ptr, alignment, size);
if (res != 0)
return false;
#endif
return true;
}
inline void AlignedMemFree(void* ptr) {
#if _MSC_VER
_aligned_free(ptr);
#else
free(ptr);
#endif
}
inline index_t div_round(const index_t a, const index_t b) {
return (a + b - 1) / b;
}
inline bool IsPower2(size_t N) {
return ((N & (N - 1)) == 0) && N != 0;
}
inline size_t RoundToPower2(size_t N) {
size_t ret = 1;
size_t copyN = N;
while (N >= 2) {
ret *= 2;
N /= 2;
}
if (ret < copyN) {
ret *= 2;
}
return ret;
}
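// Examples: IsPower2(8) == true, IsPower2(0) == false;
// RoundToPower2(5) == 8, RoundToPower2(8) == 8.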
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
nl_matrix.c | /*
* Copyright (c) 2004-2010, Bruno Levy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the ALICE Project-Team nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* If you modify this software, you should include a notice giving the
* name of the person performing the modification, the date of modification,
* and the reason for such modification.
*
* Contact: Bruno Levy
*
* levy@loria.fr
*
* ALICE Project
* LORIA, INRIA Lorraine,
* Campus Scientifique, BP 239
* 54506 VANDOEUVRE LES NANCY CEDEX
* FRANCE
*
*/
#include "nl_matrix.h"
#include "nl_superlu.h"
#include "nl_cholmod.h"
#include "nl_mkl.h"
#include "nl_context.h"
#include "nl_blas.h"
/*
Silence warnings about const casts in the qsort()
comparison callback.
*/
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
/************************************************************************/
void nlDeleteMatrix(NLMatrix M) {
if(M == NULL) {
return;
}
M->destroy_func(M);
NL_DELETE(M);
}
void nlMultMatrixVector(
NLMatrix M, const double* x, double* y
) {
M->mult_func(M,x,y);
}
/************************************************************************/
void nlRowColumnConstruct(NLRowColumn* c) {
c->size = 0;
c->capacity = 0;
c->coeff = NULL;
}
void nlRowColumnDestroy(NLRowColumn* c) {
NL_DELETE_ARRAY(c->coeff);
c->size = 0;
c->capacity = 0;
}
void nlRowColumnGrow(NLRowColumn* c) {
if(c->capacity != 0) {
c->capacity = 2 * c->capacity;
c->coeff = NL_RENEW_ARRAY(NLCoeff, c->coeff, c->capacity);
} else {
c->capacity = 4;
c->coeff = NL_NEW_ARRAY(NLCoeff, c->capacity);
}
}
void nlRowColumnAdd(NLRowColumn* c, NLuint index, NLdouble value) {
NLuint i;
for(i=0; i<c->size; i++) {
if(c->coeff[i].index == index) {
c->coeff[i].value += value;
return;
}
}
if(c->size == c->capacity) {
nlRowColumnGrow(c);
}
c->coeff[c->size].index = index;
c->coeff[c->size].value = value;
c->size++;
}
/* Does not check whether the index already exists */
void nlRowColumnAppend(NLRowColumn* c, NLuint index, NLdouble value) {
if(c->size == c->capacity) {
nlRowColumnGrow(c);
}
c->coeff[c->size].index = index;
c->coeff[c->size].value = value;
c->size++;
}
void nlRowColumnZero(NLRowColumn* c) {
c->size = 0;
}
void nlRowColumnClear(NLRowColumn* c) {
c->size = 0;
c->capacity = 0;
NL_DELETE_ARRAY(c->coeff);
}
static int nlCoeffCompare(const void* p1, const void* p2) {
NLuint i1 = ((const NLCoeff*)(p1))->index;
NLuint i2 = ((const NLCoeff*)(p2))->index;
/* qsort() requires a negative/zero/positive three-way result */
return (i1 > i2) - (i1 < i2);
}
void nlRowColumnSort(NLRowColumn* c) {
qsort(c->coeff, c->size, sizeof(NLCoeff), nlCoeffCompare);
}
/******************************************************************************/
/* CRSMatrix data structure */
/**
* \brief Destroys an NLCRSMatrix
* \details Only the memory allocated by the NLCRSMatrix is freed;
* the NLCRSMatrix structure itself is not freed.
* \param[in,out] M pointer to an NLCRSMatrix
* \relates NLCRSMatrix
*/
static void nlCRSMatrixDestroy(NLCRSMatrix* M) {
NL_DELETE_ARRAY(M->val);
NL_DELETE_ARRAY(M->rowptr);
NL_DELETE_ARRAY(M->colind);
NL_DELETE_ARRAY(M->sliceptr);
M->m = 0;
M->n = 0;
M->nslices = 0;
}
NLboolean nlCRSMatrixSave(NLCRSMatrix* M, const char* filename) {
NLuint nnz = M->rowptr[M->m];
FILE* f = fopen(filename, "rb");
if(f == NULL) {
nlError("nlCRSMatrixSave", "Could not open file");
return NL_FALSE;
}
fwrite(&M->m, sizeof(NLuint), 1, f);
fwrite(&M->n, sizeof(NLuint), 1, f);
fwrite(&nnz, sizeof(NLuint), 1, f);
fwrite(M->rowptr, sizeof(NLuint), M->m+1, f);
fwrite(M->colind, sizeof(NLuint), nnz, f);
fwrite(M->val, sizeof(double), nnz, f);
return NL_TRUE;
}
NLboolean nlCRSMatrixLoad(NLCRSMatrix* M, const char* filename) {
NLuint nnz = 0;
FILE* f = fopen(filename, "rb");
NLboolean truncated = NL_FALSE;
if(f == NULL) {
nlError("nlCRSMatrixLoad", "Could not open file");
return NL_FALSE;
}
truncated = truncated || (
fread(&M->m, sizeof(NLuint), 1, f) != 1 ||
fread(&M->n, sizeof(NLuint), 1, f) != 1 ||
fread(&nnz, sizeof(NLuint), 1, f) != 1
);
if(truncated) {
M->rowptr = NULL;
M->colind = NULL;
M->val = NULL;
} else {
M->rowptr = NL_NEW_ARRAY(NLuint, M->m+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->val = NL_NEW_ARRAY(double, nnz);
truncated = truncated || (
fread(M->rowptr, sizeof(NLuint), M->m+1, f) != M->m+1 ||
fread(M->colind, sizeof(NLuint), nnz, f) != nnz ||
fread(M->val, sizeof(double), nnz, f) != nnz
);
}
if(truncated) {
nlError("nlCRSMatrixLoad", "File appears to be truncated");
NL_DELETE_ARRAY(M->rowptr);
NL_DELETE_ARRAY(M->colind);
NL_DELETE_ARRAY(M->val);
fclose(f);
return NL_FALSE;
} else {
M->nslices = 1;
M->sliceptr = NL_NEW_ARRAY(NLuint, M->nslices+1);
M->sliceptr[0] = 0;
M->sliceptr[1] = M->m;
}
fclose(f);
return NL_TRUE;
}
NLuint nlCRSMatrixNNZ(NLCRSMatrix* M) {
return M->rowptr[M->m];
}
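/*
 * CRS layout reminder (illustrative): rowptr[i] .. rowptr[i+1]-1 index the
 * nonzeros of row i in val[] / colind[]. For the 2x3 matrix
 *   [ 1 0 2 ]
 *   [ 0 3 0 ]
 * the storage is rowptr = {0,2,3}, colind = {0,2,1}, val = {1.0,2.0,3.0}.
 */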
static void nlCRSMatrixMultSlice(
NLCRSMatrix* M, const double* x, double* y, NLuint Ibegin, NLuint Iend
) {
NLuint i,j;
for(i=Ibegin; i<Iend; ++i) {
double sum=0.0;
for(j=M->rowptr[i]; j<M->rowptr[i+1]; ++j) {
sum += M->val[j] * x[M->colind[j]];
}
y[i] = sum;
}
}
/**
* \brief Computes a matrix-vector product
* \param[in] M a pointer to the matrix
* \param[in] x the vector to be multiplied, size = A->n
* \param[in] y where to store the result, size = A->m
* \relates NLSparseMatrix
*/
static void nlCRSMatrixMult(
NLCRSMatrix* M, const double* x, double* y
) {
int slice;
int nslices = (int)(M->nslices);
NLuint i,j,jj;
NLdouble a;
if(M->symmetric_storage) {
for(i=0; i<M->m; ++i) {
y[i] = 0.0;
}
for(i=0; i<M->m; ++i) {
for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
a = M->val[jj];
j = M->colind[jj];
y[i] += a * x[j];
if(j != i) {
y[j] += a * x[i];
}
}
}
} else {
#if defined(_OPENMP)
#pragma omp parallel for private(slice)
#endif
for(slice=0; slice<nslices; ++slice) {
nlCRSMatrixMultSlice(
M,x,y,M->sliceptr[slice],M->sliceptr[slice+1]
);
}
}
nlHostBlas()->flops += (NLulong)(2*nlCRSMatrixNNZ(M));
}
void nlCRSMatrixConstruct(
NLCRSMatrix* M, NLuint m, NLuint n, NLuint nnz, NLuint nslices
) {
M->m = m;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
if(NLMultMatrixVector_MKL != NULL) {
M->mult_func = (NLMultMatrixVectorFunc)NLMultMatrixVector_MKL;
} else {
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
}
M->nslices = nslices;
M->val = NL_NEW_ARRAY(double, nnz);
M->rowptr = NL_NEW_ARRAY(NLuint, m+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->sliceptr = NL_NEW_ARRAY(NLuint, nslices+1);
M->symmetric_storage = NL_FALSE;
}
void nlCRSMatrixConstructSymmetric(
NLCRSMatrix* M, NLuint n, NLuint nnz
) {
M->m = n;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
M->nslices = 0;
M->val = NL_NEW_ARRAY(double, nnz);
M->rowptr = NL_NEW_ARRAY(NLuint, n+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->sliceptr = NULL;
M->symmetric_storage = NL_TRUE;
}
void nlCRSMatrixConstructPattern(
NLCRSMatrix* M, NLuint m, NLuint n
) {
M->m = m;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
if(NLMultMatrixVector_MKL != NULL) {
M->mult_func = (NLMultMatrixVectorFunc)NLMultMatrixVector_MKL;
} else {
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
}
M->nslices = 0;
M->val = NULL;
M->rowptr = NL_NEW_ARRAY(NLuint, m+1);
M->colind = NULL;
M->sliceptr = NULL;
M->symmetric_storage = NL_FALSE;
}
void nlCRSMatrixConstructPatternSymmetric(
NLCRSMatrix* M, NLuint n
) {
M->m = n;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
M->nslices = 0;
M->val = NULL;
M->rowptr = NL_NEW_ARRAY(NLuint, n+1);
M->colind = NULL;
M->sliceptr = NULL;
M->symmetric_storage = NL_TRUE;
}
void nlCRSMatrixPatternSetRowLength(
NLCRSMatrix* M, NLuint i, NLuint n
) {
nl_assert(i < M->m);
nl_assert(n <= M->n);
/* Test that matrix is in 'pattern' state */
nl_assert(M->colind == NULL);
nl_assert(M->val == NULL);
/* Store row length in rowptr */
M->rowptr[i+1] = n;
}
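/*
 * Typical two-phase construction (a sketch using only functions from this
 * file; row_nnz is a hypothetical per-row nonzero count):
 *   nlCRSMatrixConstructPattern(&M, m, n);
 *   for(i=0; i<m; ++i) { nlCRSMatrixPatternSetRowLength(&M, i, row_nnz[i]); }
 *   nlCRSMatrixPatternCompile(&M);
 * then nlCRSMatrixAdd(&M, i, j, value) once per coefficient.
 */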
void nlCRSMatrixComputeSlices(NLCRSMatrix* CRS);
void nlCRSMatrixComputeSlices(NLCRSMatrix* CRS) {
NLuint slice_size = CRS->rowptr[CRS->m] / CRS->nslices;
NLuint slice, cur_bound, cur_NNZ, cur_row;
/* Create "slices" to be used by parallel sparse matrix vector product */
if(CRS->sliceptr != NULL) {
cur_bound = slice_size;
cur_NNZ = 0;
cur_row = 0;
CRS->sliceptr[0]=0;
for(slice=1; slice<CRS->nslices; ++slice) {
while(cur_NNZ < cur_bound && cur_row < CRS->m) {
++cur_row;
cur_NNZ += CRS->rowptr[cur_row+1] - CRS->rowptr[cur_row];
}
CRS->sliceptr[slice] = cur_row;
cur_bound += slice_size;
}
CRS->sliceptr[CRS->nslices]=CRS->m;
}
}
void nlCRSMatrixPatternCompile(NLCRSMatrix* M) {
NLuint nslices = 8; /* TODO get number of cores */
NLuint i;
NLuint nnz,k;
/* Test that matrix is in 'pattern' state */
nl_assert(M->colind == NULL);
nl_assert(M->val == NULL);
for(i=0; i<M->m; ++i) {
M->rowptr[i+1] += M->rowptr[i];
}
nnz = M->rowptr[M->m];
M->val = NL_NEW_ARRAY(double, nnz);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
for(k=0; k<nnz; ++k) {
M->colind[k] = (NLuint)(-1);
}
M->sliceptr = NL_NEW_ARRAY(NLuint, nslices+1);
M->nslices = nslices;
nlCRSMatrixComputeSlices(M);
}
void nlCRSMatrixAdd(
NLCRSMatrix* M, NLuint i, NLuint j, NLdouble value
) {
NLuint jj;
/* Test that matrix is in 'compiled' state */
nl_assert(M->colind != NULL);
nl_assert(M->val != NULL);
nl_assert(i < M->m);
nl_assert(j < M->n);
if(M->symmetric_storage && j > i) {
return;
}
for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
if(M->colind[jj] == j) {
M->val[jj] += value;
return;
} else if(M->colind[jj] == (NLuint)(-1)) {
M->colind[jj] = j;
M->val[jj] += value;
return;
}
}
/* If this line is reached, it means that too many coefficients
* were added to row i, i.e. a number of coefficients larger than
* the row length previously declared with nlCRSMatrixPatternSetRowLength()
*/
nl_assert_not_reached;
}
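/*
* Usage sketch (illustrative, not part of the library): how the two-phase
* CRS pattern API above fits together. Dimensions, row lengths and
* coefficient values are assumptions chosen for the example.
*/
#if 0
static void example_crs_pattern(void) {
NLCRSMatrix M;
nlCRSMatrixConstructPattern(&M, 2, 2);    /* 2x2 matrix, 'pattern' state */
nlCRSMatrixPatternSetRowLength(&M, 0, 2); /* row 0 will hold 2 coefficients */
nlCRSMatrixPatternSetRowLength(&M, 1, 1); /* row 1 will hold 1 coefficient */
nlCRSMatrixPatternCompile(&M);            /* allocates val[] and colind[] */
nlCRSMatrixAdd(&M, 0, 0, 2.0);            /* now in 'compiled' state */
nlCRSMatrixAdd(&M, 0, 1, -1.0);
nlCRSMatrixAdd(&M, 1, 1, 3.0);
nlCRSMatrixDestroy(&M);
}
#endif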
/******************************************************************************/
/* SparseMatrix data structure */
static void nlSparseMatrixDestroyRowColumns(NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnDestroy(&(M->row[i]));
}
NL_DELETE_ARRAY(M->row);
}
M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_ROWS);
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnDestroy(&(M->column[i]));
}
NL_DELETE_ARRAY(M->column);
}
M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_COLUMNS);
}
void nlSparseMatrixDestroy(NLSparseMatrix* M) {
nl_assert(M->type == NL_MATRIX_SPARSE_DYNAMIC);
nlSparseMatrixDestroyRowColumns(M);
NL_DELETE_ARRAY(M->diag);
#ifdef NL_PARANOID
NL_CLEAR(NLSparseMatrix,M);
#endif
}
void nlSparseMatrixAdd(NLSparseMatrix* M, NLuint i, NLuint j, NLdouble value) {
nl_parano_range_assert(i, 0, M->m - 1);
nl_parano_range_assert(j, 0, M->n - 1);
if((M->storage & NL_MATRIX_STORE_SYMMETRIC) && (j > i)) {
return;
}
if(i == j) {
M->diag[i] += value;
}
if(M->storage & NL_MATRIX_STORE_ROWS) {
nlRowColumnAdd(&(M->row[i]), j, value);
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
nlRowColumnAdd(&(M->column[j]), i, value);
}
}
static void nlSparseMatrixAddSparseMatrix(
NLSparseMatrix* M, double mul, const NLSparseMatrix* N
) {
NLuint i,j,ii,jj;
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
if(N->storage & NL_MATRIX_STORE_SYMMETRIC) {
nl_assert(M->storage & NL_MATRIX_STORE_SYMMETRIC);
}
if(N->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<N->m; ++i) {
for(jj=0; jj<N->row[i].size; ++jj) {
nlSparseMatrixAdd(
M,
i, N->row[i].coeff[jj].index,
mul*N->row[i].coeff[jj].value
);
}
}
} else {
nl_assert(N->storage & NL_MATRIX_STORE_COLUMNS);
for(j=0; j<N->n; ++j) {
for(ii=0; ii<N->column[j].size; ++ii) {
nlSparseMatrixAdd(
M,
N->column[j].coeff[ii].index, j,
mul*N->column[j].coeff[ii].value
);
}
}
}
}
static void nlSparseMatrixAddCRSMatrix(
NLSparseMatrix* M, double mul, const NLCRSMatrix* N
) {
NLuint i,jj;
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
for(i=0; i<M->m; ++i) {
for(jj=N->rowptr[i]; jj<N->rowptr[i+1]; ++jj) {
nlSparseMatrixAdd(
M,
i,
N->colind[jj],
mul*N->val[jj]
);
}
}
}
void nlSparseMatrixAddMatrix(
NLSparseMatrix* M, double mul, const NLMatrix N
) {
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
if(N->type == NL_MATRIX_SPARSE_DYNAMIC) {
nlSparseMatrixAddSparseMatrix(M, mul, (const NLSparseMatrix*)N);
} else if(N->type == NL_MATRIX_CRS) {
nlSparseMatrixAddCRSMatrix(M, mul, (const NLCRSMatrix*)N);
} else {
nl_assert_not_reached;
}
}
void nlSparseMatrixZero( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnZero(&(M->row[i]));
}
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnZero(&(M->column[i]));
}
}
NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
void nlSparseMatrixClear( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnClear(&(M->row[i]));
}
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnClear(&(M->column[i]));
}
}
NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
/* Returns the number of non-zero coefficients */
NLuint nlSparseMatrixNNZ( NLSparseMatrix* M) {
NLuint nnz = 0;
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i = 0; i<M->m; i++) {
nnz += M->row[i].size;
}
} else if (M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i = 0; i<M->n; i++) {
nnz += M->column[i].size;
}
} else {
nl_assert_not_reached;
}
return nnz;
}
void nlSparseMatrixSort( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i = 0; i<M->m; i++) {
nlRowColumnSort(&(M->row[i]));
}
}
if (M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i = 0; i<M->n; i++) {
nlRowColumnSort(&(M->column[i]));
}
}
}
void nlSparseMatrixMAddRow(
NLSparseMatrix* M, NLuint i1, double s, NLuint i2
) {
NLuint jj;
NLRowColumn* Ri2 = &(M->row[i2]);
NLCoeff* c = NULL;
nl_debug_assert(i1 < M->m);
nl_debug_assert(i2 < M->m);
for(jj=0; jj<Ri2->size; ++jj) {
c = &(Ri2->coeff[jj]);
nlSparseMatrixAdd(M, i1, c->index, s*c->value);
}
}
void nlSparseMatrixScaleRow(
NLSparseMatrix* M, NLuint i, double s
) {
NLuint jj;
NLRowColumn* Ri = &(M->row[i]);
NLCoeff* c = NULL;
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
nl_assert(!(M->storage & NL_MATRIX_STORE_COLUMNS));
nl_debug_assert(i < M->m);
for(jj=0; jj<Ri->size; ++jj) {
c = &(Ri->coeff[jj]);
c->value *= s;
}
if(i < M->diag_size) {
M->diag[i] *= s;
}
}
void nlSparseMatrixZeroRow(
NLSparseMatrix* M, NLuint i
) {
NLRowColumn* Ri = &(M->row[i]);
nl_debug_assert(i < M->m);
Ri->size = 0;
if(i < M->diag_size) {
M->diag[i] = 0.0;
}
}
/*****************************************************************************/
/* SparseMatrix x Vector routines, internal helper routines */
static void nlSparseMatrix_mult_rows_symmetric(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint m = A->m;
NLuint i,ij;
NLCoeff* c = NULL;
for(i=0; i<m; i++) {
NLRowColumn* Ri = &(A->row[i]);
y[i] = 0;
for(ij=0; ij<Ri->size; ++ij) {
c = &(Ri->coeff[ij]);
y[i] += c->value * x[c->index];
if(i != c->index) {
y[c->index] += c->value * x[i];
}
}
}
}
static void nlSparseMatrix_mult_rows(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
/*
* Note: OpenMP requires signed loop indices (unsigned indices
* triggered floating point exceptions with some compilers),
* hence signed ints are used for all indices below.
*/
int m = (int)(A->m);
int i,ij;
NLCoeff* c = NULL;
NLRowColumn* Ri = NULL;
#if defined(_OPENMP)
#pragma omp parallel for private(i,ij,c,Ri)
#endif
for(i=0; i<m; i++) {
Ri = &(A->row[i]);
y[i] = 0;
for(ij=0; ij<(int)(Ri->size); ij++) {
c = &(Ri->coeff[ij]);
y[i] += c->value * x[c->index];
}
}
}
static void nlSparseMatrix_mult_cols_symmetric(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint n = A->n;
NLuint j,ii;
NLCoeff* c = NULL;
for(j=0; j<n; j++) {
NLRowColumn* Cj = &(A->column[j]);
y[j] = 0;
for(ii=0; ii<Cj->size; ii++) {
c = &(Cj->coeff[ii]);
y[c->index] += c->value * x[j];
if(j != c->index) {
y[j] += c->value * x[c->index];
}
}
}
}
static void nlSparseMatrix_mult_cols(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint n = A->n;
NLuint j,ii;
NLCoeff* c = NULL;
NL_CLEAR_ARRAY(NLdouble, y, A->m);
for(j=0; j<n; j++) {
NLRowColumn* Cj = &(A->column[j]);
for(ii=0; ii<Cj->size; ii++) {
c = &(Cj->coeff[ii]);
y[c->index] += c->value * x[j];
}
}
}
void nlSparseMatrixMult(
NLSparseMatrix* A, const NLdouble* x, NLdouble* y
) {
nl_assert(A->type == NL_MATRIX_SPARSE_DYNAMIC);
if(A->storage & NL_MATRIX_STORE_ROWS) {
if(A->storage & NL_MATRIX_STORE_SYMMETRIC) {
nlSparseMatrix_mult_rows_symmetric(A, x, y);
} else {
nlSparseMatrix_mult_rows(A, x, y);
}
} else {
if(A->storage & NL_MATRIX_STORE_SYMMETRIC) {
nlSparseMatrix_mult_cols_symmetric(A, x, y);
} else {
nlSparseMatrix_mult_cols(A, x, y);
}
}
nlHostBlas()->flops += (NLulong)(2*nlSparseMatrixNNZ(A));
}
NLMatrix nlSparseMatrixNew(
NLuint m, NLuint n, NLenum storage
) {
NLSparseMatrix* result = NL_NEW(NLSparseMatrix);
nlSparseMatrixConstruct(result, m, n, storage);
return (NLMatrix)result;
}
void nlSparseMatrixConstruct(
NLSparseMatrix* M, NLuint m, NLuint n, NLenum storage
) {
NLuint i;
M->m = m;
M->n = n;
M->type = NL_MATRIX_SPARSE_DYNAMIC;
M->destroy_func = (NLDestroyMatrixFunc)nlSparseMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlSparseMatrixMult;
M->storage = storage;
if(storage & NL_MATRIX_STORE_ROWS) {
M->row = NL_NEW_ARRAY(NLRowColumn, m);
M->row_capacity = m;
for(i=0; i<m; i++) {
nlRowColumnConstruct(&(M->row[i]));
}
} else {
M->row = NULL;
M->row_capacity = 0;
}
if(storage & NL_MATRIX_STORE_COLUMNS) {
M->column = NL_NEW_ARRAY(NLRowColumn, n);
M->column_capacity = n;
for(i=0; i<n; i++) {
nlRowColumnConstruct(&(M->column[i]));
}
} else {
M->column = NULL;
M->column_capacity = 0;
}
M->diag_size = MIN(m,n);
M->diag_capacity = M->diag_size;
M->diag = NL_NEW_ARRAY(NLdouble, M->diag_size);
}
/**
* \brief Adjusts the size of the diagonal of
*  an NLSparseMatrix after the number of rows
*  or columns has changed.
* \param[in,out] M a pointer to the sparse matrix.
*/
static void adjust_diag(NLSparseMatrix* M) {
NLuint new_diag_size = MIN(M->m, M->n);
NLuint i;
if(new_diag_size > M->diag_size) {
if(new_diag_size > M->diag_capacity) {
M->diag_capacity *= 2;
if(M->diag_capacity == 0) {
M->diag_capacity = 16;
}
M->diag = NL_RENEW_ARRAY(double, M->diag, M->diag_capacity);
}
/* Zero the new diagonal entries whether or not a reallocation
* occurred, so that no stale values survive later growths. */
for(i=M->diag_size; i<new_diag_size; ++i) {
M->diag[i] = 0.0;
}
M->diag_size = new_diag_size;
}
}
void nlSparseMatrixAddRow( NLSparseMatrix* M) {
++M->m;
if(M->storage & NL_MATRIX_STORE_ROWS) {
if(M->m > M->row_capacity) {
M->row_capacity *= 2;
if(M->row_capacity == 0) {
M->row_capacity = 16;
}
M->row = NL_RENEW_ARRAY(
NLRowColumn, M->row, M->row_capacity
);
}
nlRowColumnConstruct(&(M->row[M->m-1]));
}
adjust_diag(M);
}
void nlSparseMatrixAddColumn( NLSparseMatrix* M) {
++M->n;
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
if(M->n > M->column_capacity) {
M->column_capacity *= 2;
if(M->column_capacity == 0) {
M->column_capacity = 16;
}
M->column = NL_RENEW_ARRAY(
NLRowColumn, M->column, M->column_capacity
);
}
nlRowColumnConstruct(&(M->column[M->n-1]));
}
adjust_diag(M);
}
/*****************************************************************/
NLMatrix nlCRSMatrixNewFromSparseMatrix(NLSparseMatrix* M) {
NLuint nnz = nlSparseMatrixNNZ(M);
NLuint nslices = 8; /* TODO: get number of cores */
NLuint i,ij,k;
NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
nl_assert(M->m == M->n);
nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
} else {
nlCRSMatrixConstruct(CRS, M->m, M->n, nnz, nslices);
}
nlSparseMatrixSort(M);
/* Convert matrix to CRS format */
k=0;
for(i=0; i<M->m; ++i) {
NLRowColumn* Ri = &(M->row[i]);
CRS->rowptr[i] = k;
for(ij=0; ij<Ri->size; ij++) {
NLCoeff* c = &(Ri->coeff[ij]);
CRS->val[k] = c->value;
CRS->colind[k] = c->index;
++k;
}
}
CRS->rowptr[M->m] = k;
nlCRSMatrixComputeSlices(CRS);
return (NLMatrix)CRS;
}
NLMatrix nlCRSMatrixNewFromSparseMatrixSymmetric(NLSparseMatrix* M) {
NLuint nnz;
NLuint i,j,jj,k;
NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
nl_assert(M->m == M->n);
nlSparseMatrixSort(M);
if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
nnz = nlSparseMatrixNNZ(M);
} else {
nnz = 0;
for(i=0; i<M->n; ++i) {
NLRowColumn* Ri = &M->row[i];
for(jj=0; jj<Ri->size; ++jj) {
j = Ri->coeff[jj].index;
if(j <= i) {
++nnz;
}
}
}
}
nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
k=0;
for(i=0; i<M->m; ++i) {
NLRowColumn* Ri = &(M->row[i]);
CRS->rowptr[i] = k;
for(jj=0; jj<Ri->size; ++jj) {
j = Ri->coeff[jj].index;
if((M->storage & NL_MATRIX_STORE_SYMMETRIC)) {
nl_debug_assert(j <= i);
}
if(j <= i) {
CRS->val[k] = Ri->coeff[jj].value;
CRS->colind[k] = j;
++k;
}
}
}
CRS->rowptr[M->m] = k;
return (NLMatrix)CRS;
}
void nlMatrixCompress(NLMatrix* M) {
NLMatrix result = NULL;
if(
(*M)->type == NL_MATRIX_CRS &&
nlExtensionIsInitialized_MKL()
) {
result = nlMKLMatrixNewFromCRSMatrix((NLCRSMatrix*)*M);
nlDeleteMatrix(*M);
*M = result;
return;
}
if((*M)->type != NL_MATRIX_SPARSE_DYNAMIC) {
return;
}
if(nlExtensionIsInitialized_MKL()) {
result = nlMKLMatrixNewFromSparseMatrix((NLSparseMatrix*)*M);
} else {
result = nlCRSMatrixNewFromSparseMatrix((NLSparseMatrix*)*M);
}
nlDeleteMatrix(*M);
*M = result;
}
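/*
* Usage sketch (illustrative, not part of the library): assemble a dynamic
* sparse matrix, compress it in place, then apply it to a vector. The 2x2
* size and the coefficients are assumptions for the example.
*/
#if 0
static void example_compress(void) {
double x[2] = {1.0, 1.0};
double y[2];
NLMatrix M = nlSparseMatrixNew(2, 2, NL_MATRIX_STORE_ROWS);
nlSparseMatrixAdd((NLSparseMatrix*)M, 0, 0, 4.0);
nlSparseMatrixAdd((NLSparseMatrix*)M, 1, 1, 5.0);
nlMatrixCompress(&M);        /* dynamic storage replaced by CRS (or MKL) */
nlMultMatrixVector(M, x, y); /* y = {4.0, 5.0} */
nlDeleteMatrix(M);
}
#endif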
NLuint nlMatrixNNZ(NLMatrix M) {
if(M->type == NL_MATRIX_SPARSE_DYNAMIC) {
return nlSparseMatrixNNZ((NLSparseMatrix*)M);
} else if(M->type == NL_MATRIX_CRS) {
return nlCRSMatrixNNZ((NLCRSMatrix*)M);
}
return M->m * M->n;
}
NLMatrix nlMatrixFactorize(NLMatrix M, NLenum solver) {
NLMatrix result = NULL;
switch(solver) {
case NL_SUPERLU_EXT:
case NL_PERM_SUPERLU_EXT:
case NL_SYMMETRIC_SUPERLU_EXT:
result = nlMatrixFactorize_SUPERLU(M,solver);
break;
case NL_CHOLMOD_EXT:
result = nlMatrixFactorize_CHOLMOD(M,solver);
break;
default:
nlError("nlMatrixFactorize","unknown solver");
}
return result;
}
/*****************************************************************/
/**
* \brief A matrix class implemented by a function.
*/
typedef struct {
/**
* \brief number of rows
*/
NLuint m;
/**
* \brief number of columns
*/
NLuint n;
/**
* \brief Matrix type
* \details One of NL_MATRIX_SPARSE_DYNAMIC,
* NL_MATRIX_CRS, NL_MATRIX_SUPERLU_EXT,
* NL_MATRIX_CHOLDMOD_EXT, NL_MATRIX_FUNCTION,
* NL_MATRIX_OTHER
*/
NLenum type;
/**
* \brief Destructor
*/
NLDestroyMatrixFunc destroy_func;
/**
* \brief Matrix x vector product (abstract matrix API,
* takes matrix, rhs and lhs)
*/
NLMultMatrixVectorFunc mult_func;
/**
* \brief Matrix x vector product (user API, only takes
* rhs and lhs).
*/
NLMatrixFunc matrix_func;
} NLFunctionMatrix;
static void nlFunctionMatrixDestroy(NLFunctionMatrix* M) {
(void)M; /* to avoid 'unused parameter' warning */
/*
* Nothing special to do,
* there is no dynamic allocated mem.
*/
}
static void nlFunctionMatrixMult(
NLFunctionMatrix* M, const NLdouble* x, NLdouble* y
) {
M->matrix_func(x,y);
}
NLMatrix nlMatrixNewFromFunction(NLuint m, NLuint n, NLMatrixFunc func) {
NLFunctionMatrix* result = NL_NEW(NLFunctionMatrix);
result->m = m;
result->n = n;
result->type = NL_MATRIX_FUNCTION;
result->destroy_func = (NLDestroyMatrixFunc)nlFunctionMatrixDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlFunctionMatrixMult;
result->matrix_func = func;
return (NLMatrix)result;
}
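/*
* Usage sketch (illustrative): wrapping a plain callback as an NLMatrix.
* The callback implements y = 2*x on a fixed dimension of 4; the dimension
* and the scaling factor are assumptions for the example.
*/
#if 0
static void twice_func(const double* x, double* y) {
NLuint i;
for(i=0; i<4; ++i) {
y[i] = 2.0*x[i];
}
}
static void example_function_matrix(void) {
double x[4] = {1.0, 2.0, 3.0, 4.0};
double y[4];
NLMatrix M = nlMatrixNewFromFunction(4, 4, twice_func);
nlMultMatrixVector(M, x, y); /* y[i] == 2*x[i] */
nlDeleteMatrix(M);
}
#endif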
NLMatrixFunc nlMatrixGetFunction(NLMatrix M) {
if(M == NULL) {
return NULL;
}
if(M->type != NL_MATRIX_FUNCTION) {
return NULL;
}
return ((NLFunctionMatrix*)M)->matrix_func;
}
/******************************************************************************/
/**
* \brief A matrix class that implements the product between two matrices.
*/
typedef struct {
/**
* \brief number of rows
*/
NLuint m;
/**
* \brief number of columns
*/
NLuint n;
/**
* \brief matrix type, NL_MATRIX_OTHER
*/
NLenum type;
/**
* \brief Destructor
*/
NLDestroyMatrixFunc destroy_func;
/**
* \brief Matrix x vector product (abstract matrix API,
* takes matrix, rhs and lhs)
*/
NLMultMatrixVectorFunc mult_func;
/**
* \brief Matrix x vector product (user API, only takes
* rhs and lhs).
*/
NLMatrixFunc matrix_func;
/**
* \brief First matrix of the product.
*/
NLMatrix M;
/**
* \brief NL_TRUE if memory ownership was transferred,
* NL_FALSE otherwise.
*/
NLboolean owns_M;
/**
* \brief Second matrix of the product.
*/
NLMatrix N;
/**
* \brief NL_TRUE if memory ownership was transferred,
* NL_FALSE otherwise.
*/
NLboolean owns_N;
/**
* \brief A temporary vector of dimension N->m (= M->n)
*/
NLdouble* work;
} NLMatrixProduct;
static void nlMatrixProductDestroy(NLMatrixProduct* P) {
NL_DELETE_ARRAY(P->work);
if(P->owns_M) {
nlDeleteMatrix(P->M); P->M = NULL;
}
if(P->owns_N) {
nlDeleteMatrix(P->N); P->N = NULL;
}
}
static void nlMatrixProductMult(
NLMatrixProduct* P, const NLdouble* x, NLdouble* y
) {
nlMultMatrixVector(P->N, x, P->work);
nlMultMatrixVector(P->M, P->work, y);
}
NLMatrix nlMatrixNewFromProduct(
NLMatrix M, NLboolean owns_M, NLMatrix N, NLboolean owns_N
) {
NLMatrixProduct* result = NL_NEW(NLMatrixProduct);
nl_assert(M->n == N->m);
result->m = M->m;
result->n = N->n;
result->type = NL_MATRIX_OTHER;
result->work = NL_NEW_ARRAY(NLdouble,N->m);
result->destroy_func = (NLDestroyMatrixFunc)nlMatrixProductDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlMatrixProductMult;
result->M = M;
result->owns_M = owns_M;
result->N = N;
result->owns_N = owns_N;
return (NLMatrix)result;
}
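/*
* Usage sketch (illustrative): composing two compatible matrices without
* forming the product explicitly; each product with a vector applies N,
* then M, through the internal work vector.
*/
#if 0
static void example_product(NLMatrix M, NLMatrix N) {
/* Assumes M->n == N->m. Ownership of both operands is transferred,
* so deleting P also deletes M and N. */
NLMatrix P = nlMatrixNewFromProduct(M, NL_TRUE, N, NL_TRUE);
/* ... use nlMultMatrixVector(P, x, y) here ... */
nlDeleteMatrix(P);
}
#endif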
/******************************************************************************/
|
GB_binop__isgt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isgt_fp32
// A.*B function (eWiseMult): GB_AemultB__isgt_fp32
// A*D function (colscale): GB_AxD__isgt_fp32
// D*A function (rowscale): GB_DxB__isgt_fp32
// C+=B function (dense accum): GB_Cdense_accumB__isgt_fp32
// C+=b function (dense accum): GB_Cdense_accumb__isgt_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_fp32
// C=scalar+B GB_bind1st__isgt_fp32
// C=scalar+B' GB_bind1st_tran__isgt_fp32
// C=A+scalar GB_bind2nd__isgt_fp32
// C=A'+scalar GB_bind2nd_tran__isgt_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x > y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_FP32 || GxB_NO_ISGT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__isgt_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__isgt_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isgt_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isgt_fp32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isgt_fp32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__isgt_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__isgt_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isgt_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isgt_fp32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
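//------------------------------------------------------------------------------
// Usage sketch (illustrative, not part of GraphBLAS): calling the bind2nd
// kernel above directly on a dense array. Array contents and the scalar y
// are assumptions for the example; Cx receives 0.0/1.0 as float.
//------------------------------------------------------------------------------
#if 0
static void example_bind2nd (void)
{
float Ax [4] = { 0.5f, 1.5f, 2.5f, 3.5f } ;
float Cx [4] ;
float y = 2.0f ;
GB_bind2nd__isgt_fp32 ((GB_void *) Cx, (const GB_void *) Ax,
(const GB_void *) &y, 4, 1) ;
// Cx is now { 0, 0, 1, 1 }
}
#endif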
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB_bind1st_tran__isgt_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB_bind2nd_tran__isgt_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal */
inline void manage_multi_threading(Action action, int* v) {
static EIGEN_UNUSED int m_maxThreads = -1;
if(action==SetAction) {
eigen_internal_assert(v!=0);
m_maxThreads = *v;
} else if(action==GetAction) {
eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
if(m_maxThreads>0)
*v = m_maxThreads;
else
*v = omp_get_max_threads();
#else
*v = 1;
#endif
} else {
eigen_internal_assert(false);
}
}
}
/** Must be called first when calling Eigen from multiple threads */
inline void initParallel() {
int nbt;
internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2, l3;
internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
* \sa setNbThreads */
inline int nbThreads() {
int ret;
internal::manage_multi_threading(GetAction, &ret);
return ret;
}
/** Sets the max number of threads reserved for Eigen
* \sa nbThreads */
inline void setNbThreads(int v) {
internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
template<typename Index> struct GemmParallelInfo {
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
Index volatile sync;
int volatile users;
Index lhs_start;
Index lhs_length;
};
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose) {
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of requested ones.
Index actual_threads = omp_get_num_threads();
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
HelloOMP.c | #include <stdio.h>
#include <omp.h>
int main(void) {
#pragma omp parallel
printf("(%d:!!!Hello world!!!)",
omp_get_thread_num());
return(0);
} |
GB_unop__identity_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_int16
// op(A') function: GB_unop_tran__identity_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint8_int16
(
uint8_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
IF97_Region1bw.c |
// Copyright Martin Lord 2014-2014.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/** IAPWS-IF97 Region 1 Equations
* VALIDITY: 273.15 K <= T <= 623.15 K, Psat <= p <= 100 MPa
* Exception: backwards equations are not valid in the metastable (superheated liquid) region
*/
#include "IF97_common.h" //PSTAR TSTAR sqr
#include "IF97_Region1.h"
#include <math.h> // pow
//**********************************************************
//********* REGION 1 BACKWARDS EQUATIONS********************
//returns temperature (K) in region 1 for a given pressure and enthalpy
// Checked OK
double if97_r1_t_ph (double p_MPa , double h_kJperKg ){
// see Table 6
const typIF97Coeffs_IJn BW_COEFFS_R1_TPH[] = {
{0, 0, 0.0} //0 i starts at 1, so 0th i is not used
,{0, 0, -0.23872489924521E3} //1
,{0, 1, 0.40421188637945E3}
,{0, 2, 0.11349746881718E3}
,{0, 6, -0.58457616048039E1}
,{0, 22, -0.15285482413140E-3}
,{0, 32, -0.10866707695377E-5}
,{1, 0, -0.13391744872602E2}
,{1, 1, 0.43211039183559E2}
,{1, 2, -0.54010067170506E2}
,{1, 3, 0.30535892203916E2}
,{1, 4, -0.65964749423638E1}
,{1, 10, 0.93965400878363E-2}
,{1, 32, 0.11573647505340E-6}
,{2, 10, -0.25858641282073E-4}
,{2, 32, -0.40644363084799E-8}
,{3, 10, 0.66456186191635E-7}
,{3, 32, 0.80670734103027E-10}
,{4, 32, -0.93477771213947E-12}
,{5, 32, 0.58265442020601E-14}
,{6, 32, -0.15020185953503E-16} //20
};
const int MAX_BW_COEFFS_R1_TPH = 20;
const double PSTAR_R1_TPH = 1.0 ; // MPa
const double TSTAR_R1_TPH = 1.0 ;
const double HSTAR_R1_TPH = 2500.0 ; // kJ / kg
double if97pi = p_MPa / PSTAR_R1_TPH;
double if97eta = h_kJperKg / HSTAR_R1_TPH;
int i;
double dblHSum =0.0;
#pragma omp parallel for reduction(+:dblHSum) // accumulate the series in parallel
for (i=1; i <= MAX_BW_COEFFS_R1_TPH; i++) {
dblHSum += BW_COEFFS_R1_TPH[i].ni * pow(if97pi, BW_COEFFS_R1_TPH[i].Ii) * pow( ( if97eta +1), BW_COEFFS_R1_TPH[i].Ji);
}
return TSTAR_R1_TPH * dblHSum;
}
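/* Verification sketch (illustrative): the IAPWS-IF97 release lists
* T(3 MPa, 500 kJ/kg) = 391.798509 K among its backward-equation check
* values; the tolerance below is an assumption. */
#if 0
#include <assert.h>
static void if97_r1_t_ph_check (void) {
double T_K = if97_r1_t_ph (3.0, 500.0); /* fabs() comes from math.h above */
assert (fabs (T_K - 391.798509) < 1.0e-5);
}
#endif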
//returns temperature (K) in region 1 for a given pressure and entropy
//
double if97_r1_t_ps (double p_MPa , double s_kJperKgK ){
// see Table 8
const typIF97Coeffs_IJn BW_COEFFS_R1_TPS[] = {
{0, 0, 0.0} //0 i starts at 1, so 0th i is not used
,{0, 0, 174.78268058307} //1
,{0, 1, 34.806930892873}
,{0, 2, 6.5292584978455}
,{0, 3, 0.33039981775489}
,{0, 11, -1.9281382923196E-07}
,{0, 31, -2.4909197244573E-23}
,{1, 0, -0.26107636489332}
,{1, 1, 0.22592965981586}
,{1, 2, -0.06425646339523}
,{1, 3, 0.00788762892705}
,{1, 12, 3.5672110607366E-10}
,{1, 31, 1.7332496994895E-24}
,{2, 0, 0.00056608900655}
,{2, 1, -0.0003263548314}
,{2, 2, 4.4778286690632E-05}
,{2, 9, -5.1322156908507E-10}
,{2, 31, -4.2522657042207E-26}
,{3, 10, 2.6400441360689E-13}
,{3, 32, 7.8124600459723E-29}
,{4, 32, -3.0732199903668E-31} //20
};
const int MAX_BW_COEFFS_R1_TPS = 20;
const double PSTAR_R1_TPS = 1.0 ; // MPa
const double TSTAR_R1_TPS = 1.0; // K
const double SSTAR_R1_TPS = 1.0 ; // kJ / kgK
double if97pi = p_MPa / PSTAR_R1_TPS;
double if97sigma = s_kJperKgK / SSTAR_R1_TPS;
int i;
double dblHSum =0.0;
#pragma omp parallel for reduction(+:dblHSum) // accumulate the series in parallel
for (i=1; i <= MAX_BW_COEFFS_R1_TPS; i++) {
dblHSum += BW_COEFFS_R1_TPS[i].ni * pow(if97pi, BW_COEFFS_R1_TPS[i].Ii) * pow( ( if97sigma + 2), BW_COEFFS_R1_TPS[i].Ji);
}
return TSTAR_R1_TPS * dblHSum;
}
|
kmp_atomic_cas_cpt.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdbool.h>
#include <omp.h>
// Used to detect architecture
#include "../../src/kmp_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void* ident_t;
extern bool
__kmpc_atomic_bool_1_cas_cpt(ident_t *loc, int gtid, char *x, char e, char d,
char *pv);
extern bool
__kmpc_atomic_bool_2_cas_cpt(ident_t *loc, int gtid, short *x, short e, short d,
short *pv);
extern bool
__kmpc_atomic_bool_4_cas_cpt(ident_t *loc, int gtid, int *x, int e, int d,
int *pv);
extern bool
__kmpc_atomic_bool_8_cas_cpt(ident_t *loc, int gtid, long long *x, long long e,
long long d, long long *pv);
extern char
__kmpc_atomic_val_1_cas_cpt(ident_t *loc, int gtid, char *x, char e, char d,
char *pv);
extern short
__kmpc_atomic_val_2_cas_cpt(ident_t *loc, int gtid, short *x, short e, short d,
short *pv);
extern int
__kmpc_atomic_val_4_cas_cpt(ident_t *loc, int gtid, int *x, int e, int d,
int *pv);
extern long long
__kmpc_atomic_val_8_cas_cpt(ident_t *loc, int gtid, long long *x, long long e,
long long d, long long *pv);
#ifdef __cplusplus
}
#endif
int main() {
int ret = 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
bool r;
char c0 = 1;
char c1 = 2;
char c2 = 3;
char co = 2;
char cc = 0;
char cv = 0;
short s0 = 11;
short s1 = 12;
short s2 = 13;
short so = 12;
short sc = 0;
short sv = 0;
int i0 = 211;
int i1 = 212;
int i2 = 213;
int io = 212;
int ic = 0;
int iv = 0;
long long l0 = 3111;
long long l1 = 3112;
long long l2 = 3113;
long long lo = 3112;
long long lc = 0;
long long lv = 0;
// initialize OpenMP runtime library
omp_set_dynamic(0);
// #pragma omp atomic compare update capture
// { r = x == e; if(r) { x = d; } else { v = x; } }
// char, co == c1 initially, co == c2 finally
r = __kmpc_atomic_bool_1_cas_cpt(NULL, 0, &co, c0, c2, &cv); // no-op
if (co != c1) {
ret++; printf("Error bool_1_cas_cpt no-op: %d != %d\n", co, c1); }
if (cv != co) {
ret++; printf("Error bool_1_cas_cpt no-op cpt: %d != %d\n", cv, co); }
if (r) { ret++; printf("Error bool_1_cas_cpt no-op ret: %d\n", r); }
cv = 0;
r = __kmpc_atomic_bool_1_cas_cpt(NULL, 0, &co, c1, c2, &cv);
if (co != c2) { ret++; printf("Error bool_1_cas_cpt: %d != %d\n", co, c2); }
if (cv != 0) { ret++; printf("Error bool_1_cas_cpt cpt: %d != %d\n", cv, 0); }
if (!r) { ret++; printf("Error bool_1_cas_cpt ret: %d\n", r); }
// short
r = __kmpc_atomic_bool_2_cas_cpt(NULL, 0, &so, s0, s2, &sv); // no-op
if (so != s1) {
ret++; printf("Error bool_2_cas_cpt no-op: %d != %d\n", so, s1); }
if (sv != so) {
ret++; printf("Error bool_2_cas_cpt no-op cpt: %d != %d\n", sv, so); }
if (r) { ret++; printf("Error bool_2_cas_cpt no-op ret: %d\n", r); }
sv = 0;
r = __kmpc_atomic_bool_2_cas_cpt(NULL, 0, &so, s1, s2, &sv);
if (so != s2) { ret++; printf("Error bool_2_cas_cpt: %d != %d\n", so, s2); }
if (sv != 0) { ret++; printf("Error bool_2_cas_cpt cpt: %d != %d\n", sv, 0); }
if (!r) { ret++; printf("Error bool_2_cas_cpt ret: %d\n", r); }
// int
r = __kmpc_atomic_bool_4_cas_cpt(NULL, 0, &io, i0, i2, &iv); // no-op
if (io != i1) {
ret++; printf("Error bool_4_cas_cpt no-op: %d != %d\n", io, i1); }
if (iv != io) {
ret++; printf("Error bool_4_cas_cpt no-op cpt: %d != %d\n", iv, io); }
if (r) { ret++; printf("Error bool_4_cas_cpt no-op ret: %d\n", r); }
iv = 0;
r = __kmpc_atomic_bool_4_cas_cpt(NULL, 0, &io, i1, i2, &iv);
if (io != i2) { ret++; printf("Error bool_4_cas_cpt: %d != %d\n", io, i2); }
if (iv != 0) { ret++; printf("Error bool_4_cas_cpt cpt: %d != %d\n", iv, 0); }
if (!r) { ret++; printf("Error bool_4_cas_cpt ret: %d\n", r); }
// long long
r = __kmpc_atomic_bool_8_cas_cpt(NULL, 0, &lo, l0, l2, &lv); // no-op
if (lo != l1) {
ret++; printf("Error bool_8_cas_cpt no-op: %lld != %lld\n", lo, l1); }
if (lv != lo) {
ret++; printf("Error bool_8_cas_cpt no-op cpt: %lld != %lld\n", lv, lo); }
if (r) { ret++; printf("Error bool_8_cas_cpt no-op ret: %d\n", r); }
lv = 0;
r = __kmpc_atomic_bool_8_cas_cpt(NULL, 0, &lo, l1, l2, &lv);
if (lo != l2) {
ret++; printf("Error bool_8_cas_cpt: %lld != %lld\n", lo, l2); }
if (lv != 0) { // should not be assigned
ret++; printf("Error bool_8_cas_cpt cpt: %lld != %d\n", lv, 0); }
if (!r) { ret++; printf("Error bool_8_cas_cpt ret: %d\n", r); }
// #pragma omp atomic compare update capture
// { if (x == e) { x = d; }; v = x; }
// char, co == c2 initially, co == c1 finally
cc = __kmpc_atomic_val_1_cas_cpt(NULL, 0, &co, c0, c1, &cv); // no-op
if (co != c2) {
ret++; printf("Error val_1_cas_cpt no-op: %d != %d\n", co, c2); }
if (cv != c2) {
ret++; printf("Error val_1_cas_cpt no-op cpt: %d != %d\n", cv, c2); }
if (cc != c2) {
ret++; printf("Error val_1_cas_cpt no-op ret: %d != %d\n", cc, c2); }
cc = __kmpc_atomic_val_1_cas_cpt(NULL, 0, &co, c2, c1, &cv);
if (co != c1) { ret++; printf("Error val_1_cas_cpt: %d != %d\n", co, c1); }
if (cv != c1) { ret++; printf("Error val_1_cas_cpt cpt: %d != %d\n", cv, c1); }
if (cc != c2) { ret++; printf("Error val_1_cas_cpt ret: %d != %d\n", cc, c2); }
// short
sc = __kmpc_atomic_val_2_cas_cpt(NULL, 0, &so, s0, s1, &sv); // no-op
if (so != s2) {
ret++; printf("Error val_2_cas_cpt no-op: %d != %d\n", so, s2); }
if (sv != s2) {
ret++; printf("Error val_2_cas_cpt no-op cpt: %d != %d\n", sv, s2); }
if (sc != s2) {
ret++; printf("Error val_2_cas_cpt no-op ret: %d != %d\n", sc, s2); }
sc = __kmpc_atomic_val_2_cas_cpt(NULL, 0, &so, s2, s1, &sv);
if (so != s1) { ret++; printf("Error val_2_cas_cpt: %d != %d\n", so, s1); }
if (sv != s1) { ret++; printf("Error val_2_cas_cpt cpt: %d != %d\n", sv, s1); }
if (sc != s2) { ret++; printf("Error val_2_cas_cpt ret: %d != %d\n", sc, s2); }
// int
ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &io, i0, i1, &iv); // no-op
if (io != i2) {
ret++; printf("Error val_4_cas_cpt no-op: %d != %d\n", io, i2); }
if (iv != i2) {
ret++; printf("Error val_4_cas_cpt no-op cpt: %d != %d\n", iv, i2); }
if (ic != i2) {
ret++; printf("Error val_4_cas_cpt no-op ret: %d != %d\n", ic, i2); }
ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &io, i2, i1, &iv);
if (io != i1) { ret++; printf("Error val_4_cas_cpt: %d != %d\n", io, i1); }
if (iv != i1) { ret++; printf("Error val_4_cas_cpt cpt: %d != %d\n", io, i1); }
if (ic != i2) { ret++; printf("Error val_4_cas_cpt ret: %d != %d\n", ic, i2); }
// long long
lc = __kmpc_atomic_val_8_cas_cpt(NULL, 0, &lo, l0, l1, &lv); // no-op
if (lo != l2) {
ret++; printf("Error val_8_cas_cpt no-op: %lld != %lld\n", lo, l2); }
if (lv != l2) {
ret++; printf("Error val_8_cas_cpt no-op cpt: %lld != %lld\n", lv, l2); }
if (lc != l2) {
ret++; printf("Error val_8_cas_cpt no-op ret: %lld != %lld\n", lc, l2); }
lc = __kmpc_atomic_val_8_cas_cpt(NULL, 0, &lo, l2, l1, &lv);
if (lo != l1) { ret++; printf("Error val_8_cas_cpt: %lld != %lld\n", lo, l1); }
if (lv != l1) {
ret++; printf("Error val_8_cas_cpt cpt: %lld != %lld\n", lv, l1); }
if (lc != l2) {
ret++; printf("Error val_8_cas_cpt ret: %lld != %lld\n", lc, l2); }
// check in parallel
i0 = 1;
i1 = 0;
for (io = 0; io < 5; ++io) {
#pragma omp parallel num_threads(2) private(i2, ic, r, iv)
{
if (omp_get_thread_num() == 0) {
// th0 waits for th1 to increment i1, then th0 increments i0
#pragma omp atomic read
i2 = i1;
ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &i0, i2, i2 + 1, &iv);
while(ic != i2) {
if (iv != ic) {
ret++;
printf("Error 1 in parallel cpt, %d != %d\n", iv, ic);
}
#pragma omp atomic read
i2 = i1;
ic = __kmpc_atomic_val_4_cas_cpt(NULL, 0, &i0, i2, i2 + 1, &iv);
}
if (iv != i2 + 1) {
ret++;
printf("Error 2 in parallel cpt, %d != %d\n", iv, i2 + 1);
}
} else {
// th1 increments i1 if it is equal to i0 - 1, letting th0 to proceed
r = 0;
while(!r) {
#pragma omp atomic read
i2 = i0;
r = __kmpc_atomic_bool_4_cas_cpt(NULL, 0, &i1, i2 - 1, i2, &iv);
}
}
}
}
if (i0 != 6 || i1 != 5) {
ret++;
printf("Error in parallel, %d != %d or %d != %d\n", i0, 6, i1, 5);
}
if (ret == 0)
printf("passed\n");
#else
printf("Unsupported architecture, skipping test...\n");
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
return ret;
}
|
mbar_log_wi_jn.c | #include "mex.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
/* inputs */
double *N_k_pr;
size_t *N_k;
double *f_k;
double *u_kln;
double *u_kn;
double *K_pr;
size_t K;
double *N_max_pr;
size_t N_max;
/* outputs */
double *log_wi_jn;
/* working variables */
size_t i, j;
size_t k, l;
size_t n;
size_t mrows;
size_t ncols;
double *FlogN;
double *log_term;
double term_sum;
double max_log_term;
double log_sum;
double u_l, u_k;
/* check inputs and outputs */
if ( nrhs < 6 ) {
mexErrMsgTxt("MEX: Not enough input arguments.");
}
/* get inputs */
N_k_pr = mxGetPr(prhs[0]);
f_k = mxGetPr(prhs[1]);
u_kln = mxGetPr(prhs[2]);
u_kn = mxGetPr(prhs[3]);
K_pr = mxGetPr(prhs[4]);
N_max_pr = mxGetPr(prhs[5]);
K = (size_t) (K_pr[0] + 0.5);
N_max = (size_t) (N_max_pr[0] + 0.5);
N_k = (size_t *) malloc(K*sizeof(size_t));
for (k = 0; k < K; k++) {
N_k[k] = (size_t) (N_k_pr[k] + 0.5);
}
#ifdef DEBUG
mexPrintf("MEX: K = %zu\n", K);
mexPrintf("MEX: N_max = %zu\n", N_max);
#endif
/* setup: working variables */
FlogN = (double *) malloc(K*sizeof(double));
for (k = 0; k < K; k++) {
FlogN[k] = log((double)N_k[k])+f_k[k];
}
/* allocate output variables */
plhs[0] = mxCreateDoubleMatrix((mwSize) K, (mwSize) N_max, mxREAL);
log_wi_jn = mxGetPr(plhs[0]);
for (k = 0; k < K; k++) {
for (n = 0; n < N_max; n++) {
log_wi_jn[k + n*K] = 0.0;
}
}
/* calculation */
#pragma omp parallel \
default(none) \
private(k, n, l, max_log_term, u_k, u_l, log_term, term_sum, log_sum) \
shared(K, N_k, u_kln, u_kn, FlogN, log_wi_jn)
{
log_term = (double *) malloc(K*sizeof(double));
#pragma omp for
for (k = 0; k < K; k++) {
for (n = 0; n < N_k[k]; n++) {
max_log_term = -1e100;
u_k = u_kn[k + n*K];
for (l = 0; l < K; l++) {
u_l = u_kln[k + l*K + n*K*K];
log_term[l] = FlogN[l] - (u_l - u_k);
if (log_term[l] > max_log_term) {max_log_term = log_term[l];}
}
term_sum = 0.0;
for (l = 0; l < K; l++) {
term_sum += exp(log_term[l]-max_log_term);
}
log_sum = log(term_sum) + max_log_term;
log_wi_jn[k + n*K] = -log_sum;
}
}
free(log_term);
}
if (N_k != NULL) {
free(N_k);
}
if (FlogN != NULL) {
free(FlogN);
}
/* exit(EXIT_SUCCESS); */
}
|
gemv_x_csr_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
static alphasparse_status_t
gemv_csr_trans_serial(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
/* y = beta * y, then accumulate y += alpha * A^T * x,
* matching the semantics of the OpenMP variant below */
for (ALPHA_INT j = 0; j < n; ++j)
{
alpha_mule(y[j], beta);
}
for (ALPHA_INT i = 0; i < m; ++i)
{
for (ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ai++)
{
ALPHA_Number val;
alpha_mul(val, alpha, A->values[ai]);
alpha_madde(y[A->col_indx[ai]], val, x[i]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
static alphasparse_status_t
gemv_csr_trans_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
const ALPHA_INT thread_num = alpha_get_thread_num();
ALPHA_INT partition[thread_num + 1];
balanced_partition_row_by_nnz(A->rows_end, m, thread_num, partition);
ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
{
const ALPHA_INT tid = alpha_get_thread_id();
const ALPHA_INT local_m_s = partition[tid];
const ALPHA_INT local_m_e = partition[tid + 1];
tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * n);
/* zero the per-thread accumulator before the multiply-add loop */
for (ALPHA_INT j = 0; j < n; ++j)
{
alpha_setzero(tmp[tid][j]);
}
for (ALPHA_INT i = local_m_s; i < local_m_e; ++i)
{
const ALPHA_Number x_r = x[i];
int pkl = A->rows_start[i];
int pke = A->rows_end[i];
for (; pkl < pke - 3; pkl += 4)
{
alpha_madde(tmp[tid][A->col_indx[pkl]], A->values[pkl], x_r);
alpha_madde(tmp[tid][A->col_indx[pkl + 1]], A->values[pkl + 1], x_r);
alpha_madde(tmp[tid][A->col_indx[pkl + 2]], A->values[pkl + 2], x_r);
alpha_madde(tmp[tid][A->col_indx[pkl + 3]], A->values[pkl + 3], x_r);
}
for (; pkl < pke; ++pkl)
{
alpha_madde(tmp[tid][A->col_indx[pkl]], A->values[pkl], x_r);
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < n; ++i)
{
ALPHA_Number tmp_y;
alpha_setzero(tmp_y);
for (ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_adde(tmp_y, tmp[j][i]);
}
alpha_mule(y[i],beta);
alpha_madde(y[i],alpha,tmp_y);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return gemv_csr_trans_omp(alpha, A, x, beta, y);
}
|
testing.c | /* Generated by Cython 0.29.24 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"hsl_c.c"
],
"extra_compile_args": [
"/Qpar",
"/fp:fast",
"/O2",
"/Oy",
"/Ot"
],
"include_dirs": [
"."
],
"language": "c",
"name": "HSL.testing",
"sources": [
"testing.pyx"
]
},
"module_name": "HSL.testing"
}
END: Cython Metadata */
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_24"
#define CYTHON_HEX_VERSION 0x001D18F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
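/* Illustrative aside, not part of the generated module: the enum above is a
 * C89-compatible compile-time assertion. If SIZEOF_VOID_P disagrees with
 * sizeof(void*), the divisor becomes zero and the build fails with a
 * division-by-zero error instead of miscompiling. A minimal sketch of the
 * same trick (hypothetical STATIC_ASSERT_EXPR name, guarded out so it does
 * not affect this translation unit):
 */
#if 0
#define STATIC_ASSERT_EXPR(cond, name) \
    enum { name = 1 / (int)(!!(cond)) } /* divide-by-zero when cond is false */
STATIC_ASSERT_EXPR(sizeof(int) >= 2, __example_int_has_at_least_16_bits);
#endif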
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
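/* Illustrative aside: CYTHON_FALLTHROUGH resolves to the strongest
 * fall-through annotation the compiler supports ([[fallthrough]], a
 * clang/gnu attribute, or nothing), which silences -Wimplicit-fallthrough
 * on deliberate case fall-through. A minimal usage sketch (hypothetical
 * function, guarded out):
 */
#if 0
static int __example_is_blank(int c) {
    switch (c) {
    case '\t':
        CYTHON_FALLTHROUGH; /* tab is handled exactly like space */
    case ' ':
        return 1;
    default:
        return 0;
    }
}
#endif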
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
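/* Illustrative aside: on interpreters that predate the PEP 539 thread-local
 * storage API, the shim above maps the PyThread_tss_* calls onto the legacy
 * PyThread_*_key_value functions, so the rest of the file can use one API
 * unconditionally. A minimal round-trip sketch (hypothetical names, guarded
 * out):
 */
#if 0
static Py_tss_t __example_key = Py_tss_NEEDS_INIT;
static void *__example_tss_roundtrip(void *value) {
    if (!PyThread_tss_is_created(&__example_key))
        (void)PyThread_tss_create(&__example_key); /* returns 0 on success */
    (void)PyThread_tss_set(&__example_key, value); /* per-thread slot */
    return PyThread_tss_get(&__example_key);       /* read the value back */
}
#endif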
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
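/* Illustrative aside: when the C library does not provide NAN, the fallback
 * above fills a float with all-one bits, which on IEEE-754 hardware is a
 * quiet NaN (all-ones exponent, non-zero mantissa). A compiled-out sanity
 * check relying on NaN being the only value unequal to itself:
 */
#if 0
#include <assert.h>
static void __example_nan_check(void) {
    float nan_value = __PYX_NAN();
    assert(nan_value != nan_value); /* holds only for NaN */
}
#endif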
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
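/* Illustrative aside: generated code reports an error by recording the
 * source position and jumping to a cleanup label; __PYX_ERR bundles both
 * steps into one statement. A minimal sketch of the pattern (hypothetical
 * function and line number, guarded out):
 */
#if 0
static PyObject *__example_error_goto(void) {
    PyObject *list = PyList_New(0);
    if (!list) __PYX_ERR(0, 1, __pyx_L1_error) /* mark position, then goto */
    return list;
  __pyx_L1_error:
    return NULL; /* the Python exception is already set at this point */
}
#endif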
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__HSL__testing
#define __PYX_HAVE_API__HSL__testing
/* Early includes */
#include <math.h>
#include <string.h>
#include <stdio.h>
#include "hsl_c.c"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
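/* Illustrative aside: casting both operands to size_t folds the two tests
 * 0 <= i and i < limit into a single unsigned comparison, because a negative
 * Py_ssize_t wraps around to a huge size_t value. Compiled-out examples for
 * a hypothetical sequence of length 10:
 */
#if 0
static void __example_index_checks(void) {
    int in_low   = __Pyx_is_valid_index(0, 10);  /* 1: first element */
    int in_high  = __Pyx_is_valid_index(9, 10);  /* 1: last element */
    int out_neg  = __Pyx_is_valid_index(-1, 10); /* 0: wraps to SIZE_MAX */
    int out_past = __Pyx_is_valid_index(10, 10); /* 0: one past the end */
    (void)in_low; (void)in_high; (void)out_neg; (void)out_past;
}
#endif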
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
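/* Illustrative aside: likely()/unlikely() wrap __builtin_expect on GCC-style
 * compilers so the predicted-common path is laid out as the fall-through;
 * elsewhere they reduce to the bare condition. A minimal usage sketch
 * (hypothetical function, guarded out):
 */
#if 0
static int __example_branch_hint(PyObject *obj) {
    if (unlikely(obj == NULL))
        return -1; /* rare error path, moved off the hot trace */
    return 0;      /* common case, predicted as taken */
}
#endif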
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"testing.pyx",
};
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/*--- Type declarations ---*/
/* "hsl.pxd":21
* rgb struct_hsl_to_rgb(double h, double s, double l)nogil;
*
* ctypedef hsl HSL_ # <<<<<<<<<<<<<<
* ctypedef rgb RGB_
*
*/
typedef struct hsl __pyx_t_3hsl_HSL_;
/* "hsl.pxd":22
*
* ctypedef hsl HSL_
* ctypedef rgb RGB_ # <<<<<<<<<<<<<<
*
* cpdef tuple rgb_to_hsl(double r, double g, double b);
*/
typedef struct rgb __pyx_t_3hsl_RGB_;
struct __pyx_opt_args_3HSL_7testing_rgb_to_hsl_testing;
/* "HSL/testing.pyx":32
*
*
* cpdef void rgb_to_hsl_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil: # <<<<<<<<<<<<<<
* """
* TEST RGB TO HSL AND HSL TO RGB
*/
struct __pyx_opt_args_3HSL_7testing_rgb_to_hsl_testing {
int __pyx_n;
int wall_;
double tolerance_;
};
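/* Illustrative aside: Cython passes optional C-level arguments through the
 * struct above; __pyx_n records how many optional values the caller filled
 * in, and the callee substitutes defaults for the rest. A caller sketch that
 * mirrors the Python wrapper further below (guarded out):
 */
#if 0
static void __example_call_with_optionals(void) {
    struct __pyx_opt_args_3HSL_7testing_rgb_to_hsl_testing opts;
    opts.__pyx_n = 2;       /* both optional arguments are supplied */
    opts.wall_ = 1;         /* stop at the first mismatch */
    opts.tolerance_ = 1e-6; /* loosen the default 1e-7 tolerance */
    __pyx_f_3HSL_7testing_rgb_to_hsl_testing(0, &opts);
}
#endif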
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
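/* Illustrative aside: __Pyx_XDECREF_SET stores the replacement before
 * releasing the old reference, so code run by the DECREF (a destructor or
 * weakref callback) can never observe the variable pointing at freed memory.
 * A minimal sketch (hypothetical helper, guarded out):
 */
#if 0
static void __example_xdecref_set(PyObject **slot) {
    PyObject *replacement = PyLong_FromLong(42);
    if (!replacement) return;
    __Pyx_XDECREF_SET(*slot, replacement); /* swap first, then drop old ref */
}
#endif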
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* WriteUnraisableException.proto */
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
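/* Illustrative aside: with dict version tags available, the lookup macro
 * above caches one result per call site and repeats the real lookup only
 * after the dict's ma_version_tag changes; without them it degrades to the
 * plain lookup. A sketch with a hypothetical globals lookup (guarded out):
 */
#if 0
static PyObject *__example_cached_lookup(PyObject *globals, PyObject *name) {
    PyObject *result;
    __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
        result, globals, PyDict_GetItem(globals, name))
    return result; /* borrowed reference; NULL when the key is absent */
}
#endif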
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'libc.math' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'hsl' */
/* Module declarations from 'HSL.testing' */
static double __pyx_v_3HSL_7testing_ONE_255;
static CYTHON_INLINE void __pyx_f_3HSL_7testing_show_error(unsigned int, unsigned int, unsigned int, __pyx_t_3hsl_RGB_); /*proto*/
static void __pyx_f_3HSL_7testing_rgb_to_hsl_testing(int __pyx_skip_dispatch, struct __pyx_opt_args_3HSL_7testing_rgb_to_hsl_testing *__pyx_optional_args); /*proto*/
#define __Pyx_MODULE_NAME "HSL.testing"
extern int __pyx_module_is_main_HSL__testing;
int __pyx_module_is_main_HSL__testing = 0;
/* Implementation of 'HSL.testing' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_wall[] = "wall_";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_tolerance[] = "tolerance_";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_Mismatch_error[] = "\nMismatch error";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static PyObject *__pyx_kp_s_Mismatch_error;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_tolerance;
static PyObject *__pyx_n_s_wall;
static PyObject *__pyx_pf_3HSL_7testing_rgb_to_hsl_testing(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_wall_, double __pyx_v_tolerance_); /* proto */
static PyObject *__pyx_tuple_;
/* Late includes */
/* "HSL/testing.pyx":17
*
*
* cdef inline void show_error(unsigned int i, unsigned int j, unsigned int k, RGB_ rgb_): # <<<<<<<<<<<<<<
* """
*
*/
static CYTHON_INLINE void __pyx_f_3HSL_7testing_show_error(unsigned int __pyx_v_i, unsigned int __pyx_v_j, unsigned int __pyx_v_k, __pyx_t_3hsl_RGB_ __pyx_v_rgb_) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("show_error", 0);
/* "HSL/testing.pyx":26
* :return: void
* """
* printf("\nOriginal RGB R:%d G:%d B:%d :", i, j, k) # <<<<<<<<<<<<<<
* printf("\nRetrieve RGB R:%f G:%f B:%f :", rgb_.r * 255.0, rgb_.g * 255.0, rgb_.b * 255.0)
* printf("\ndiff RGB dR:%g dG:%g dB:%g :\n",
*/
(void)(printf(((char const *)"\nOriginal RGB R:%d G:%d B:%d :"), __pyx_v_i, __pyx_v_j, __pyx_v_k));
/* "HSL/testing.pyx":27
* """
* printf("\nOriginal RGB R:%d G:%d B:%d :", i, j, k)
* printf("\nRetrieve RGB R:%f G:%f B:%f :", rgb_.r * 255.0, rgb_.g * 255.0, rgb_.b * 255.0) # <<<<<<<<<<<<<<
* printf("\ndiff RGB dR:%g dG:%g dB:%g :\n",
 *                rgb_.r * 255.0 - <double>i, rgb_.g * 255.0 - <double>j, rgb_.b * 255.0 - <double>k)
*/
(void)(printf(((char const *)"\nRetrieve RGB R:%f G:%f B:%f :"), (__pyx_v_rgb_.r * 255.0), (__pyx_v_rgb_.g * 255.0), (__pyx_v_rgb_.b * 255.0)));
/* "HSL/testing.pyx":28
* printf("\nOriginal RGB R:%d G:%d B:%d :", i, j, k)
* printf("\nRetrieve RGB R:%f G:%f B:%f :", rgb_.r * 255.0, rgb_.g * 255.0, rgb_.b * 255.0)
* printf("\ndiff RGB dR:%g dG:%g dB:%g :\n", # <<<<<<<<<<<<<<
 *                rgb_.r * 255.0 - <double>i, rgb_.g * 255.0 - <double>j, rgb_.b * 255.0 - <double>k)
*
*/
(void)(printf(((char const *)"\ndiff RGB dR:%g dG:%g dB:%g :\n"), (__pyx_v_rgb_.r * (-((double)__pyx_v_i))), ((__pyx_v_rgb_.g * 255.0) - ((double)__pyx_v_j)), ((__pyx_v_rgb_.b * 255.0) - ((double)__pyx_v_k))));
/* "HSL/testing.pyx":17
*
*
* cdef inline void show_error(unsigned int i, unsigned int j, unsigned int k, RGB_ rgb_): # <<<<<<<<<<<<<<
* """
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "HSL/testing.pyx":32
*
*
* cpdef void rgb_to_hsl_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil: # <<<<<<<<<<<<<<
* """
* TEST RGB TO HSL AND HSL TO RGB
*/
static PyObject *__pyx_pw_3HSL_7testing_1rgb_to_hsl_testing(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static void __pyx_f_3HSL_7testing_rgb_to_hsl_testing(CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_3HSL_7testing_rgb_to_hsl_testing *__pyx_optional_args) {
int __pyx_v_wall_ = ((int)0);
double __pyx_v_tolerance_ = ((double)1e-07);
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
__pyx_t_3hsl_HSL_ __pyx_v_hsl_;
__pyx_t_3hsl_RGB_ __pyx_v_rgb_;
__Pyx_RefNannyDeclarations
long __pyx_t_1;
long __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save;
#endif
__Pyx_RefNannySetupContext("rgb_to_hsl_testing", 1);
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_wall_ = __pyx_optional_args->wall_;
if (__pyx_optional_args->__pyx_n > 1) {
__pyx_v_tolerance_ = __pyx_optional_args->tolerance_;
}
}
}
/* "HSL/testing.pyx":33
*
* cpdef void rgb_to_hsl_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil:
* """ # <<<<<<<<<<<<<<
* TEST RGB TO HSL AND HSL TO RGB
*
*/
/*try:*/ {
/* "HSL/testing.pyx":54
*
 *         # Loop over every possible RGB value
* for i in prange(256): # <<<<<<<<<<<<<<
* for j in range(256):
* for k in range(256):
*/
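  /* Guard generated for prange(): abort when the step is zero; the step
     here is the literal 1, so this branch is dead. */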
if ((1 == 0)) abort();
{
__pyx_t_3hsl_HSL_ __pyx_parallel_temp0;
int __pyx_parallel_temp1 = ((int)0xbad0bad0);
int __pyx_parallel_temp2 = ((int)0xbad0bad0);
int __pyx_parallel_temp3 = ((int)0xbad0bad0);
__pyx_t_3hsl_RGB_ __pyx_parallel_temp4;
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_2 = (0x100 - 0 + 1 - 1/abs(1)) / 1;
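      /* prange trip count: (stop - start + step - step/abs(step)) / step
         = (256 - 0 + 1 - 1) / 1 = 256 iterations (start 0, step 1). */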
if (__pyx_t_2 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_3, __pyx_t_4, __pyx_t_5) firstprivate(__pyx_t_6) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_hsl_) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_rgb_)
#endif /* _OPENMP */
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_2; __pyx_t_1++){
if (__pyx_parallel_why < 2)
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_1);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
/* "HSL/testing.pyx":55
 *         # Loop over every possible RGB value
* for i in prange(256):
* for j in range(256): # <<<<<<<<<<<<<<
* for k in range(256):
*
*/
for (__pyx_t_3 = 0; __pyx_t_3 < 0x100; __pyx_t_3+=1) {
__pyx_v_j = __pyx_t_3;
/* "HSL/testing.pyx":56
* for i in prange(256):
* for j in range(256):
* for k in range(256): # <<<<<<<<<<<<<<
*
* hsl_ = struct_rgb_to_hsl(i * ONE_255, j * ONE_255, k * ONE_255)
*/
for (__pyx_t_4 = 0; __pyx_t_4 < 0x100; __pyx_t_4+=1) {
__pyx_v_k = __pyx_t_4;
/* "HSL/testing.pyx":58
* for k in range(256):
*
* hsl_ = struct_rgb_to_hsl(i * ONE_255, j * ONE_255, k * ONE_255) # <<<<<<<<<<<<<<
* rgb_ = struct_hsl_to_rgb(hsl_.h, hsl_.s, hsl_.l)
*
*/
__pyx_v_hsl_ = struct_rgb_to_hsl((__pyx_v_i * __pyx_v_3HSL_7testing_ONE_255), (__pyx_v_j * __pyx_v_3HSL_7testing_ONE_255), (__pyx_v_k * __pyx_v_3HSL_7testing_ONE_255));
/* "HSL/testing.pyx":59
*
* hsl_ = struct_rgb_to_hsl(i * ONE_255, j * ONE_255, k * ONE_255)
* rgb_ = struct_hsl_to_rgb(hsl_.h, hsl_.s, hsl_.l) # <<<<<<<<<<<<<<
*
* if rgb_.r * 255.0 - <double>i > tolerance_:
*/
__pyx_v_rgb_ = struct_hsl_to_rgb(__pyx_v_hsl_.h, __pyx_v_hsl_.s, __pyx_v_hsl_.l);
/* "HSL/testing.pyx":61
* rgb_ = struct_hsl_to_rgb(hsl_.h, hsl_.s, hsl_.l)
*
* if rgb_.r * 255.0 - <double>i > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
__pyx_t_5 = ((((__pyx_v_rgb_.r * 255.0) - ((double)__pyx_v_i)) > __pyx_v_tolerance_) != 0);
if (__pyx_t_5) {
/* "HSL/testing.pyx":62
*
* if rgb_.r * 255.0 - <double>i > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
/*try:*/ {
/* "HSL/testing.pyx":63
* if rgb_.r * 255.0 - <double>i > tolerance_:
* with gil:
* show_error(i, j, k, rgb_) # <<<<<<<<<<<<<<
* if wall_: raise ValueError("\nMismatch error")
*
*/
__pyx_f_3HSL_7testing_show_error(__pyx_v_i, __pyx_v_j, __pyx_v_k, __pyx_v_rgb_);
/* "HSL/testing.pyx":64
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
*/
__pyx_t_5 = (__pyx_v_wall_ != 0);
if (unlikely(__pyx_t_5)) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 64, __pyx_L18_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 64, __pyx_L18_error)
}
}
/* "HSL/testing.pyx":62
*
* if rgb_.r * 255.0 - <double>i > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L19;
}
__pyx_L18_error: {
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L19:;
}
}
/* "HSL/testing.pyx":61
* rgb_ = struct_hsl_to_rgb(hsl_.h, hsl_.s, hsl_.l)
*
* if rgb_.r * 255.0 - <double>i > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
}
/* "HSL/testing.pyx":66
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.g * 255.0 - <double>j > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
__pyx_t_5 = ((((__pyx_v_rgb_.g * 255.0) - ((double)__pyx_v_j)) > __pyx_v_tolerance_) != 0);
if (__pyx_t_5) {
/* "HSL/testing.pyx":67
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
/*try:*/ {
/* "HSL/testing.pyx":68
* if rgb_.g * 255.0 - <double>j > tolerance_:
* with gil:
* show_error(i, j, k, rgb_) # <<<<<<<<<<<<<<
* if wall_: raise ValueError("\nMismatch error")
*
*/
__pyx_f_3HSL_7testing_show_error(__pyx_v_i, __pyx_v_j, __pyx_v_k, __pyx_v_rgb_);
/* "HSL/testing.pyx":69
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
* if rgb_.b * 255.0 - <double>k > tolerance_:
*/
__pyx_t_5 = (__pyx_v_wall_ != 0);
if (unlikely(__pyx_t_5)) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 69, __pyx_L25_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 69, __pyx_L25_error)
}
}
/* "HSL/testing.pyx":67
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L26;
}
__pyx_L25_error: {
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L26:;
}
}
/* "HSL/testing.pyx":66
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.g * 255.0 - <double>j > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
}
/* "HSL/testing.pyx":71
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.b * 255.0 - <double>k > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
__pyx_t_5 = ((((__pyx_v_rgb_.b * 255.0) - ((double)__pyx_v_k)) > __pyx_v_tolerance_) != 0);
if (__pyx_t_5) {
/* "HSL/testing.pyx":72
*
* if rgb_.b * 255.0 - <double>k > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
/*try:*/ {
/* "HSL/testing.pyx":73
* if rgb_.b * 255.0 - <double>k > tolerance_:
* with gil:
* show_error(i, j, k, rgb_) # <<<<<<<<<<<<<<
* if wall_: raise ValueError("\nMismatch error")
*
*/
__pyx_f_3HSL_7testing_show_error(__pyx_v_i, __pyx_v_j, __pyx_v_k, __pyx_v_rgb_);
/* "HSL/testing.pyx":74
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
*/
__pyx_t_5 = (__pyx_v_wall_ != 0);
if (unlikely(__pyx_t_5)) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 74, __pyx_L32_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 74, __pyx_L32_error)
}
}
/* "HSL/testing.pyx":72
*
* if rgb_.b * 255.0 - <double>k > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L33;
}
__pyx_L32_error: {
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L33:;
}
}
/* "HSL/testing.pyx":71
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.b * 255.0 - <double>k > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
}
}
}
goto __pyx_L36;
__pyx_L8_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L35;
__pyx_L35:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_hsl_;
__pyx_parallel_temp1 = __pyx_v_i;
__pyx_parallel_temp2 = __pyx_v_j;
__pyx_parallel_temp3 = __pyx_v_k;
__pyx_parallel_temp4 = __pyx_v_rgb_;
}
__pyx_L36:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__Pyx_XDECREF(__pyx_t_6);
__pyx_t_6 = NULL;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_hsl_ = __pyx_parallel_temp0;
__pyx_v_i = __pyx_parallel_temp1;
__pyx_v_j = __pyx_parallel_temp2;
__pyx_v_k = __pyx_parallel_temp3;
__pyx_v_rgb_ = __pyx_parallel_temp4;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "HSL/testing.pyx":33
*
* cpdef void rgb_to_hsl_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil:
* """ # <<<<<<<<<<<<<<
* TEST RGB TO HSL AND HSL TO RGB
*
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
goto __pyx_L5;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
__pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
/* "HSL/testing.pyx":32
*
*
* cpdef void rgb_to_hsl_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil: # <<<<<<<<<<<<<<
* """
* TEST RGB TO HSL AND HSL TO RGB
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_WriteUnraisable("HSL.testing.rgb_to_hsl_testing", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
__pyx_L0:;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* Python wrapper */
static PyObject *__pyx_pw_3HSL_7testing_1rgb_to_hsl_testing(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_3HSL_7testing_rgb_to_hsl_testing[] = "\n    TEST RGB TO HSL AND HSL TO RGB\n    \n    Loop over every RGB value from 0 .. 255, determine the corresponding HSL values, then convert the\n    HSL values back to RGB (monitoring the maximum deviation between the real and calculated values)\n    and raise an error if the deviation exceeds the tolerance 1e-7.\n    \n    :param wall_     : boolean; default False; when True, stop the test at the first tolerance issue, otherwise continue\n    :param tolerance_: float; python float representing the maximum tolerance, default is 1e-7. The tolerance\n    is the maximum deviation (original value - calculated value) that must not be exceeded\n    :return: void\n    ";
static PyObject *__pyx_pw_3HSL_7testing_1rgb_to_hsl_testing(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
int __pyx_v_wall_;
double __pyx_v_tolerance_;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("rgb_to_hsl_testing (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wall,&__pyx_n_s_tolerance,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wall);
if (value) { values[0] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tolerance);
if (value) { values[1] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rgb_to_hsl_testing") < 0)) __PYX_ERR(0, 32, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
}
if (values[0]) {
__pyx_v_wall_ = __Pyx_PyObject_IsTrue(values[0]); if (unlikely((__pyx_v_wall_ == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
} else {
__pyx_v_wall_ = ((int)0);
}
if (values[1]) {
__pyx_v_tolerance_ = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_tolerance_ == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
} else {
__pyx_v_tolerance_ = ((double)1e-07);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("rgb_to_hsl_testing", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("HSL.testing.rgb_to_hsl_testing", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3HSL_7testing_rgb_to_hsl_testing(__pyx_self, __pyx_v_wall_, __pyx_v_tolerance_);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3HSL_7testing_rgb_to_hsl_testing(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_wall_, double __pyx_v_tolerance_) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
struct __pyx_opt_args_3HSL_7testing_rgb_to_hsl_testing __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("rgb_to_hsl_testing", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1.__pyx_n = 2;
__pyx_t_1.wall_ = __pyx_v_wall_;
__pyx_t_1.tolerance_ = __pyx_v_tolerance_;
__pyx_f_3HSL_7testing_rgb_to_hsl_testing(0, &__pyx_t_1);
__pyx_t_2 = __Pyx_void_to_None(NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("HSL.testing.rgb_to_hsl_testing", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyMethodDef __pyx_methods[] = {
{"rgb_to_hsl_testing", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3HSL_7testing_1rgb_to_hsl_testing, METH_VARARGS|METH_KEYWORDS, __pyx_doc_3HSL_7testing_rgb_to_hsl_testing},
{0, 0, 0, 0}
};
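/* rgb_to_hsl_testing is the module's only Python-callable entry point; the
 * __pyx_f_3HSL_7testing_rgb_to_hsl_testing routine it wraps is a cdef
 * function reachable only from C. */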
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_testing(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_testing},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"testing",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_Mismatch_error, __pyx_k_Mismatch_error, sizeof(__pyx_k_Mismatch_error), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_tolerance, __pyx_k_tolerance, sizeof(__pyx_k_tolerance), 0, 0, 1, 1},
{&__pyx_n_s_wall, __pyx_k_wall, sizeof(__pyx_k_wall), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 55, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 64, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "HSL/testing.pyx":64
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Mismatch_error); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
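/* All of the __Pyx_modinit_* helpers above are empty scaffolding: this
 * module defines no extension types and imports or exports no C-level
 * variables or functions, so Cython emits only the bookkeeping shells. */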
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC inittesting(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC inittesting(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_testing(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_testing(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
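/* Py_mod_create slot (PEP 489 multi-phase init): builds the module object
 * from the import spec and copies loader/origin/parent/
 * submodule_search_locations into the module dict as __loader__, __file__,
 * __package__ and __path__. Combined with the single-interpreter guard
 * above, module creation is refused after the first import. */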
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_testing(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'testing' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_testing(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
PyEval_InitThreads();
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("testing", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_HSL__testing) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "HSL.testing")) {
if (unlikely(PyDict_SetItemString(modules, "HSL.testing", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
(void)__Pyx_modinit_type_init_code();
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "HSL/testing.pyx":14
* DEF TOLERANCE = 1e-7
*
* cdef double ONE_255 = 1.0 / 255.0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_3HSL_7testing_ONE_255 = (1.0 / 255.0);
/* "HSL/testing.pyx":1
* # cython: binding=False, boundscheck=False, wraparound=False, nonecheck=False, cdivision=True, optimize.use_switch=True # <<<<<<<<<<<<<<
* # encoding: utf-8
*
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init HSL.testing", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init HSL.testing");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
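/* RefNanny is Cython's optional debug reference-count checker. When
 * CYTHON_REFNANNY is defined, the API struct is fetched at runtime from the
 * refnanny (or Cython.Runtime.refnanny) module as a PyLong-encoded pointer;
 * in normal builds the RefNanny macros expand to nothing. */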
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyErrFetchRestore */
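/* Fast-path versions of PyErr_Fetch/PyErr_Restore that read and write the
 * thread state's curexc_* fields directly instead of going through the
 * public API; only compiled when CYTHON_FAST_THREAD_STATE is enabled. */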
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* WriteUnraisableException */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
int full_traceback, CYTHON_UNUSED int nogil) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_PyThreadState_declare
#ifdef WITH_THREAD
PyGILState_STATE state;
if (nogil)
state = PyGILState_Ensure();
#ifdef _MSC_VER
else state = (PyGILState_STATE)-1;
#endif
#endif
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
if (full_traceback) {
Py_XINCREF(old_exc);
Py_XINCREF(old_val);
Py_XINCREF(old_tb);
__Pyx_ErrRestore(old_exc, old_val, old_tb);
PyErr_PrintEx(1);
}
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
#ifdef WITH_THREAD
if (nogil)
PyGILState_Release(state);
#endif
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
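/* Matches the keywords in kwds against the interned argument-name table:
 * a first pass compares by pointer identity (cheap for interned strings),
 * a second by string value. A keyword that was also supplied positionally
 * raises the "multiple values" TypeError above; an unknown keyword is
 * stored into kwds2 when the function accepts **kwargs, and rejected
 * otherwise. */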
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
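/* Cache of the synthetic code objects created for tracebacks, kept as an
 * array sorted by line number so lookups can bisect. Storage starts at 64
 * entries and grows in steps of 64; insertion shifts the tail to keep the
 * order. C line numbers are stored negated (see __Pyx_AddTraceback) so
 * they cannot collide with Python line numbers. */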
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
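/* Synthesizes a traceback entry for an exception raised in C code: a
 * minimal fake code object carrying the source file and function name is
 * created (and cached above), wrapped in a fresh frame, assigned the
 * Python line number, and pushed onto the current traceback with
 * PyTraceBack_Here. */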
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
/* CIntFromPyVerify */
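/* Overflow guard shared by the integer conversions below: the wider
 * intermediate value is cast down to the target type and back, and any
 * change in value is reported as overflow (with a separate negative-value
 * check for unsigned targets). */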
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntToPy */
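/* Note that comparisons such as sizeof(long) < sizeof(long) below are
 * artifacts of Cython's type-substitution templates: they are compile-time
 * constants, and the dead branches are folded away by the compiler. */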
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* FastTypeChecks */
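/* CPython-only subclass tests that avoid the cost of PyObject_IsSubclass:
 * when a type has a tp_mro tuple it is scanned directly, otherwise the
 * tp_base chain is walked. The two-type variant lets exception matching
 * test both candidate exception classes in a single call. */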
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
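/* Compares the Python version the module was compiled against with the
 * running interpreter and warns on mismatch. One caveat worth noting: with
 * 4-byte buffers, "%d.%d" truncates two-digit minor versions (for example
 * "3.10" becomes "3.1"), so effectively only the major version and the
 * first digit of the minor version are compared. */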
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
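/* Explanatory note on the fast path above (illustrative, not part of the
 * generated module): CPython stores integers as arrays of base-2^PyLong_SHIFT
 * digits (PyLong_SHIFT is typically 30 on 64-bit builds). For a positive
 * 2-digit long with digits[0] = 5 and digits[1] = 3, the `case 2` branch
 * reconstructs ((size_t)3 << 30) | 5 = 3221225477 directly, avoiding a call
 * to PyLong_AsSsize_t, which is the point of the switch. */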
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
GB_binop__ge_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp32)
// A*D function (colscale): GB (_AxD__ge_fp32)
// D*A function (rowscale): GB (_DxB__ge_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp32)
// C=scalar+B GB (_bind1st__ge_fp32)
// C=scalar+B' GB (_bind1st_tran__ge_fp32)
// C=A+scalar GB (_bind2nd__ge_fp32)
// C=A'+scalar GB (_bind2nd_tran__ge_fp32)
// C type: bool
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ge_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
infgraph.h |
class VV
{
public:
vector<int> head;
vector<int> next;
vector<int> data;
vector<int> vsize;
void clear()
{
head.clear();
next.clear();
data.clear();
vsize.clear();
}
// trick so existing call sites that do push_back(vector<int>()) keep working
void push_back(vector<int> x){
ASSERT(x.size()==0);
addVector();
}
void addVector()
{
head.push_back(-1);
vsize.push_back(0);
}
int size(int t){
return vsize[t];
}
//vv[a].push_back(b)
void addElement( int a, int b)
{
//a.push_back(b);
vsize[a]++;
data.push_back(b);
next.push_back(head[a]);
head[a]=next.size()-1;
}
// iteration: instead of `for (int t : vv[a])`, walk the chain:
// for (int x = vv.head[a]; x != -1; x = vv.next[x]) { int t = vv.data[x]; }
};
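// Usage sketch for VV (illustrative; `vv` is a hypothetical instance). VV
// emulates vector<vector<int>> with one shared arena plus intrusive linked
// lists, so iteration walks the head/next chain in reverse insertion order:
//
//   VV vv;
//   vv.addVector();          // create list 0
//   vv.addElement(0, 42);    // vv[0].push_back(42)
//   vv.addElement(0, 7);
//   for (int x = vv.head[0]; x != -1; x = vv.next[x]) {
//       int t = vv.data[x];  // visits 7, then 42
//   }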
class InfGraph:public Graph
{
public:
vector<vector<int>> hyperG;
//VV hyperG;
vector<vector<int>> hyperGT;
InfGraph(string folder, string graph_file):Graph(folder, graph_file){
hyperG.clear();
for(int i=0; i<n; i++)
hyperG.push_back(vector<int>());
// note: this loop re-seeds the single sfmtSeed state 12 times, so only
// the final call (seed 1245) takes effect; it looks like a leftover from
// a variant with 12 per-thread generators
for(int i=0; i<12; i++)
sfmt_init_gen_rand(&sfmtSeed, i+1234);
}
enum ProbModel {TR, WC, TR001};
ProbModel probModel;
void BuildHypergraphR(int64 R){
hyperId=R;
//for(int i=0; i<n; i++)
//hyperG[i].clear();
hyperG.clear();
for(int i=0; i<n; i++)
hyperG.push_back(vector<int>());
hyperGT.clear();
while((int)hyperGT.size() <= R)
hyperGT.push_back( vector<int>() );
for(int i=0; i<R; i++){
BuildHypergraphNode(sfmt_genrand_uint32(&sfmtSeed)%n, i, true);
}
int totAddedElement=0;
for(int i=0; i<R; i++){
for(int t:hyperGT[i])
{
hyperG[t].push_back(i);
//hyperG.addElement(t, i);
totAddedElement++;
}
}
ASSERT(hyperId == R);
}
int BuildHypergraphNode(int uStart, int hyperiiid, bool addHyperEdge){
int n_visit_edge=1;
if(addHyperEdge)
{
ASSERT((int)hyperGT.size() > hyperiiid);
hyperGT[hyperiiid].push_back(uStart);
}
int n_visit_mark=0;
//for(int i=0; i<12; i++) ASSERT((int)visit[i].size()==n);
//for(int i=0; i<12; i++) ASSERT((int)visit_mark[i].size()==n);
//hyperiiid ++;
q.clear();
q.push_back(uStart);
ASSERT(n_visit_mark < n);
visit_mark[n_visit_mark++]=uStart;
visit[uStart]=true;
while(!q.empty()) {
int expand=q.front();
q.pop_front();
if(influModel==IC){
int i=expand;
for(int j=0; j<(int)gT[i].size(); j++){
//int u=expand;
int v=gT[i][j];
n_visit_edge++;
// note: maps the 32-bit random word into roughly [0,1), assuming RAND_MAX == 2^31 - 1
double randDouble=double(sfmt_genrand_uint32(&sfmtSeed))/double(RAND_MAX)/2;
if(randDouble > probT[i][j])
continue;
if(visit[v])
continue;
// v is unvisited here, so no extra guard is needed
ASSERT(n_visit_mark < n);
visit_mark[n_visit_mark++]=v;
visit[v]=true;
q.push_back(v);
//#pragma omp critical
//if(0)
if(addHyperEdge)
{
//hyperG[v].push_back(hyperiiid);
ASSERT((int)hyperGT.size() > hyperiiid);
hyperGT[hyperiiid].push_back(v);
}
}
}
else if(influModel==LT){
if(gT[expand].size()==0)
continue;
ASSERT(gT[expand].size()>0);
n_visit_edge+=gT[expand].size();
// same [0,1) mapping as in the IC branch above
double randDouble=double(sfmt_genrand_uint32(&sfmtSeed))/double(RAND_MAX)/2;
for(int i=0; i<(int)gT[expand].size(); i++){
ASSERT( i< (int)probT[expand].size());
randDouble -= probT[expand][i];
if(randDouble>0)
continue;
//int u=expand;
int v=gT[expand][i];
if(visit[v])
break;
visit_mark[n_visit_mark++]=v;
visit[v]=true;
q.push_back(v);
if(addHyperEdge)
{
ASSERT((int)hyperGT.size() > hyperiiid);
hyperGT[hyperiiid].push_back(v);
}
break;
}
}
else
ASSERT(false);
}
for(int i=0; i<n_visit_mark; i++)
visit[visit_mark[i]]=false;
return n_visit_edge;
}
//return the number of edges visited
int64 hyperId = 0;
deque<int> q;
sfmt_t sfmtSeed;
set<int> seedSet;
void BuildSeedSet() {
vector< int > degree;
vector< int> visit_local(hyperGT.size());
//sort(ALL(degree));
//reverse(ALL(degree));
seedSet.clear();
for(int i=0; i<n; i++)
{
degree.push_back( hyperG[i].size() );
//degree.push_back( hyperG.size(i) );
}
ASSERT(k > 0);
ASSERT(k < (int)degree.size());
for(int i=0; i<k; i++){
auto t=max_element(degree.begin(), degree.end());
int id=t-degree.begin();
seedSet.insert(id);
degree[id]=0;
for(int t:hyperG[id]){
if(!visit_local[t]){
visit_local[t]=true;
for(int item:hyperGT[t]){
degree[item]--;
}
}
}
}
}
double InfluenceHyperGraph(){
set<int> s;
for(auto t:seedSet){
for(auto tt:hyperG[t]){
//for(int index=hyperG.head[t]; index!=-1; index=hyperG.next[index]){
//int tt=hyperG.data[index];
s.insert(tt);
}
}
double inf=(double)n*s.size()/hyperId;
return inf;
}
};
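// Typical end-to-end use of InfGraph (a hedged sketch; the file paths are
// hypothetical, and `k` and `influModel` are assumed to be members inherited
// from Graph, set by the surrounding driver code):
//
//   InfGraph g("data/", "graph.txt");
//   g.influModel = IC;                    // or LT
//   g.k = 50;                             // seed-set size used by BuildSeedSet
//   g.BuildHypergraphR(100000);           // sample R reverse-reachable sets
//   g.BuildSeedSet();                     // greedy max coverage over RR sets
//   double est = g.InfluenceHyperGraph(); // n * (#covered RR sets) / R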
|
xnor.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 by Contributors
* \file binary_inference_convolution-inl.h
* \brief
* \ref: https://arxiv.org/abs/1705.09864
* \author HPI-DeepLearning
*/
#ifndef MXNET_OPERATOR_CONTRIB_BINARY_INFERENCE_XNOR_H
#define MXNET_OPERATOR_CONTRIB_BINARY_INFERENCE_XNOR_H
#include <dmlc/logging.h>
#include <mshadow/base.h>
#include <stdlib.h>
#include <inttypes.h>
#include <assert.h>
#include <limits.h>
#include <tgmath.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
namespace mxnet {
namespace op {
namespace xnor {
// variable, position, value
#define BIT_SET(var, pos, val) var |= (val << pos)
//uint32_t, uint64_t
#if BINARY_WORD_32 == 1
typedef uint32_t BINARY_WORD;
#endif
#if BINARY_WORD_64 == 1
typedef uint64_t BINARY_WORD;
#endif
const int BITS_PER_BINARY_WORD (sizeof(BINARY_WORD) * CHAR_BIT);
/**
* @brief returns a mshadow dtype with corresponding bitwidth to BINARY_WORD
*
*/
inline mshadow::TypeFlag corresponding_dtype() {
if (BITS_PER_BINARY_WORD == 32) {
return mshadow::kFloat32;
} else if (BITS_PER_BINARY_WORD == 64) {
return mshadow::kFloat64;
}
assert(false);
return mshadow::kFloat32;
}
/**
* @brief a helper method for print out bit wise result
* of a binary_word
*
*/
inline void print_int2Bin ( BINARY_WORD a )
{
for (int i=0; i <BITS_PER_BINARY_WORD; i++ )
{
if( a & (((BINARY_WORD)1) << i) ) // shift a word-wide 1 so this also works for 64-bit BINARY_WORD
std::cout << 1;
else
std::cout << 0;
}
std::cout<<std::endl;
}
inline void print_int2Bin64 ( uint64_t a )
{
for (int i=0; i <64; i++ )
{
if( a & (1ULL << i) ) // 1ULL: shifting a plain int by i >= 32 is undefined
std::cout << 1;
else
std::cout << 0;
}
std::cout<<std::endl;
}
/**
* @brief this method scales the _popc(xnor(...)) result
* into the dot(-1...+1) result.
* Example: if the scale range is 8, the dot product over
* {-1,+1} values takes the values:
*   -8 -6 -4 -2 0 2 4 6 8
* while the XNOR&POPC result takes:
*    0  1  2  3 4 5 6 7 8
* so the equation is:
*   dot_output = 2 * xnor_output - scale_range
*/
inline float xnor_to_binary_dot ( float num, int scale_range)
{
return 2*num - scale_range;
}
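// Worked check of the mapping above (illustrative): with scale_range = 8,
// an XNOR&POPC count of 5 (5 matching, 3 differing bit pairs) corresponds
// to a {-1,+1} dot product of 5 - 3 = 2, and indeed 2*5 - 8 = 2.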
/**
* @brief binarize an array of floats via the sign function into a single BINARY_WORD
*
*/
inline BINARY_WORD concatenate(float* array)
{
BINARY_WORD rvalue=0;
BINARY_WORD sign;
for (int i = 0; i < BITS_PER_BINARY_WORD; i++)
{
sign = (array[i]>=0);
rvalue = rvalue | (sign<< (i));
}
return rvalue;
}
/**
* @brief binarize a row-major matrix buffer into packed binary words
*
*/
inline void get_binary_row(float* row, BINARY_WORD * b_row, int size){
#pragma omp parallel for
for (int i = 0; i < size; i+=BITS_PER_BINARY_WORD) {
BINARY_WORD rvalue=0;
BINARY_WORD sign;
for (int j = 0;j < BITS_PER_BINARY_WORD; ++j) {
sign = (row[i+j]>=0);
BIT_SET(rvalue, j, sign);
}
b_row[i/BITS_PER_BINARY_WORD] = rvalue;
}
}
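// Example (illustrative): with a 32-bit BINARY_WORD, 32 consecutive floats
// starting [0.5, -1.2, 3.0, -0.1, ...] pack into one word whose bit j is
// (row[i+j] >= 0); here bits 0..3 are 1,0,1,0, so (word & 0xF) == 0x5.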
/**
* @brief binarize matrix column wise
*
*/
inline void get_binary_col(float* col, BINARY_WORD * b_col, int n, int k){
for(int y=0; y<(n/BITS_PER_BINARY_WORD); y++){
#pragma omp parallel for
for(int x=0; x < k; ++x){
BINARY_WORD rvalue=0;
BINARY_WORD sign;
for(int b=0; b<BITS_PER_BINARY_WORD; ++b){
sign = (col[(y*BITS_PER_BINARY_WORD+b)*k + x]>=0);
BIT_SET(rvalue, b, sign);
}
b_col[y*k + x] = rvalue;
}
}
}
/**
* @brief binarize matrix column-wise.
* Uses loop unrolling and register variables, giving roughly a 30%
* performance improvement over the get_binary_col() method even
* without OpenMP.
*/
inline void get_binary_col_unrolled(float* col, BINARY_WORD * b_col, int n, int k){
for(int y=0; y<(n/BITS_PER_BINARY_WORD); y++){
BINARY_WORD * y_col_pt = &b_col[y*k];
#pragma omp parallel for
for(int x=0; x < k; x+=4){
register BINARY_WORD rvalue0=0, rvalue1=0, rvalue2=0, rvalue3=0;
for(int b=0; b<BITS_PER_BINARY_WORD; b+=4){
register BINARY_WORD sign0, sign1, sign2, sign3, sign4, sign5, sign6, sign7,
sign8, sign9, sign10, sign11, sign12, sign13, sign14, sign15;
float* col_0 = &col[(y*BITS_PER_BINARY_WORD+b)*k + x];
float* col_1 = &col[(y*BITS_PER_BINARY_WORD+b+1)*k + x];
float* col_2 = &col[(y*BITS_PER_BINARY_WORD+b+2)*k + x];
float* col_3 = &col[(y*BITS_PER_BINARY_WORD+b+3)*k + x];
sign0 = (*col_0>=0);
sign1 = (*col_1>=0);
sign2 = (*col_2>=0);
sign3 = (*col_3>=0);
BIT_SET(rvalue0, b, sign0);
BIT_SET(rvalue0, (b+1), sign1);
BIT_SET(rvalue0, (b+2), sign2);
BIT_SET(rvalue0, (b+3), sign3);
sign4 = (*(col_0+1)>=0);
sign5 = (*(col_1+1)>=0);
sign6 = (*(col_2+1)>=0);
sign7 = (*(col_3+1)>=0);
BIT_SET(rvalue1, b, sign4);
BIT_SET(rvalue1, (b+1), sign5);
BIT_SET(rvalue1, (b+2), sign6);
BIT_SET(rvalue1, (b+3), sign7);
sign8 = (*(col_0+2)>=0);
sign9 = (*(col_1+2)>=0);
sign10 = (*(col_2+2)>=0);
sign11 = (*(col_3+2)>=0);
BIT_SET(rvalue2, b, sign8);
BIT_SET(rvalue2, (b+1), sign9);
BIT_SET(rvalue2, (b+2), sign10);
BIT_SET(rvalue2, (b+3), sign11);
sign12 = (*(col_0+3)>=0);
sign13 = (*(col_1+3)>=0);
sign14 = (*(col_2+3)>=0);
sign15 = (*(col_3+3)>=0);
BIT_SET(rvalue3, b, sign12);
BIT_SET(rvalue3, (b+1), sign13);
BIT_SET(rvalue3, (b+2), sign14);
BIT_SET(rvalue3, (b+3), sign15);
}
BINARY_WORD * pnter = &y_col_pt[x];
*pnter = rvalue0;
*(pnter+1) = rvalue1;
*(pnter+2) = rvalue2;
*(pnter+3) = rvalue3;
}
}
}
/**
* @brief baseline xnor-gemm implementation that replaces the
* floating-point dot product with XNOR and POPCNT;
* __builtin_popcountll is suitable for both 32-bit and 64-bit
* BINARY_WORDs
*/
void xnor_gemm(int M, int N, int K,
BINARY_WORD *A, int lda,
BINARY_WORD *B, int ldb,
float *C, int ldc);
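/**
 * @brief hedged, illustrative sketch of the xnor_gemm kernel declared above.
 * The real implementation lives in the corresponding source file; this helper
 * and its name are hypothetical and only show the idea: multiply-accumulate
 * over {-1,+1} values becomes popcount(~(a ^ b)) over packed words, and the
 * caller maps the raw counts back with xnor_to_binary_dot().
 */
inline void xnor_gemm_sketch(int M, int N, int K,
                             BINARY_WORD *A, int lda,
                             BINARY_WORD *B, int ldb,
                             float *C, int ldc){
  for (int m = 0; m < M; ++m) {
    for (int k = 0; k < K; ++k) {
      BINARY_WORD a = A[m*lda + k];
      for (int n = 0; n < N; ++n) {
        // count bit positions where a and B[k][n] agree; the cast back to
        // BINARY_WORD guards against integer promotion of ~ on 32-bit words
        C[m*ldc + n] += (float) __builtin_popcountll(
            (unsigned long long) (BINARY_WORD) ~(a ^ B[k*ldb + n]));
      }
    }
  }
  // callers would then apply xnor_to_binary_dot(C[p], K*BITS_PER_BINARY_WORD)
  // to rescale the popcounts into the {-1,+1} dot-product range
}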
/**
* @brief simple naive baseline gemm implementation;
* computes C(M x K) += A(M x N) * B(N x K), all row-major
*/
inline void baseline_gemm(int M, int K, int N,
float *A, int lda,
float *B, int ldb,
float *C, int ldc){
int i,n,k;
for(i = 0; i < M; ++i){
for(n = 0; n < N; ++n){
float A_PART = A[i*lda+n];
for(k = 0; k < K; ++k){
C[i*ldc+k] += A_PART * B[n*ldb+k];
}
}
}
}
} //namespace xnor
} //namespace op
} //namespace mxnet
#endif //MXNET_OPERATOR_CONTRIB_BINARY_INFERENCE_XNOR_H
|
hessian_screen.c | /* Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#include "np_helper/np_helper.h"
#include "gto/gto.h"
int int2e_sph();
int int2e_cart();
int int2e_ipvip1_cart();
int int2e_spsp1spsp2_cart();
int int2e_spsp1spsp2_sph();
/*
* Gradient screening for grad/rhf.py
*/
// ijkl,lk->ij
// ijkl,jk->il
// ijkl,kl->ij
// ijkl,jl->ik
int CVHFgrad_jk_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *q_cond_kl = opt->q_cond + n * n;
double qijkl = opt->q_cond[i*n+j] * q_cond_kl[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((2*opt->dm_cond[l*n+k] > dmin)
|| ( opt->dm_cond[j*n+k] > dmin)
|| ( opt->dm_cond[j*n+l] > dmin));
}
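/*
* Rationale for the prescreen above (explanatory note, not from the original
* source): by the Cauchy-Schwarz inequality |(ij|kl)| <= sqrt((ij|ij)(kl|kl)),
* so q_cond caches sqrt((ij|ij))-type factors per shell pair and
* qijkl = q_cond[ij] * q_cond_kl[kl] bounds the largest integral in the
* quartet. A contraction term dm[x] * (ij|kl) is then negligible whenever
* dm_cond[x] <= direct_scf_cutoff / qijkl == dmin, which is exactly the test
* performed against each density pattern listed in the einsum comments above.
*/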
void CVHFgrad_jk_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
nbas = opt->nbas;
size_t Nbas = nbas;
size_t Nbas2 = Nbas * Nbas;
// First n*n elements for derivatives, the next n*n elements for regular ERIs
opt->q_cond = (double *)malloc(sizeof(double) * Nbas2*2);
if (ao_loc[nbas] == CINTtot_cgto_spheric(bas, nbas)) {
CVHFset_int2e_q_cond(int2e_sph, NULL, opt->q_cond+Nbas2, ao_loc,
atm, natm, bas, nbas, env);
} else {
CVHFset_int2e_q_cond(int2e_cart, NULL, opt->q_cond+Nbas2, ao_loc,
atm, natm, bas, nbas, env);
}
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp;
int i, j, iijj, di, dj, ish, jsh;
size_t ij;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double *buf = malloc(sizeof(double) * 9 * di*di*di*di);
double *bufx = buf;
double *bufy, *bufz;
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < Nbas2; ij++) {
ish = ij / Nbas;
jsh = ij - ish * Nbas;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
bufy = buf + 4*(di*dj*di*dj);
bufz = buf + 8*(di*dj*di*dj);
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
iijj = i+di*j+di*dj*i+di*dj*di*j;
qtmp = MAX(qtmp, fabs(bufx[iijj]));
qtmp = MAX(qtmp, fabs(bufy[iijj]));
qtmp = MAX(qtmp, fabs(bufz[iijj]));
} }
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFgrad_jk_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
nbas = opt->nbas;
size_t Nbas = nbas;
opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
NPdset0(opt->dm_cond, Nbas * Nbas);
const size_t nao = ao_loc[nbas];
double dmax;
int i, j, ish, jsh;
int iset;
double *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < nbas; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
dmax = MAX(dmax, fabs(pdm[i*nao+j]));
} }
}
opt->dm_cond[ish*Nbas+jsh] = dmax;
} }
}
/*
* Hessian screening for hessian/rhf.py
*/
// ijkl,ji->kl
// ijkl,li->kj
// ijkl,lj->ki
int CVHFip1ip2_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[j*n+i] > dmin)
|| (opt->dm_cond[l*n+i] > dmin)
|| (opt->dm_cond[l*n+j] > dmin));
}
void CVHFip1ip2_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFip1ip2_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc, atm, natm, bas, nbas, env);
}
// ijkl,lk->ij
// ijkl,jk->il
// ijkl,kl->ij
// ijkl,jl->ik
int CVHFipip1_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *q_cond_kl = opt->q_cond + n * n;
double qijkl = opt->q_cond[i*n+j] * q_cond_kl[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((2*opt->dm_cond[l*n+k] > dmin)
|| ( opt->dm_cond[j*n+k] > dmin)
|| ( opt->dm_cond[j*n+l] > dmin));
}
void CVHFipip1_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
nbas = opt->nbas;
size_t Nbas = nbas;
size_t Nbas2 = Nbas * Nbas;
// First n*n elements for derivatives, the next n*n elements for regular ERIs
opt->q_cond = (double *)malloc(sizeof(double) * Nbas2*2);
if (ao_loc[nbas] == CINTtot_cgto_spheric(bas, nbas)) {
CVHFset_int2e_q_cond(int2e_sph, NULL, opt->q_cond+Nbas2, ao_loc,
atm, natm, bas, nbas, env);
} else {
CVHFset_int2e_q_cond(int2e_cart, NULL, opt->q_cond+Nbas2, ao_loc,
atm, natm, bas, nbas, env);
}
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp;
int i, j, iijj, di, dj, ish, jsh;
size_t ij;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double *buf = malloc(sizeof(double) * 256 * di*di*di*di);
double *bufxx = buf;
double *bufxy, *bufxz, *bufyx, *bufyy, *bufyz, *bufzx, *bufzy, *bufzz;
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < Nbas2; ij++) {
ish = ij / Nbas;
jsh = ij - ish * Nbas;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
iijj = di * dj * di * dj;
bufxy = buf + ( 1*16+ 1)*iijj;
bufxz = buf + ( 2*16+ 2)*iijj;
bufyx = buf + ( 4*16+ 4)*iijj;
bufyy = buf + ( 5*16+ 5)*iijj;
bufyz = buf + ( 6*16+ 6)*iijj;
bufzx = buf + ( 8*16+ 8)*iijj;
bufzy = buf + ( 9*16+ 9)*iijj;
bufzz = buf + (10*16+10)*iijj;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
iijj = i+di*j+di*dj*i+di*dj*di*j;
qtmp = MAX(qtmp, fabs(bufxx[iijj]));
qtmp = MAX(qtmp, fabs(bufxy[iijj]));
qtmp = MAX(qtmp, fabs(bufxz[iijj]));
qtmp = MAX(qtmp, fabs(bufyx[iijj]));
qtmp = MAX(qtmp, fabs(bufyy[iijj]));
qtmp = MAX(qtmp, fabs(bufyz[iijj]));
qtmp = MAX(qtmp, fabs(bufzx[iijj]));
qtmp = MAX(qtmp, fabs(bufzy[iijj]));
qtmp = MAX(qtmp, fabs(bufzz[iijj]));
} }
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFipip1_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc, atm, natm, bas, nbas, env);
}
// ijkl,lk->ij
// ijkl,li->kj
// ijkl,kl->ij
// ijkl,ki->lj
int CVHFipvip1_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *q_cond_kl = opt->q_cond + n * n;
double qijkl = opt->q_cond[i*n+j] * q_cond_kl[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((2*opt->dm_cond[l*n+k] > dmin)
|| ( opt->dm_cond[l*n+i] > dmin)
|| ( opt->dm_cond[k*n+i] > dmin));
}
void CVHFipvip1_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFipip1_direct_scf(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFipvip1_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
GB_binop__fmod_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__fmod_fp32
// A.*B function (eWiseMult): GB_AemultB__fmod_fp32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__fmod_fp32
// C+=b function (dense accum): GB_Cdense_accumb__fmod_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__fmod_fp32
// C=scalar+B GB_bind1st__fmod_fp32
// C=scalar+B' GB_bind1st_tran__fmod_fp32
// C=A+scalar GB_bind2nd__fmod_fp32
// C=A'+scalar GB_bind2nd_tran__fmod_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = fmodf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = fmodf (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__fmod_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__fmod_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float bij = Bx [p] ;
Cx [p] = fmodf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__fmod_fp32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
Cx [p] = fmodf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = fmodf (x, aij) ; \
}
GrB_Info GB_bind1st_tran__fmod_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = fmodf (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ge_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp64)
// A*D function (colscale): GB (_AxD__ge_fp64)
// D*A function (rowscale): GB (_DxB__ge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp64)
// C=scalar+B GB (_bind1st__ge_fp64)
// C=scalar+B' GB (_bind1st_tran__ge_fp64)
// C=A+scalar GB (_bind2nd__ge_fp64)
// C=A'+scalar GB (_bind2nd_tran__ge_fp64)
// C type: bool
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ge_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp-parallel-for.c | extern void abort (void);
int main (void)
{
int i, a;
a = 30;
#pragma omp parallel for firstprivate (a) lastprivate (a) \
num_threads (2) schedule(static)
for (i = 0; i < 10; i++)
a = a + i;
/* The thread that owns the last iteration will have computed
30 + 5 + 6 + 7 + 8 + 9 = 65. */
if (a != 65)
abort ();
return 0;
}
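/* Illustrative sketch (not part of the original test): a serial equivalent of
   what lastprivate(a) observes.  With num_threads(2) and schedule(static),
   iterations 0..4 run on one thread and 5..9 on the other; the copy-out comes
   from the thread executing the last iteration (9). */
static int
expected_lastprivate (void)
{
  int a = 30;                 /* firstprivate initial value */
  int i;
  for (i = 5; i < 10; i++)    /* the chunk containing the last iteration */
    a = a + i;
  return a;                   /* 30 + 5 + 6 + 7 + 8 + 9 = 65 */
}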
|
dct_lee_cpu.h | /**
* @file dct_lee_cpu.h
* @author Yibo Lin
* @date Oct 2018
*/
#ifndef DREAMPLACE_DCT_LEE_CPU_H
#define DREAMPLACE_DCT_LEE_CPU_H
#include <vector>
#include <cmath>
#include <stdexcept>
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
namespace lee
{
constexpr double PI = 3.14159265358979323846;
/// Return true if a number is a power of 2
template <typename T = unsigned>
inline bool isPowerOf2(T val)
{
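    // val & (val - 1) clears the lowest set bit; the result is zero exactly
    // when val has at most one set bit, and the leading 'val &&' rejects zero.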
return val && (val & (val - 1)) == 0;
}
/// Transpose a row-major matrix with M rows and N columns using a blocked (cache-friendly) transpose
template <typename TValue, typename TIndex = unsigned>
inline void transpose(const TValue *in, TValue *out, TIndex M, TIndex N, TIndex blockSize = 16)
{
//#pragma omp parallel for collapse(2) schedule(static)
for (TIndex j = 0; j < N; j += blockSize)
{
for (TIndex i = 0; i < M; i += blockSize)
{
// Transpose the block beginning at [i, j]
TIndex xend = std::min(M, i + blockSize);
TIndex yend = std::min(N, j + blockSize);
for (TIndex y = j; y < yend; ++y)
{
for (TIndex x = i; x < xend; ++x)
{
out[x + y * M] = in[y + x * N];
}
}
}
}
}
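/// Usage sketch (illustrative, not part of the original header): transpose a
/// 2 x 3 row-major matrix in one call.
inline void transpose_example()
{
    const double in[6] = {1, 2, 3,
                          4, 5, 6};              // M = 2 rows, N = 3 columns
    double out[6] = {0};
    transpose<double, unsigned>(in, out, 2, 3);  // out = {1, 4, 2, 5, 3, 6}
}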
/// Negate the values at odd positions of a vector
template <typename TValue, typename TIndex = unsigned>
inline void negateOddEntries(TValue *vec, TIndex N, int num_threads)
{
#pragma omp parallel for num_threads(num_threads)
for (TIndex i = 1; i < N; i += 2)
{
vec[i] = -vec[i];
}
}
/// Precompute the cosine factors needed for an N-point dct
/// @param cos buffer of size N - 1; holds the precomputed factors on return,
///        laid out as segments of length N/2, N/4, ..., 1 (one per butterfly stage)
/// @param N the length of the target dct, must be a power of 2
template <typename TValue, typename TIndex = unsigned>
void precompute_dct_cos(TValue *cos, TIndex N)
{
// The input length must be a power of 2
if (! isPowerOf2<TIndex>(N))
{
throw std::domain_error("Input length is not a power of 2.");
}
TIndex offset = 0;
TIndex halfLen = N / 2;
while (halfLen)
{
TValue phaseStep = 0.5 * PI / halfLen;
TValue phase = 0.5 * phaseStep;
for (TIndex i = 0; i < halfLen; ++i)
{
cos[offset + i] = 0.5 / std::cos(phase);
phase += phaseStep;
}
offset += halfLen;
halfLen /= 2;
}
}
/// Precompute the cosine factors needed for an N-point idct
/// @param cos buffer of size N - 1; holds the precomputed factors on return,
///        laid out as segments of length 1, 2, ..., N/2 (one per butterfly stage)
/// @param N the length of the target idct, must be a power of 2
template <typename TValue, typename TIndex = unsigned>
void precompute_idct_cos(TValue *cos, TIndex N)
{
// The input length must be a power of 2
if (! isPowerOf2<TIndex>(N))
{
throw std::domain_error("Input length is not a power of 2.");
}
TIndex offset = 0;
TIndex halfLen = 1;
while(halfLen < N)
{
TValue phaseStep = 0.5 * PI / halfLen;
TValue phase = 0.5 * phaseStep;
for (TIndex i = 0; i < halfLen; ++i)
{
cos[offset + i] = 0.5 / std::cos(phase);
phase += phaseStep;
}
offset += halfLen;
halfLen *= 2;
}
}
/// Fast Discrete Cosine Transform (DCT) and its inverse (IDCT), implemented with Lee's algorithm
/// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984
///
/// Lee's algorithm is recursive in nature.
/// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
///
/// My implementation here is iterative, which is more efficient than the recursive version.
/// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT
/// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1
///
/// @param vec length N sequence to be transformed
/// @param out length N buffer; receives the result
/// @param buf length N scratch buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos'
/// @param N length of vec, must be a power of 2
template <typename TValue, typename TIndex = unsigned>
inline void dct(TValue *vec, TValue *out, TValue *buf, const TValue *cos, TIndex N)
{
// The input length must be a power of 2
if (! isPowerOf2<TIndex>(N))
{
throw std::domain_error("Input length is not a power of 2.");
}
// 'curr' and 'next' point to the buffers of two adjacent iterations
TValue *curr = out;
TValue *next = buf;
// Copy 'vec' into the current buffer
std::copy(vec, vec + N, curr);
// Current butterfly length and half length
TIndex len = N;
TIndex halfLen = len / 2;
// Iteratively bi-partition sequences into sub-sequences
TIndex cosOffset = 0;
while (halfLen)
{
TIndex offset = 0;
TIndex steps = N / len;
for (TIndex k = 0; k < steps; ++k)
{
for (TIndex i = 0; i < halfLen; ++i)
{
next[offset + i] = curr[offset + i] + curr[offset + len - i - 1];
next[offset + halfLen + i] = (curr[offset + i] - curr[offset + len -i - 1]) * cos[cosOffset + i];
}
offset += len;
}
std::swap(curr, next);
cosOffset += halfLen;
len = halfLen;
halfLen /= 2;
}
// Bottom-up form the final DCT solution
// Note that the case len = 2 will do nothing, so we start from len = 4
len = 4;
halfLen = 2;
while(halfLen < N)
{
TIndex offset = 0;
TIndex steps = N / len;
for(TIndex k = 0; k < steps; ++k)
{
for(TIndex i = 0; i < halfLen - 1; ++i)
{
next[offset + i * 2] = curr[offset + i];
next[offset + i * 2 + 1] = curr[offset + halfLen + i] + curr[offset + halfLen + i + 1];
}
next[offset + len - 2] = curr[offset + halfLen - 1];
next[offset + len - 1] = curr[offset + len - 1];
offset += len;
}
std::swap(curr, next);
halfLen = len;
len *= 2;
}
// Populate the final results into 'out'
if (curr != out)
{
std::copy(curr, curr+N, out);
}
}
/// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1
/// @param vec length N sequence to be transformed
/// @param out length N buffer; receives the result
/// @param buf length N scratch buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos'
/// @param N length of vec, must be a power of 2
template <typename TValue, typename TIndex = unsigned>
inline void idct(TValue *vec, TValue *out, TValue* buf, const TValue *cos, TIndex N)
{
// The input length must be a power of 2
if (! isPowerOf2<TIndex>(N))
{
throw std::domain_error("Input length is not a power of 2.");
}
// 'curr' and 'next' point to the buffers of two adjacent iterations
TValue *curr = out;
TValue *next = buf;
// Copy 'vec' into the current buffer; the DC term is halved per the formula above
std::copy(vec, vec + N, curr);
curr[0] /= 2;
// Current butterfly length and half length
TIndex len = N;
TIndex halfLen = len / 2;
// Iteratively bi-partition sequences into sub-sequences
while (halfLen)
{
TIndex offset = 0;
TIndex steps = N / len;
for (TIndex k = 0; k < steps; ++k)
{
next[offset] = curr[offset];
next[offset + halfLen] = curr[offset + 1];
for (TIndex i = 1; i < halfLen; ++i)
{
next[offset + i] = curr[offset + i * 2];
next[offset + halfLen + i] = curr[offset + i * 2 - 1] + curr[offset + i * 2 + 1];
}
offset += len;
}
std::swap(curr, next);
len = halfLen;
halfLen /= 2;
}
// Bottom-up form the final IDCT solution
len = 2;
halfLen = 1;
TIndex cosOffset = 0;
while(halfLen < N)
{
TIndex offset = 0;
TIndex steps = N / len;
for(TIndex k = 0; k < steps; ++k)
{
for(TIndex i = 0; i < halfLen; ++i)
{
TValue g = curr[offset + i];
TValue h = curr[offset + halfLen + i] * cos[cosOffset + i];
next[offset + i] = g + h;
next[offset + len - 1 - i] = g - h;
}
offset += len;
}
std::swap(curr, next);
cosOffset += halfLen;
halfLen = len;
len *= 2;
}
// Populate the final results into 'out'
if (curr != out)
{
std::copy(curr, curr+N, out);
}
}
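/// Round-trip sketch (illustrative, not part of the original header): shows
/// the buffer contract of the 1D dct/idct pair above.  'out' and 'buf' are
/// each length-N arrays and 'cos' holds the N - 1 precomputed factors.  With
/// the unnormalized DCT-II/DCT-III definitions documented above, the round
/// trip returns (N / 2) times the input (an assumption derived from those
/// formulas, not a claim from the original author).
inline void dct_roundtrip_example()
{
    constexpr unsigned N = 8;
    std::vector<double> x = {1, 2, 3, 4, 5, 6, 7, 8};
    std::vector<double> y(N), z(N), buf(N), cosv(N - 1);
    precompute_dct_cos<double>(cosv.data(), N);
    dct<double>(x.data(), y.data(), buf.data(), cosv.data(), N);
    precompute_idct_cos<double>(cosv.data(), N);
    idct<double>(y.data(), z.data(), buf.data(), cosv.data(), N);
    // Here z[i] == (N / 2) * x[i] up to floating-point rounding.
}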
/// Compute a batch dct: one N-point dct per row of the matrix
/// @param mtx size M * N row-major matrix to be transformed
/// @param out size M * N buffer; receives the result
/// @param buf size M * N scratch buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos' for an N-point dct
/// @param M number of rows
/// @param N number of columns, must be a power of 2
/// @param num_threads number of OpenMP threads
template <typename TValue, typename TIndex = unsigned>
inline void dct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N, int num_threads)
{
#pragma omp parallel for num_threads(num_threads) schedule(static)
for (TIndex i = 0; i < M; ++i)
{
dct<TValue, TIndex>(mtx + i * N, out + i * N, buf + i*N, cos, N);
}
}
/// Compute a batch idct: one N-point idct per row of the matrix
/// @param mtx size M * N row-major matrix to be transformed
/// @param out size M * N buffer; receives the result
/// @param buf size M * N scratch buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos' for an N-point idct
/// @param M number of rows
/// @param N number of columns, must be a power of 2
/// @param num_threads number of OpenMP threads
template <typename TValue, typename TIndex = unsigned>
inline void idct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N, int num_threads)
{
#pragma omp parallel for num_threads(num_threads) schedule(static)
for (TIndex i = 0; i < M; ++i)
{
idct<TValue, TIndex>(mtx + i * N, out + i * N, buf + i*N, cos, N);
}
}
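/// 2D DCT sketch (illustrative, not part of the original header): a separable
/// M x N transform assembled from the batch routine and transpose() above.
/// All buffers (out, buf, tbuf) are length M * N; cosM and cosN come from
/// 'precompute_dct_cos' for M- and N-point transforms respectively.
template <typename TValue>
inline void dct2d_sketch(TValue *mtx, TValue *out, TValue *buf, TValue *tbuf,
                         const TValue *cosM, const TValue *cosN,
                         unsigned M, unsigned N, int num_threads)
{
    dct<TValue, unsigned>(mtx, out, buf, cosN, M, N, num_threads);  // rows
    transpose<TValue, unsigned>(out, tbuf, M, N);                   // now N x M
    dct<TValue, unsigned>(tbuf, out, buf, cosM, N, M, num_threads); // columns
    transpose<TValue, unsigned>(out, tbuf, N, M);                   // back to M x N
    std::copy(tbuf, tbuf + M * N, out);
}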
} // End of namespace lee
DREAMPLACE_END_NAMESPACE
#endif
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/policy.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
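/* PSDQuantum() rounds a length up to an even number of bytes: ((x)+1) & -2.
   PSD pads odd-length sections with a single byte. */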
/*
Enumerated declarations.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
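/*
  Blend-mode keys are 4-byte tags stored big-endian in the file; the reader
  byte-swaps strings on little-endian hosts (see ReversePSDString below), so
  each key here is paired with a reversed twin for LSBEndian images.
*/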
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringNotFalse(option) == MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
gamma=QuantumScale*GetPixelAlpha(q);
if (gamma != 0.0 && gamma != 1.0)
{
SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
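/*
  Illustrative sketch (not part of the coder): Photoshop stores
  composite = alpha*original + (1-alpha)*white, which the loop above inverts
  per channel.  The scalar arithmetic, isolated:
*/
static inline double UnblendOverWhite(const double composite,
  const double alpha)
{
  /* alpha is in [0,1]; QuantumRange is the white point.  Callers must
     exclude alpha == 0, as the loop above does. */
  return((composite-((1.0-alpha)*(double) QuantumRange))/alpha);
}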
static inline CompressionType ConvertPSDCompression(
PSDCompressionType compression)
{
switch (compression)
{
case RLE:
return RLECompression;
case ZipWithPrediction:
case ZipWithoutPrediction:
return ZipCompression;
default:
return NoCompression;
}
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == QuantumRange)
return(MagickTrue);
if (image->matte != MagickTrue)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
else if (opacity > 0)
SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
(MagickRealType) opacity)));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
MagickPixelPacket
color;
ssize_t
y;
if (image->matte == MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->matte=MagickTrue;
GetMagickPixelPacket(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color);
status=CompositeImage(complete_mask,OverCompositeOp,mask,
mask->page.x-image->page.x,mask->page.y-image->page.y);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register PixelPacket
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(q);
intensity=GetPixelIntensity(complete_mask,p);
if (revert == MagickFalse)
SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
else if (intensity > 0)
SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
q++;
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
ExceptionInfo *exception)
{
char
*key;
RandomInfo
*random_info;
StringInfo
*key_info;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" preserving opacity mask");
random_info=AcquireRandomInfo();
key_info=GetRandomKey(random_info,9+1);  /* 8 random bytes+background+NUL */
key=(char *) GetStringInfoDatum(key_info);
key[8]=(char) layer_info->mask.background;
key[9]='\0';
layer_info->mask.image->page.x+=layer_info->page.x;
layer_info->mask.image->page.y+=layer_info->page.y;
(void) SetImageRegistry(ImageRegistryType,(const char *) key,
layer_info->mask.image,exception);
(void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
(const char *) key);
key_info=DestroyStringInfo(key_info);
random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
int
pixel;
register ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue;
if (length > 128)
{
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
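/*
  PackBits recap (for the decoder above): each run begins with a length byte
  n; n < 128 copies the next n+1 literal bytes, n > 128 repeats the next byte
  257-n times, and n == 128 is a no-op.  For depths below 8 every decoded
  byte is then expanded into 8, 4, or 2 pixels.
*/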
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
const ssize_t number_layers)
{
ssize_t
i;
for (i=0; i<number_layers; i++)
{
if (layer_info[i].image != (Image *) NULL)
layer_info[i].image=DestroyImage(layer_info[i].image);
if (layer_info[i].mask.image != (Image *) NULL)
layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
if (image->storage_class == PseudoClass)
{
if (image->colors > 256)
return(2);
}
if (image->depth > 16)
return(4);
if (image->depth > 8)
return(2);
return(1);
}
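/* Section lengths are 32-bit in PSD (version 1) and 64-bit in PSB
   (version 2); GetPSDSize() reads whichever the header declares. */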
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
if (psd_info->version == 1)
return((MagickSizeType) ReadBlobLong(image));
return((MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
if (image->depth == 1)
return(((image->columns+7)/8)*GetPSDPacketSize(image));
else
return(image->columns*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
switch (type)
{
case BitmapMode: return "Bitmap";
case GrayscaleMode: return "Grayscale";
case IndexedMode: return "Indexed";
case RGBMode: return "RGB";
case CMYKMode: return "CMYK";
case MultichannelMode: return "Multichannel";
case DuotoneMode: return "Duotone";
case LabMode: return "L*A*B";
default: return "unknown";
}
}
static StringInfo *ParseImageResourceBlocks(Image *image,
const unsigned char *blocks,size_t length,
MagickBooleanType *has_merged_image)
{
const unsigned char
*p;
ssize_t
offset;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return((StringInfo *) NULL);
profile=BlobToStringInfo((const void *) NULL,length);
SetStringInfoDatum(profile,blocks);
SetStringInfoName(profile,"8bim");
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
break;
p=PushLongPixel(MSBEndian,p,&count);
offset=(ssize_t) count;
if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
break;
switch (id)
{
case 0x03ed:
{
char
value[MaxTextExtent];
unsigned short
resolution;
/*
Resolution info.
*/
if (offset < 16)
break;
p=PushShortPixel(MSBEndian,p,&resolution);
image->x_resolution=(double) resolution;
(void) FormatLocaleString(value,MaxTextExtent,"%g",
image->x_resolution);
(void) SetImageProperty(image,"tiff:XResolution",value);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->y_resolution=(double) resolution;
(void) FormatLocaleString(value,MaxTextExtent,"%g",
image->y_resolution);
(void) SetImageProperty(image,"tiff:YResolution",value);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
if ((offset > 4) && (*(p+4) == 0))
*has_merged_image=MagickFalse;
p+=offset;
break;
}
default:
{
p+=offset;
break;
}
}
if ((offset & 0x01) != 0)
p++;
}
return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
char
*q;
if (image->endian == MSBEndian)
return;
q=p+length;
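  /* XOR-swap byte reversal; p and q never alias because p < q throughout. */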
for(--q; p < q; ++p, --q)
{
*p = *p ^ *q,
*q = *p ^ *q,
*p = *p ^ *q;
}
}
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,PixelPacket *q,
IndexPacket *indexes,ssize_t x)
{
if (image->storage_class == PseudoClass)
{
PixelPacket
*color;
IndexPacket
index;
index=(IndexPacket) pixel;
if (packet_size == 1)
index=(IndexPacket) ScaleQuantumToChar(index);
index=ConstrainColormapIndex(image,(ssize_t) index);
if (type == 0)
SetPixelIndex(indexes+x,index);
if ((type == 0) && (channels > 1))
return;
color=image->colormap+(ssize_t) GetPixelIndex(indexes+x);
if (type != 0)
SetPixelAlpha(color,pixel);
SetPixelRGBO(q,color);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(q,pixel);
break;
}
case -2:
case 0:
{
SetPixelRed(q,pixel);
if ((channels < 3) || (type == -2))
{
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
}
break;
}
case -3:
case 1:
{
SetPixelGreen(q,pixel);
break;
}
case -4:
case 2:
{
SetPixelBlue(q,pixel);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,pixel);
else
if (image->matte != MagickFalse)
SetPixelAlpha(q,pixel);
break;
}
case 4:
{
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->matte != MagickFalse)
SetPixelAlpha(q,pixel);
break;
}
}
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
q++;
}
else
{
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
row_size;
ssize_t
count,
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(pixels,0,row_size*sizeof(*pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
count=ReadBlob(image,row_size,pixels);
if (count != (ssize_t) row_size)
{
status=MagickFalse;
break;
}
status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
const PSDInfo *psd_info,const size_t size)
{
MagickOffsetType
*sizes;
ssize_t
y;
sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
if(sizes != (MagickOffsetType *) NULL)
{
for (y=0; y < (ssize_t) size; y++)
{
if (psd_info->version == 1)
sizes[y]=(MagickOffsetType) ReadBlobShort(image);
else
sizes[y]=(MagickOffsetType) ReadBlobLong(image);
}
}
return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
length,
row_size;
ssize_t
count,
y;
unsigned char
*compact_pixels,
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RLE compressed");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
if ((MagickOffsetType) length < sizes[y])
length=(size_t) sizes[y];
if (length > (row_size+2048))
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
}
compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
if (count != (ssize_t) sizes[y])
break;
count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
(ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
exception);
if (status == MagickFalse)
break;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
register unsigned char
*p;
size_t
count,
length,
packet_size,
row_size;
ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
if ((MagickSizeType) compact_size > GetBlobSize(image))
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
(void) inflateEnd(&stream);
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
if (ret == Z_STREAM_END)
break;
}
(void) inflateEnd(&stream);
}
if (compression == ZipWithPrediction)
{
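      /*
        Undo the horizontal delta predictor: each sample is stored as the
        difference from its left neighbor.  16-bit samples are big-endian,
        so the carry out of the low byte (p[1]+p[3]) feeds the high byte.
      */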
p=pixels;
while (count > 0)
{
length=image->columns;
while (--length)
{
if (packet_size == 2)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
}
else
*(p+1)+=*p;
p+=packet_size;
}
p+=packet_size;
count-=row_size;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
const size_t channel,const PSDCompressionType compression,
ExceptionInfo *exception)
{
Image
*channel_image,
*mask;
MagickOffsetType
offset;
MagickBooleanType
status;
channel_image=image;
mask=(Image *) NULL;
if ((layer_info->channel_info[channel].type < -1) &&
(layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
{
const char
*option;
/*
Ignore mask that is not a user supplied layer mask, if the mask is
disabled or if the flags have unsupported values.
*/
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if ((layer_info->channel_info[channel].type != -2) ||
(layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
(IsStringTrue(option) == MagickFalse)))
{
(void) SeekBlob(image,(MagickOffsetType)
layer_info->channel_info[channel].size-2,SEEK_CUR);
return(MagickTrue);
}
mask=CloneImage(image,layer_info->mask.page.width,
layer_info->mask.page.height,MagickFalse,exception);
if (mask != (Image *) NULL)
{
(void) ResetImagePixels(mask,exception);
mask->matte=MagickFalse;
channel_image=mask;
}
}
offset=TellBlob(image);
status=MagickFalse;
switch(compression)
{
case Raw:
status=ReadPSDChannelRaw(channel_image,psd_info->channels,
(ssize_t) layer_info->channel_info[channel].type,exception);
break;
case RLE:
{
MagickOffsetType
*sizes;
sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ReadPSDChannelRLE(channel_image,psd_info,
(ssize_t) layer_info->channel_info[channel].type,sizes,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
}
break;
case ZipWithPrediction:
case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
status=ReadPSDChannelZip(channel_image,layer_info->channels,
(ssize_t) layer_info->channel_info[channel].type,compression,
layer_info->channel_info[channel].size-2,exception);
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (ZLIB)",image->filename);
#endif
break;
default:
(void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
"CompressionNotSupported","'%.20g'",(double) compression);
break;
}
(void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
SEEK_SET);
if (status == MagickFalse)
{
if (mask != (Image *) NULL)
(void) DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage",
image->filename);
}
if (mask != (Image *) NULL)
{
if (layer_info->mask.image != (Image *) NULL)
layer_info->mask.image=DestroyImage(layer_info->mask.image);
layer_info->mask.image=mask;
}
return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MaxTextExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
if (layer_info->visible == MagickFalse)
{
layer_info->image->compose=NoCompositeOp;
(void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
}
if (psd_info->mode == CMYKMode)
(void) SetImageColorspace(layer_info->image,CMYKColorspace);
else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
(psd_info->mode == GrayscaleMode))
(void) SetImageColorspace(layer_info->image,GRAYColorspace);
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MaxTextExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MaxTextExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
if ((compression == ZipWithPrediction) && (image->depth == 32))
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
return(MagickFalse);
}
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->matte=MagickTrue;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
(size_t) j,compression,exception);
InheritException(exception,&layer_info->image->exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateImage(layer_info->image,MagickFalse);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
LayerInfo *layer_info)
{
int
channel_type;
register ssize_t
i;
if (layer_info->channels < psd_info->min_channels)
return(MagickFalse);
channel_type=RedChannel;
if (psd_info->min_channels >= 3)
channel_type|=(GreenChannel | BlueChannel);
if (psd_info->min_channels >= 4)
channel_type|=BlackChannel;
for (i=0; i < (ssize_t) layer_info->channels; i++)
{
short
type;
type=layer_info->channel_info[i].type;
if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
return(MagickFalse);
if (type == -1)
{
channel_type|=AlphaChannel;
continue;
}
if (type < -1)
continue;
if (type == 0)
channel_type&=~RedChannel;
else if (type == 1)
channel_type&=~GreenChannel;
else if (type == 2)
channel_type&=~BlueChannel;
else if (type == 3)
channel_type&=~BlackChannel;
}
if (channel_type == 0)
return(MagickTrue);
if ((channel_type == AlphaChannel) &&
(layer_info->channels >= psd_info->min_channels + 1))
return(MagickTrue);
return(MagickFalse);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
/*
The number of layers cannot be used to determine if the merged image
contains an alpha channel. So we enable it when we think we should.
*/
if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 2)) ||
((psd_info->mode == RGBMode) && (psd_info->channels > 3)) ||
((psd_info->mode == CMYKMode) && (psd_info->channels > 4)))
image->matte=MagickTrue;
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
j,
number_layers;
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,(size_t) count);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
CheckMergedImageAlpha(psd_info,image);
return(MagickTrue);
}
else
{
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,4);
if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
(LocaleNCompare(type,"Lr32",4) == 0)))
size=GetPSDSize(psd_info,image);
else
{
CheckMergedImageAlpha(psd_info,image);
return(MagickTrue);
}
}
}
status=MagickTrue;
if (size != 0)
{
layer_info=(LayerInfo *) NULL;
number_layers=(ssize_t) ReadBlobSignedShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->matte=MagickTrue;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
top,
left,
bottom,
right;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
top=(ssize_t) ReadBlobSignedLong(image);
left=(ssize_t) ReadBlobSignedLong(image);
bottom=(ssize_t) ReadBlobSignedLong(image);
right=(ssize_t) ReadBlobSignedLong(image);
if ((right < left) || (bottom < top))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].page.y=top;
layer_info[i].page.x=left;
layer_info[i].page.width=(size_t) (right-left);
layer_info[i].page.height=(size_t) (bottom-top);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
if (count != 4)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
(double) layer_info[i].mask.page.height,(double)
((MagickOffsetType) length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(size_t) length,info);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) ||
(layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping == MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
(MagickSizeType) number_layers);
if (status == MagickFalse)
break;
}
}
if (status != MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers > 0)
{
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
}
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
}
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
PolicyDomain
domain;
PolicyRights
rights;
domain=CoderPolicyDomain;
rights=ReadPolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
return(MagickFalse);
return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
register ssize_t
i;
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
if (compression != Raw && compression != RLE)
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
ssize_t
type;
type=i;
if ((type == 1) && (psd_info->channels == 2))
type=-1;
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
psd_info->channels);
if (status == MagickFalse)
break;
}
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateImage(image,MagickFalse);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
has_merged_image,
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
imageListLength;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace);
}
else
if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize));
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace);
}
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read the PSD raster colormap, which is only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->matte=MagickFalse;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
&has_merged_image);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
imageListLength=GetImageListLength(image);
if ((has_merged_image != MagickFalse) || (imageListLength == 1))
has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
&psd_info,exception);
if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (has_merged_image == MagickFalse)
{
Image
*merged;
if (imageListLength == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.opacity=TransparentOpacity;
(void) SetImageBackgroundColor(image);
merged=MergeImageLayers(image,FlattenLayer,exception);
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
next=image;
while (next != (Image *) NULL)
{
(void) SetImageProfile(next,GetStringInfoName(profile),profile);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
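/*
  A minimal sketch (not part of this coder) of how a client can confirm the
  registration performed below, using the public GetMagickInfo() call:

    ExceptionInfo
      *exception;

    const MagickInfo
      *info;

    exception=AcquireExceptionInfo();
    info=GetMagickInfo("PSD",exception);
    if (info != (const MagickInfo *) NULL)
      (void) FormatLocaleFile(stdout,"%s: %s\n",info->name,info->description);
    exception=DestroyExceptionInfo(exception);
*/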
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("PSB");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Adobe Large Document Format");
entry->magick_module=ConstantString("PSD");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PSD");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Adobe Photoshop bitmap");
entry->magick_module=ConstantString("PSD");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
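/*
  A minimal usage sketch, assuming the usual MagickCore entry points; the
  filename "layers.psd" is illustrative only:

    ImageInfo
      *write_info;

    write_info=CloneImageInfo((ImageInfo *) NULL);
    (void) CopyMagickString(image->filename,"layers.psd",MaxTextExtent);
    if (WriteImage(write_info,image) == MagickFalse)
      CatchImageException(image);
    write_info=DestroyImageInfo(write_info);
*/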
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
const size_t offset)
{
if (psd_info->version == 1)
return(WriteBlobMSBShort(image,(unsigned short) offset));
return(WriteBlobMSBLong(image,(unsigned int) offset));
}
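/*
  Worked example of the version-dependent field width used above: a row
  byte count of 300 is emitted as the big-endian bytes 0x01 0x2C in a
  version-1 (PSD) file, and as 0x00 0x00 0x01 0x2C in a version-2 (PSB)
  file.
*/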
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
if (psd_info->version == 1)
result=WriteBlobMSBShort(image,(unsigned short) size);
else
result=WriteBlobMSBLong(image,(unsigned int) size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size)
{
if (psd_info->version == 1)
return(WriteBlobMSBLong(image,(unsigned int) size));
return(WriteBlobMSBLongLong(image,size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
if (psd_info->version == 1)
result=WriteBlobMSBLong(image,(unsigned int) size);
else
result=WriteBlobMSBLongLong(image,size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels)
{
int
count;
register ssize_t
i,
j;
register unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run.
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run.
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
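/*
  Worked example, tracing the encoder above on the 4-byte input
  AA AA AA BB:

    - the run of three 0xAA bytes becomes the pair 0xFE 0xAA
      (0xFE = (256-3)+1, i.e. "repeat the next byte 3 times"),
    - the single trailing 0xBB becomes the literal pair 0x00 0xBB,
    - 0x80 is appended as the end-of-data marker,

  for a 5-byte result: FE AA 00 BB 80.
*/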
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
const Image *next_image,const ssize_t channels)
{
ssize_t
i,
offset,
y;
if (next_image->compression == RLECompression)
{
offset=WriteBlobMSBShort(image,RLE);
for (i=0; i < channels; i++)
for (y=0; y < (ssize_t) next_image->rows; y++)
offset+=SetPSDOffset(psd_info,image,0);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
offset=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
else
offset=WriteBlobMSBShort(image,Raw);
return((size_t) offset);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate)
{
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const PixelPacket
*p;
register ssize_t
i;
size_t
count,
length;
ssize_t
y;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,1);
}
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1)
? MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(
MagickMinBufferExtent,sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0) && (image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,&image->exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (next_image->compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) MagickMinBufferExtent;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) MagickMinBufferExtent-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
static unsigned char *AcquireCompactPixels(Image *image)
{
size_t
packet_size;
unsigned char
*compact_pixels;
packet_size=image->depth > 8UL ? 2UL : 1UL;
compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
image->columns)+1,packet_size*sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
}
return(compact_pixels);
}
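/*
  Note on the buffer size above: PackBits can expand incompressible input
  (one header byte per literal run of at most 127 bytes, plus the trailing
  end-of-data marker), so (9*columns+1)*packet_size is a deliberately
  generous upper bound on the compressed row length.
*/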
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate)
{
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
length,
offset_length;
ssize_t
count;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
if (next_image->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
if ((next_image->storage_class != PseudoClass) ||
(IsGrayImage(next_image,&next_image->exception) != MagickFalse))
{
if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
channels=(size_t) (next_image->colorspace == CMYKColorspace ?
4 : 3);
if (next_image->matte != MagickFalse)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
channels);
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if ((next_image->storage_class == PseudoClass) &&
(IsGrayImage(next_image,&next_image->exception) == MagickFalse))
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (next_image->colorspace == CMYKColorspace)
(void) NegateImage(next_image,MagickFalse);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->matte != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
if (next_image->colorspace == CMYKColorspace)
(void) NegateImage(next_image,MagickFalse);
if (separate != MagickFalse)
{
const char
*property;
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
if (mask != (Image *) NULL)
{
if (mask->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
size_t
count,
length;
register ssize_t
i;
/*
Max length is 255.
*/
count=0;
length=(strlen(value) > 255UL ) ? 255UL : strlen(value);
if (length == 0)
count+=WriteBlobByte(image,0);
else
{
count+=WriteBlobByte(image,(unsigned char) length);
count+=WriteBlob(image,length,(const unsigned char *) value);
}
length++;
if ((length % padding) == 0)
return(count);
for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
count+=WriteBlobByte(image,0);
return(count);
}
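/*
  Worked example of the padding rule above: WritePascalString(image,"abcd",4)
  emits the length byte 0x04, the four characters, and then three zero bytes
  so that the total (1+4+3 = 8) is a multiple of the padding, returning 8.
*/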
static void WriteResolutionResourceBlock(Image *image)
{
double
x_resolution,
y_resolution;
unsigned short
units;
if (image->units == PixelsPerCentimeterResolution)
{
x_resolution=2.54*65536.0*image->x_resolution+0.5;
y_resolution=2.54*65536.0*image->y_resolution+0.5;
units=2;
}
else
{
x_resolution=65536.0*image->x_resolution+0.5;
y_resolution=65536.0*image->y_resolution+0.5;
units=1;
}
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x03ED);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,16); /* resource size */
(void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
(void) WriteBlobMSBShort(image,units); /* width unit */
(void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
(void) WriteBlobMSBShort(image,units); /* height unit */
}
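/*
  The resolution values above are written as unsigned 16.16 fixed-point
  numbers of pixels per inch; e.g. 72 DPI is stored as 72*65536 = 4718592
  (0x00480000). A PixelsPerCentimeterResolution image is first converted to
  pixels per inch by the factor 2.54, with the unit fields set to 2
  (centimeters) instead of 1 (inches).
*/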
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
const signed short channel)
{
ssize_t
count;
count=WriteBlobMSBSignedShort(image,channel);
count+=SetPSDSize(psd_info,image,0);
return((size_t) count);
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
if ((q+quantum < (datum+length-16)))
(void) memmove(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
((ssize_t) length-(cnt+12)-(q-datum)) > 0)
{
(void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
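/*
  Both removal routines above walk 8BIM image resource records laid out as:

    4 bytes  signature "8BIM"
    2 bytes  resource id (0x040F = ICC profile, 0x03ED = resolution)
    2 bytes  name (assumed here to be an empty, padded Pascal string)
    4 bytes  data length
    n bytes  data, padded to an even length

  which is why each record header is consumed as 4+2+2+4 = 12 bytes.
*/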
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
if (remaining_length > 0)
p=(unsigned char *) memmove(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
(void) SetImageProfile(image,"psd:additional-info",info);
return(profile);
}
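/*
  The layer "additional information" stream parsed above is a sequence of
  records of the form

    4 bytes  signature (skipped here)
    4 bytes  key, e.g. "luni" (Unicode layer name) or "lyid" (layer id)
    4 bytes  big-endian data length
    n bytes  data

  so each record contributes size+12 bytes; records whose key is not in the
  allowed[] table are compacted out of the profile with memmove().
*/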
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image)
{
char
layer_name[MaxTextExtent];
const char
*property;
const StringInfo
*icc_profile,
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
PSDInfo
psd_info;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
num_channels,
packet_size,
rounded_size,
size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->matte != MagickFalse)
packet_size+=image->depth > 8 ? 2 : 1;
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image,0); /* six reserved bytes */
/* When the image has a color profile it won't be converted to grayscale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,&image->exception) != MagickFalse))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorMatteType) && (image->storage_class == PseudoClass))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass);
if (image->colorspace != CMYKColorspace)
num_channels=(image->matte != MagickFalse ? 4UL : 3UL);
else
num_channels=(image->matte != MagickFalse ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsGrayImage(image,&image->exception) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsMonochromeImage(image,&image->exception) &&
(image->depth == 1) ? MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsGrayImage(image,&image->exception) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
base_image=GetNextImageInList(image);
if (base_image == (Image *)NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
(void) SetPSDSize(&psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
if (image->matte != MagickFalse)
size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
}
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
next_image->rows));
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
next_image->columns));
channels=1;
if ((next_image->storage_class != PseudoClass) &&
(IsGrayImage(next_image,&next_image->exception) == MagickFalse))
channels=(unsigned short) (next_image->colorspace == CMYKColorspace ?
4 : 3);
total_channels=channels;
if (next_image->matte != MagickFalse)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobMSBShort(image,total_channels);
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(&psd_info,image,(signed short) i);
if (next_image->matte != MagickFalse)
size+=WriteChannelSize(&psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(&psd_info,image,-2);
size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
size+=WriteBlob(image,4,(const unsigned char *)
CompositeOperatorToPSDBlendMode(next_image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,
&image->exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,(unsigned char)
(next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image);
property=(const char *) GetImageProperty(next_image,"label");
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobMSBLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobMSBLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,&image->exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobMSBLong(image,20);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x);
size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+
mask->page.y));
size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+
mask->page.x));
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,(unsigned char) (
mask->compose == NoCompositeOp ? 2 : 0));
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobMSBLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
(void) DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,
MagickFalse) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
fox_floats_timer_caching_omp_fileIO_benchmark.c | /* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices
*
* Implementation of parallel matrix multiplication:
* LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
*
* Input:
* Input Matrix file name: A.dat, B.dat
*
* Output:
* Output Matrix file name: C.dat
* Output Sub-matrices file name: SubMatrices.dat
*
* Notes:
* 1. Assumes the number of processes is a perfect square
* 2. The array member of the matrices is statically allocated
*
* See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
*/
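/* Sketch of one Fox stage (q = 2 process grid), matching Fox() below:
 * at stage s, the process in grid position (i, (i+s) mod q) broadcasts its
 * block of A along row i, every process multiplies the received block into
 * its local C, and the blocks of B are then shifted up one position along
 * each grid column. For q = 2 this yields, e.g.,
 * C_{00} = A_{00} B_{00} (stage 0) + A_{01} B_{10} (stage 1).
 */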
/* Compiler command:
* mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c
* -o fox_floats_timer_caching_omp_fileIO_benchmark
*
* Run command:
* mpirun -n 4 ./fox_floats_timer_caching_omp_fileIO_benchmark
*/
/* Header files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
// define problem scale, matrix row/col size
#define PROBLEM_SCALE 4096
// define whether or not to print matrices on the command line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0
// define float precision: 4-byte single-precision or 8-byte double-precision
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE
// Define the number of OpenMP threads used in the computation
#define NUM_THREADS 2
// Define thread affinity: "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"
/* Typedef for the process grid structure */
typedef struct {
int p; /* Total number of processes */
MPI_Comm comm; /* Communicator for entire grid */
MPI_Comm row_comm; /* Communicator for my row */
MPI_Comm col_comm; /* Communicator for my col */
int q; /* Order of grid */
int my_row; /* My row number */
int my_col; /* My column number */
int my_rank; /* My rank in the grid comm */
} GRID_INFO_T;
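/* Example: with p = 4 processes the grid order is q = 2, and (under the
 * usual row-major rank ordering of MPI_Cart_create, which may relabel ranks
 * since reorder is enabled) ranks 0..3 occupy grid coordinates
 * (0,0), (0,1), (1,0), (1,1). */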
/* Typedef for the local matrix structure */
#define MAX 2097152 // Maximum number of elements in the array that stores the local matrix (2^21)
typedef struct {
int n_bar;
#define Order(A) ((A)->n_bar) // definition with parameters
FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // definition with parameters, array dereference
} LOCAL_MATRIX_T;
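/* Entry(A,i,j) indexes the flat entries[] array row-major: for n_bar = 2 the
 * layout is { (0,0), (0,1), (1,0), (1,1) }. Note that Read_matrix_B() stores
 * B's blocks transposed into this same layout, so Local_matrix_multiply()
 * can stream both operands contiguously. */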
/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for contiguous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
GRID_INFO_T* grid, int n); // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Print matrix A in the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid, int n); // Print matrix B in the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Print matrix C in the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid);
void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B,
GRID_INFO_T* grid);
void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Write matrix multiplication to a file
void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix A to a file
void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid); // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix C to a file
/*********************************************************/
int main(int argc, char* argv[]) {
FILE *fp;
int p;
int my_rank;
GRID_INFO_T grid;
LOCAL_MATRIX_T* local_A;
LOCAL_MATRIX_T* local_B;
LOCAL_MATRIX_T* local_C;
int n;
int n_bar;
double timer_start;
double timer_end;
int content;
int i;
int j;
void Setup_grid(GRID_INFO_T* grid);
void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
// Matrix Generator
fp = fopen("A.dat", "w"); // Generate and print matrix A into a file
for (i = 0; i < PROBLEM_SCALE; i++) {
for (j = 0; j < PROBLEM_SCALE; j++)
if(i == j){
fprintf(fp,"%d ", 1);
}
else {
fprintf(fp,"%d ", 0);
}
fprintf(fp,"\n");
}
fclose(fp);
fp = fopen("B.dat", "w"); // Generate and print matrix B into a file
for (i = 0; i < PROBLEM_SCALE; i++){
for (j = 0; j < PROBLEM_SCALE; j++)
fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j);
fprintf(fp, "\n");
}
fclose(fp);
// SPMD mode starts from here (processes fork from here)
MPI_Init(&argc, &argv); // MPI initializing
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
// Initial OpenMP Environment
omp_set_num_threads(NUM_THREADS);
kmp_set_defaults(AFFINITY);
Setup_grid(&grid); // Set up Processess grid
if (my_rank == 0) {
fp = fopen("A.dat","r");
n = 0;
while((content = fgetc(fp)) != EOF)
{
//printf("fgetc = %d\n", content);
if(content != 0x20 && content != 0x0A) n++; // count non-blank characters; valid since A.dat entries are single digits
}
fclose(fp);
n = (int) sqrt((double) n);
printf("We read the order of the matrices from A.dat is\n %d\n", n);
// while(fgetc(fp) != EOF) n++;
// printf("What's the order of the matrices?\n");
// scanf("%d", &n); // Overall Matrix's Order
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order
n_bar = n/grid.q; // \bar n is the local matrix's order
local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A
Order(local_A) = n_bar; // Local matrix A's order
Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_A == 1)
Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure)
local_B = Local_matrix_allocate(n_bar); // Allocate local matrix
Order(local_B) = n_bar; // Local matrix B's order
Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_B == 1)
Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure)
Build_matrix_type(local_A); // Build local_A's MPI matrix data type
temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n_bar x n_bar
local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C
Order(local_C) = n_bar; // Set matrix local_C's order
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
timer_start = MPI_Wtime(); // Get the MPI wall time
Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function
timer_end = MPI_Wtime(); // Get the MPI wall time
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
if (PRINT_C == 1)
Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
Write_local_matrices_A("Write split of local matrix A into local_A.dat",
local_A, &grid); // Write local matrix A into file
if (PRINT_LOCAL_A == 1)
Print_local_matrices_A("Split of local matrix A",
local_A, &grid); // Print matrix A split in processess
Write_local_matrices_B("Write split of local matrix B into local_B.dat",
local_B, &grid); // Write local matrix B into file, special for row-major storage
if (PRINT_LOCAL_B == 1)
Print_local_matrices_B("Split of local matrix B",
local_B, &grid); // Print matrix B split in processess, special for row-major storage
Write_local_matrices_C("Write split of local matrix C into local_C.dat",
local_C, &grid); // Print matrix C split in processess
if (PRINT_LOCAL_C == 1)
Print_local_matrices_C("Split of local matrix C",
local_C, &grid); // Print matrix C split in processess
Free_local_matrix(&local_A); // Free local matrix local_A
Free_local_matrix(&local_B); // Free local matrix local_B
Free_local_matrix(&local_C); // Free local matrix local_C
if(my_rank == 0)
printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start);
MPI_Finalize(); // MPI finalize, processes join and resource recycle
} /* main */
/*********************************************************/
void Setup_grid(
GRID_INFO_T* grid /* out */) {
int old_rank;
int dimensions[2];
int wrap_around[2];
int coordinates[2];
int free_coords[2];
/* Set up Global Grid Information */
MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);
/* We assume p is a perfect square */ // note: no check is performed if it is not
grid->q = (int) sqrt((double) grid->p);
dimensions[0] = dimensions[1] = grid->q;
/* We want a circular shift in second dimension. */
/* Don't care about first */
wrap_around[0] = wrap_around[1] = 1;
MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions,
wrap_around, 1, &(grid->comm));
MPI_Comm_rank(grid->comm, &(grid->my_rank));
MPI_Cart_coords(grid->comm, grid->my_rank, 2,
coordinates);
grid->my_row = coordinates[0];
grid->my_col = coordinates[1];
/* Set up row communicators */
free_coords[0] = 0;
free_coords[1] = 1;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->row_comm));
/* Set up column communicators */
free_coords[0] = 1;
free_coords[1] = 0;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->col_comm));
} /* Setup_grid */
/*********************************************************/
void Fox(
int n /* in */,
GRID_INFO_T* grid /* in */,
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */
/* matrix of A used during */
/* the current stage */
int stage;
int bcast_root;
int n_bar; /* n/sqrt(p) */
int source;
int dest;
MPI_Status status;
n_bar = n/grid->q;
Set_to_zero(local_C);
/* Calculate addresses for row circular shift of B */
source = (grid->my_row + 1) % grid->q;
dest = (grid->my_row + grid->q - 1) % grid->q;
/* Set aside storage for the broadcast block of A */
temp_A = Local_matrix_allocate(n_bar);
for (stage = 0; stage < grid->q; stage++) {
bcast_root = (grid->my_row + stage) % grid->q;
if (bcast_root == grid->my_col) { // Process P_{ii} broadcasts A_{ii} in the process grid's row communicator
MPI_Bcast(local_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(local_A, local_B,
local_C);
} else { // temp_A is a buffer for process P_{ij} to store A_{ij}
MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(temp_A, local_B,
local_C);
}
MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with a single buffer
dest, 0, source, 0, grid->col_comm, &status); // Circularly shift the blocks of B up one position in each grid column after the local multiplication
} /* for */
} /* Fox */
/*********************************************************/
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { // local_order is unused: entries[] is statically sized (MAX)
LOCAL_MATRIX_T* temp;
temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
return temp;
} /* Local_matrix_allocate */
/*********************************************************/
void Free_local_matrix(
LOCAL_MATRIX_T** local_A_ptr /* in/out */) {
free(*local_A_ptr);
} /* Free_local_matrix */
/*********************************************************/
/* Read and distribute matrix for matrix A:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
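/* Example: n = 4, q = 2, n_bar = 2. Matrix row 0 is split as: entries 0..1
 * go to the process at grid (0,0), entries 2..3 to grid (0,1); rows 2 and 3
 * are handled the same way by grid row 1 (grid_row = mat_row/n_bar). */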
void Read_matrix_A(
char* prompt /* in */,
LOCAL_MATRIX_T* local_A /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) { // Process 0 reads the matrix from A.dat and sends blocks to the other processes
fp = fopen("A.dat","r");
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) {
for (mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp, "%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
*/
} else {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp,"%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // the other processes receive their blocks from process 0
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm, &status);
}
} /* Read_matrix_A */
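/* Distribution example (illustrative): with n = 4 and q = 2 (so
* n_bar = 2), columns 0-1 of global rows 0-1 land on process (0,0)
* and columns 2-3 on (0,1); global rows 2-3 are split the same way
* across grid row 1. */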
/*********************************************************/
/* Read and distribute matrix B (stored locally as its transpose):
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
void Read_matrix_B(
char* prompt /* in */,
LOCAL_MATRIX_T* local_B /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT *temp;
MPI_Status status;
if (grid->my_rank == 0) { // process 0 reads the matrix from B.dat and sends blocks to the other processes
fp = fopen("B.dat","r");
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) { // process 0 (local)
for (mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage
/* scanf("%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage
*/
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col); */
} else {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // the other processes receive their blocks from process 0
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // staging buffer for one incoming row
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
MPI_Recv(temp, Order(local_B),
FLOAT_MPI, 0, 0, grid->comm, &status); // receive row mat_col of the block
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // store it as column mat_col: local_B holds B transposed
}
free(temp);
}
} /* Read_matrix_B */
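/* Storage example (illustrative): for a 2x2 block whose values arrive
* in row order b00 b01 b10 b11, the loops above store
* entries[] = {b00, b10, b01, b11}, i.e. local_B holds B transposed so
* the multiply kernel can stream both operands with unit stride. */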
/*********************************************************/
/* Receive and Print Matrix A:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_A */
/*********************************************************/
/* Receive and Print Matrix B (stored locally as its transpose):
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
*(temp+mat_row) = Entry(local_B, mat_row, mat_col); // gather column mat_col in row order: local_B stores B transposed
MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
}
free(temp);
}
} /* Print_matrix_B */
/*********************************************************/
/* Receive and Print Matrix C:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", Entry(local_C, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_C */
/*********************************************************/
/* Receive and Write Matrix C to a file:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from the other processes and write them to C.dat
*/
void Write_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
fp = fopen("C.dat", "w+");
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col));
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", temp[mat_col]);
// printf("%20.15E ", temp[mat_col]);
}
}
fprintf(fp,"\n");
}
free(temp);
fclose(fp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Write_matrix_C */
/*********************************************************/
/*
* Set the local matrix's entries to zero
*/
void Set_to_zero(
LOCAL_MATRIX_T* local_A /* out */) {
int i, j;
for (i = 0; i < Order(local_A); i++)
for (j = 0; j < Order(local_A); j++)
Entry(local_A,i,j) = 0.0E0;
} /* Set_to_zero */
/*********************************************************/
void Build_matrix_type(
LOCAL_MATRIX_T* local_A /* in */) {
MPI_Datatype temp_mpi_t;
int block_lengths[2];
MPI_Aint displacements[2];
MPI_Datatype typelist[2];
MPI_Aint start_address;
MPI_Aint address;
MPI_Type_contiguous(Order(local_A)*Order(local_A),
FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype
/*
Synopsis
int MPI_Type_contiguous(int count,
MPI_Datatype oldtype,
MPI_Datatype *newtype)
Input Parameters
count
replication count (nonnegative integer)
oldtype
old datatype (handle)
Output Parameters
newtype
new datatype (handle)
*/
block_lengths[0] = block_lengths[1] = 1;
typelist[0] = MPI_INT;
typelist[1] = temp_mpi_t;
MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory
MPI_Address(&(local_A->n_bar), &address);
/*
Synopsis
int MPI_Address(const void *location, MPI_Aint *address)
Input Parameters
location
location in caller memory (choice)
Output Parameters
address
address of location (address integer)
*/
displacements[0] = address - start_address;
MPI_Address(local_A->entries, &address);
displacements[1] = address - start_address;
MPI_Type_struct(2, block_lengths, displacements,
typelist, &local_matrix_mpi_t); // Creates a struct datatype
/*
Synopsis
int MPI_Type_struct(int count,
const int *array_of_blocklengths,
const MPI_Aint *array_of_displacements,
const MPI_Datatype *array_of_types,
MPI_Datatype *newtype)
Input Parameters
count
number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths
array_of_blocklengths
number of elements in each block (array)
array_of_displacements
byte displacement of each block (array)
array_of_types
type of elements in each block (array of handles to datatype objects)
Output Parameters
newtype
new datatype (handle)
*/
MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype
/*
Synopsis
int MPI_Type_commit(MPI_Datatype *datatype)
Input Parameters
datatype
datatype (handle)
*/
} /* Build_matrix_type */
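/* MPI_Address and MPI_Type_struct above are deprecated and were
* removed in MPI-3. A minimal sketch of the same construction with the
* modern calls (a hypothetical helper, not part of the original
* program, assuming the LOCAL_MATRIX_T layout used above): */
void Build_matrix_type_mpi3(
LOCAL_MATRIX_T* local_A /* in */,
MPI_Datatype* new_mpi_t /* out */) {
MPI_Datatype entries_mpi_t;
int block_lengths[2] = {1, 1};
MPI_Aint displacements[2];
MPI_Datatype typelist[2];
MPI_Aint start_address, address;
MPI_Type_contiguous(Order(local_A)*Order(local_A),
FLOAT_MPI, &entries_mpi_t);
typelist[0] = MPI_INT;
typelist[1] = entries_mpi_t;
MPI_Get_address(local_A, &start_address); // modern replacement for MPI_Address
MPI_Get_address(&(local_A->n_bar), &address);
displacements[0] = address - start_address;
MPI_Get_address(local_A->entries, &address);
displacements[1] = address - start_address;
MPI_Type_create_struct(2, block_lengths, displacements,
typelist, new_mpi_t); // modern replacement for MPI_Type_struct
MPI_Type_commit(new_mpi_t);
} /* Build_matrix_type_mpi3 */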
/*********************************************************/
/* local matrix multiplication function
* with OpenMP thread acceleration
*/
void Local_matrix_multiply(
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
int i, j, k;
// int my_rank;
// MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // split the outer row loop across NUM_THREADS OpenMP threads
for (i = 0; i < Order(local_A); i++) {
// printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num());
for (j = 0; j < Order(local_A); j++)
for (k = 0; k < Order(local_B); k++)
Entry(local_C,i,j) = Entry(local_C,i,j) // local_B stores B transposed, so
+ Entry(local_A,i,k)*Entry(local_B,j,k); // A(i,k)*B^T(j,k) reads both operands with unit stride over k
/* Entry(local_C,i,j) = Entry(local_C,i,j)
+ Entry(local_A,i,k)*Entry(local_B,k,j); // non-contiguous access over k; the transposed form above is preferred
*/
}
} /* Local_matrix_multiply */
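/* Access-pattern note (illustrative): with Entry(M,i,j) indexing
* row-major storage, Entry(local_B,j,k) walks a stored row of local_B
* (a column of the logical B) with unit stride as k grows, so the
* inner loop streams both A and the transposed B contiguously. */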
/*********************************************************/
/* Receive and Print Local Matrix A:
* Process 0 prints its own local_A,
* the other processes send their local_A to process 0,
* and process 0 receives and prints each in turn.
*/
void Print_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// output gathered and printed by process 0 of the process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
printf("%20.15E ", Entry(local_A,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_A */
/*********************************************************/
/* Receive and Print Local Matrix B (stored as its transpose):
* Process 0 prints its own local_B,
* the other processes send their local_B to process 0,
* and process 0 receives and prints each in turn.
*/
void Print_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// output gathered and printed by process 0 of the process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_B */
/*********************************************************/
/* Receive and Print Local Matrix C:
* Process 0 prints its own local_C,
* the other processes send their local_C to process 0,
* and process 0 receives and prints each in turn.
*/
void Print_local_matrices_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// output gathered and printed by process 0 of the process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_C); i++) {
for (j = 0; j < Order(local_C); j++)
printf("%20.15E ", Entry(local_C,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_C */
/*********************************************************/
/* Receive and Write Local Matrix A:
* Process 0 writes its own local_A to local_A.dat,
* the other processes send their local_A to process 0,
* and process 0 receives and writes each in turn.
*/
void Write_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// output gathered and written by process 0 of the process mesh
if (grid->my_rank == 0) {
fp = fopen("local_A.dat","w+");
printf("%s\n", title);
fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
fprintf(fp,"%20.15E ", Entry(local_A,i,j));
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_A */
/*********************************************************/
/* Receive and Write Local Matrix B (stored as its transpose):
* Process 0 writes its own local_B to local_B.dat,
* the other processes send their local_B to process 0,
* and process 0 receives and writes each in turn.
*/
void Write_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// output gathered and written by process 0 of the process mesh
if (grid->my_rank == 0) {
fp = fopen("local_B.dat","w+");
printf("%s\n", title);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_B */
/*********************************************************/
/* Receive and Write Local Matrix C:
* Process 0 writes its own local_C to local_C.dat,
* the other processes send their local_C to process 0,
* and process 0 receives and writes each in turn.
*/
void Write_local_matrices_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// output gathered and written by process 0 of the process mesh
if (grid->my_rank == 0) {
fp = fopen("local_C.dat","w+");
printf("%s\n", title);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_C); i++) {
for (j = 0; j < Order(local_C); j++)
fprintf(fp, "%20.15E ", Entry(local_C,i,j));
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_C */
|
gramSchmidt_gpu.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define COLS 1000
#define ROWS 1000
#define FLOAT_T float
FLOAT_T *getFinput(int scale)
{
FLOAT_T *input;
if ((input = (FLOAT_T *)malloc(sizeof(FLOAT_T) * scale)) == NULL)
{
fprintf(stderr, "Out of Memory!!\n");
exit(1);
}
for (int i = 0; i < scale; i++)
{
input[i] = ((FLOAT_T)rand() / (FLOAT_T)RAND_MAX) - 0.5;
}
return input;
}
FLOAT_T **get2Darr(int M, int N)
{
FLOAT_T **input;
input = (FLOAT_T **)malloc(M * sizeof(FLOAT_T *));
for (int i = 0; i < M; i++)
{
input[i] = (FLOAT_T *)malloc(N * sizeof(FLOAT_T));
}
return input;
}
void gramSchmidt_gpu(FLOAT_T **Q)
{
int cols = COLS;
/* Note: only the normalization step is implemented here (no projection
against earlier columns); also, mapping a pointer-to-pointer section
may not deep-copy the row allocations on all OpenMP implementations. */
#pragma omp target data map(Q[0:ROWS][0:cols])
for(int k=0; k < cols; k++)
{
double tmp = 0.0;
#pragma omp target map(tofrom: tmp)
#pragma omp parallel for reduction(+:tmp)
for(int i=0; i < ROWS; i++)
tmp += (Q[i][k] * Q[i][k]);
tmp = 1/sqrt(tmp);
#pragma omp target
#pragma omp parallel for
for(int i=0; i < ROWS; i++)
Q[i][k] *= tmp;
}
}
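/* For completeness, a host-side sketch (a hypothetical helper, not part
of the original program) of the projection step that full modified
Gram-Schmidt would perform after normalizing column k: */
static void mgs_project_out(FLOAT_T **Q, int k)
{
for (int j = k + 1; j < COLS; j++)
{
FLOAT_T dot = 0.0;
for (int i = 0; i < ROWS; i++) /* dot = q_k . q_j */
dot += Q[i][k] * Q[i][j];
for (int i = 0; i < ROWS; i++) /* q_j -= dot * q_k */
Q[i][j] -= dot * Q[i][k];
}
}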
int main()
{
FLOAT_T **Q = get2Darr(ROWS, COLS);
/* fill Q with random data; get2Darr() leaves it uninitialized */
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++)
Q[i][j] = ((FLOAT_T)rand() / (FLOAT_T)RAND_MAX) - 0.5;
gramSchmidt_gpu(Q);
return 0;
}
|
sync.c | /**
* \file
* \brief BOMP barrier synchronization microbenchmark
*/
/*
* Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <assert.h>
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#define PERIOD 2500000000UL
#define ITERATIONS 10
#define STACK_SIZE (64 * 1024)
struct workcnt {
uint64_t cnt;
} __attribute__ ((aligned (64)));
int main(int argc, char *argv[])
{
static struct workcnt workcnt[32];
static struct workcnt exittime[ITERATIONS];
int nthreads;
int iterations = 0;
uint64_t last;
/* uint64_t last = rdtsc(); */
/* while(rdtsc() < last + PERIOD) { */
/* thread_yield(); */
/* } */
if(argc == 2) {
nthreads = atoi(argv[1]);
bomp_bomp_init(nthreads);
omp_set_num_threads(nthreads);
} else {
assert(!"Specify number of threads");
}
#if CONFIG_TRACE
errval_t err = trace_control(TRACE_EVENT(TRACE_SUBSYS_BOMP,
TRACE_EVENT_BOMP_START, 0),
TRACE_EVENT(TRACE_SUBSYS_BOMP,
TRACE_EVENT_BOMP_STOP, 0), 0);
assert(err_is_ok(err));
trace_event(TRACE_SUBSYS_BOMP, TRACE_EVENT_BOMP_START, 0);
#endif
/* bomp_synchronize(); */
last = rdtsc();
for(int iter = 0;; iter = (iter + 1) % ITERATIONS) {
// Do some work
#pragma omp parallel
for(uint64_t i = 0;; i++) {
#pragma omp barrier
workcnt[omp_get_thread_num()].cnt++;
#pragma omp master
if(rdtsc() >= last + PERIOD) {
#if CONFIG_TRACE
trace_event(TRACE_SUBSYS_BOMP, TRACE_EVENT_BOMP_STOP, 0);
char *buf = malloc(4096*4096);
trace_dump(buf, 4096*4096, NULL);
printf("%s\n", buf);
abort();
#endif
printf("%s, %lu: threads %d (%s), progress ", argv[0], rdtsc(), omp_get_num_threads(), omp_get_dynamic() ? "dynamic" : "static");
for(int n = 0; n < 32; n++) {
printf("%lu ", workcnt[n].cnt);
}
printf("\n");
last += PERIOD;
iterations++;
if(iterations == 25) {
printf("client done\n");
abort();
}
if(exittime[iter].cnt == 0) {
exittime[iter].cnt = i + 3;
exittime[(iter + ITERATIONS - 2) % ITERATIONS].cnt = 0;
}
}
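// Termination note (explanatory addition): the master schedules the
// exit three barrier iterations ahead; every thread observes the same
// i at each barrier, so all threads take the break below in the same
// iteration and none is left blocked in the barrier.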
if(exittime[iter].cnt != 0 && exittime[iter].cnt == i) {
break;
}
}
}
}
|
GB_unop__identity_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_uint64)
// op(A') function: GB (_unop_tran__identity_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_uint64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hello_par.c | #include <stdio.h>
#include <omp.h>
int main ()
{
int nthreads = 4;
omp_set_num_threads(nthreads);
#pragma omp parallel
{
int id = omp_get_thread_num();
// one combined printf: separate calls could interleave across threads
printf("Hello World from thread = %d with %d threads\n", id, omp_get_num_threads());
}
printf("all done, with hopefully %d threads\n",nthreads);
}
|
dot.c | // dot product benchmark skeleton program
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timing.h"
int main(int argc, char** argv) {
double wct_start,wct_end,cput_start,cput_end,runtime;
int iter,size,i,j;
double *f1, *f2;
iter = 1000;
double mintime = 4.0;
if (argc != 2 && argc != 3) {
printf("Usage: %s <size> [mintime]\n",argv[0]);
exit(1);
}
if (argc == 3) {
mintime = atof(argv[2]);
}
size = atoi(argv[1]);
f1 = malloc((size_t)size*sizeof(double));
f2 = malloc((size_t)size*sizeof(double));
#pragma omp parallel for schedule(static)
for (i = 0; i < size; i++) {
f1[i] = sin( (double) i * i);
f2[i] = cos( (double) 2*i);
}
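// First-touch note (illustrative): initializing f1/f2 in a parallel
// loop with the same static schedule as the timed loop places each
// page on the NUMA node of the thread that will later read it.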
while (1) {
double sum = 0.0; // note: sum is never consumed afterwards, so an aggressive optimizer may elide the loop
timing(&wct_start, &cput_start);
for (j = 0; j < iter; j++) {
#pragma omp parallel for reduction(+:sum) schedule(static)
for (i = 0; i < size; i++) {
sum += f1[i]*f2[i];
}
}
timing(&wct_end, &cput_end);
// making sure mintime was spent, otherwise restart with 2*iter
if (wct_end - wct_start > mintime) {
break;
}
iter = iter * 2;
}
runtime = wct_end - wct_start;
printf("size:\t%d\ttime/iter:\t%lf\tGFLOP/s:\t%lf\n", size, runtime/iter, ((double)iter) * size * 1e-9 / runtime);
return 0;
}
|
pr25990.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -O2 -std=c99" } */
typedef __SIZE_TYPE__ size_t;
typedef struct {
int _flags;
} FILE;
extern FILE *fopen (__const char *__restrict __filename,
__const char *__restrict __modes);
extern size_t fread (void *__restrict __ptr, size_t __size,
size_t __n, FILE *__restrict __stream) ;
extern int fclose (FILE *__stream);
extern size_t fwrite (__const void *__restrict __ptr, size_t __size,
size_t __n, FILE *__restrict __s) ;
extern void *malloc (size_t __size) __attribute__ ((__nothrow__)) __attribute__ ((__malloc__)) ;
extern size_t strlen (__const char *__s)
__attribute__ ((__nothrow__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern int strncmp (__const char *__s1, __const char *__s2, size_t __n)
__attribute__ ((__nothrow__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1, 2)));
extern int __attribute__ ((__nothrow__)) atoi (__const char *__nptr);
extern float cabsf (float _Complex __z) __attribute__ ((__nothrow__)); extern float __cabsf (float _Complex __z) __attribute__ ((__nothrow__));
extern float sqrtf (float __x) __attribute__ ((__nothrow__)); extern float __sqrtf (float __x) __attribute__ ((__nothrow__));
int main(int argc , char * argv[])
{
int AA, BB, CC;
AA = 99;
BB = 99;
CC = (int)atoi(argv[3]);
int m,n,s;
int DD,EE,num_s;
float _Complex * restrict A;
A = malloc((AA) * (BB) * (CC) * sizeof(float _Complex));
int A_slice_stride;
A_slice_stride = (AA) * (BB) ;
float * restrict f;
f = malloc(CC * sizeof(float));
FILE *fp;
fp = fopen(argv[1],"rb");
fread(A,sizeof(float _Complex),AA * BB * CC,fp);
fclose(fp);
fp = fopen(argv[2],"rb");
fread(f,sizeof(float),CC,fp);
fclose(fp);
DD = (int)atoi(argv[4]);
EE = (int)atoi(argv[5]);
num_s = (EE - DD) + 1;
float * restrict INPUT;
INPUT = malloc(4 * 4 * sizeof(float));
int m_max = 99;
int n_max = 00;
float h = 0.1;
float FF = 10;
if ((__extension__ (__builtin_constant_p (5) && ((__builtin_constant_p (argv[6]) && strlen (argv[6]) < ((size_t) (5))) || (__builtin_constant_p ("plane") && strlen ("plane") < ((size_t) (5)))) ? __extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (argv[6]) && __builtin_constant_p ("plane") && (__s1_len = strlen (argv[6]), __s2_len = strlen ("plane"), (!((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) || __s1_len >= 4) && (!((size_t)(const void *)(("plane") + 1) - (size_t)(const void *)("plane") == 1) || __s2_len >= 4)) ? __builtin_strcmp (argv[6], "plane") : (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) && (__s1_len = strlen (argv[6]), __s1_len < 4) ? (__builtin_constant_p ("plane") && ((size_t)(const void *)(("plane") + 1) - (size_t)(const void *)("plane") == 1) ? __builtin_strcmp (argv[6], "plane") : (__extension__ ({ __const unsigned char *__s2 = (__const unsigned char *) (__const char *) ("plane"); register int __result = (((__const unsigned char *) (__const char *) (argv[6]))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((__const unsigned char *) (__const char *) (argv[6]))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("plane") && ((size_t)(const void *)(("plane") + 1) - (size_t)(const void *)("plane") == 1) && (__s2_len = strlen ("plane"), __s2_len < 4) ? (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) ? __builtin_strcmp (argv[6], "plane") : (__extension__ ({ __const unsigned char *__s1 = (__const unsigned char *) (__const char *) (argv[6]); register int __result = __s1[0] - ((__const unsigned char *) (__const char *) ("plane"))[0]; if (__s2_len > 0 && __result == 0) { __result = (__s1[1] - ((__const unsigned char *) (__const char *) ("plane"))[1]); if (__s2_len > 1 && __result == 0) { __result = (__s1[2] - ((__const unsigned char *) (__const char *) ("plane"))[2]); if (__s2_len > 2 && __result == 0) __result = (__s1[3] - ((__const unsigned char *) (__const char *) ("plane"))[3]); } } __result; }))) : __builtin_strcmp (argv[6], "plane")))); }) : (__extension__ (__builtin_constant_p (5) && ((__builtin_constant_p (argv[6]) && strlen (argv[6]) < ((size_t) (5))) || (__builtin_constant_p ("plane") && strlen ("plane") < ((size_t) (5)))) ? __extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (argv[6]) && __builtin_constant_p ("plane") && (__s1_len = strlen (argv[6]), __s2_len = strlen ("plane"), (!((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) || __s1_len >= 4) && (!((size_t)(const void *)(("plane") + 1) - (size_t)(const void *)("plane") == 1) || __s2_len >= 4)) ? __builtin_strcmp (argv[6], "plane") : (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) && (__s1_len = strlen (argv[6]), __s1_len < 4) ? (__builtin_constant_p ("plane") && ((size_t)(const void *)(("plane") + 1) - (size_t)(const void *)("plane") == 1) ? 
__builtin_strcmp (argv[6], "plane") : (__extension__ ({ __const unsigned char *__s2 = (__const unsigned char *) (__const char *) ("plane"); register int __result = (((__const unsigned char *) (__const char *) (argv[6]))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((__const unsigned char *) (__const char *) (argv[6]))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("plane") && ((size_t)(const void *)(("plane") + 1) - (size_t)(const void *)("plane") == 1) && (__s2_len = strlen ("plane"), __s2_len < 4) ? (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) ? __builtin_strcmp (argv[6], "plane") : (__extension__ ({ __const unsigned char *__s1 = (__const unsigned char *) (__const char *) (argv[6]); register int __result = __s1[0] - ((__const unsigned char *) (__const char *) ("plane"))[0]; if (__s2_len > 0 && __result == 0) { __result = (__s1[1] - ((__const unsigned char *) (__const char *) ("plane"))[1]); if (__s2_len > 1 && __result == 0) { __result = (__s1[2] - ((__const unsigned char *) (__const char *) ("plane"))[2]); if (__s2_len > 2 && __result == 0) __result = (__s1[3] - ((__const unsigned char *) (__const char *) ("plane"))[3]); } } __result; }))) : __builtin_strcmp (argv[6], "plane")))); }) : strncmp (argv[6], "plane", 5)))))==0) {
m_max = INPUT[ ( ((1)-1) + ( ((1)-1)*4 ))];
n_max = INPUT[ ( ((2)-1) + ( ((1)-1)*4 ))];
h = INPUT[ ( ((3)-1) + ( ((1)-1)*4 ))];
FF = INPUT[ ( ((4)-1) + ( ((1)-1)*4 ))];
}
if ((__extension__ (__builtin_constant_p (6) && ((__builtin_constant_p (argv[6]) && strlen (argv[6]) < ((size_t) (6))) || (__builtin_constant_p ("sphere") && strlen ("sphere") < ((size_t) (6)))) ? __extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (argv[6]) && __builtin_constant_p ("sphere") && (__s1_len = strlen (argv[6]), __s2_len = strlen ("sphere"), (!((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) || __s1_len >= 4) && (!((size_t)(const void *)(("sphere") + 1) - (size_t)(const void *)("sphere") == 1) || __s2_len >= 4)) ? __builtin_strcmp (argv[6], "sphere") : (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) && (__s1_len = strlen (argv[6]), __s1_len < 4) ? (__builtin_constant_p ("sphere") && ((size_t)(const void *)(("sphere") + 1) - (size_t)(const void *)("sphere") == 1) ? __builtin_strcmp (argv[6], "sphere") : (__extension__ ({ __const unsigned char *__s2 = (__const unsigned char *) (__const char *) ("sphere"); register int __result = (((__const unsigned char *) (__const char *) (argv[6]))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((__const unsigned char *) (__const char *) (argv[6]))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("sphere") && ((size_t)(const void *)(("sphere") + 1) - (size_t)(const void *)("sphere") == 1) && (__s2_len = strlen ("sphere"), __s2_len < 4) ? (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) ? __builtin_strcmp (argv[6], "sphere") : (__extension__ ({ __const unsigned char *__s1 = (__const unsigned char *) (__const char *) (argv[6]); register int __result = __s1[0] - ((__const unsigned char *) (__const char *) ("sphere"))[0]; if (__s2_len > 0 && __result == 0) { __result = (__s1[1] - ((__const unsigned char *) (__const char *) ("sphere"))[1]); if (__s2_len > 1 && __result == 0) { __result = (__s1[2] - ((__const unsigned char *) (__const char *) ("sphere"))[2]); if (__s2_len > 2 && __result == 0) __result = (__s1[3] - ((__const unsigned char *) (__const char *) ("sphere"))[3]); } } __result; }))) : __builtin_strcmp (argv[6], "sphere")))); }) : (__extension__ (__builtin_constant_p (6) && ((__builtin_constant_p (argv[6]) && strlen (argv[6]) < ((size_t) (6))) || (__builtin_constant_p ("sphere") && strlen ("sphere") < ((size_t) (6)))) ? __extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (argv[6]) && __builtin_constant_p ("sphere") && (__s1_len = strlen (argv[6]), __s2_len = strlen ("sphere"), (!((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) || __s1_len >= 4) && (!((size_t)(const void *)(("sphere") + 1) - (size_t)(const void *)("sphere") == 1) || __s2_len >= 4)) ? __builtin_strcmp (argv[6], "sphere") : (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) && (__s1_len = strlen (argv[6]), __s1_len < 4) ? (__builtin_constant_p ("sphere") && ((size_t)(const void *)(("sphere") + 1) - (size_t)(const void *)("sphere") == 1) ? 
__builtin_strcmp (argv[6], "sphere") : (__extension__ ({ __const unsigned char *__s2 = (__const unsigned char *) (__const char *) ("sphere"); register int __result = (((__const unsigned char *) (__const char *) (argv[6]))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (argv[6]))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((__const unsigned char *) (__const char *) (argv[6]))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("sphere") && ((size_t)(const void *)(("sphere") + 1) - (size_t)(const void *)("sphere") == 1) && (__s2_len = strlen ("sphere"), __s2_len < 4) ? (__builtin_constant_p (argv[6]) && ((size_t)(const void *)((argv[6]) + 1) - (size_t)(const void *)(argv[6]) == 1) ? __builtin_strcmp (argv[6], "sphere") : (__extension__ ({ __const unsigned char *__s1 = (__const unsigned char *) (__const char *) (argv[6]); register int __result = __s1[0] - ((__const unsigned char *) (__const char *) ("sphere"))[0]; if (__s2_len > 0 && __result == 0) { __result = (__s1[1] - ((__const unsigned char *) (__const char *) ("sphere"))[1]); if (__s2_len > 1 && __result == 0) { __result = (__s1[2] - ((__const unsigned char *) (__const char *) ("sphere"))[2]); if (__s2_len > 2 && __result == 0) __result = (__s1[3] - ((__const unsigned char *) (__const char *) ("sphere"))[3]); } } __result; }))) : __builtin_strcmp (argv[6], "sphere")))); }) : strncmp (argv[6], "sphere", 6)))))==0) {
m_max = 181;
n_max = 361;
h = INPUT[ ( ((3)-1) + ( ((1)-1)*4 ))];
FF = INPUT[ ( ((4)-1) + ( ((1)-1)*4 ))];
}
float * restrict X;
X = malloc(m_max * n_max * sizeof(float));
float * restrict Y;
Y = malloc(m_max * n_max * sizeof(float));
float * restrict Z;
Z = malloc(m_max * n_max * sizeof(float));
float _Complex * restrict P;
P = malloc(m_max * n_max * sizeof(float _Complex));
float _Complex * restrict Ps;
Ps = malloc((m_max) * (n_max) * (num_s) * sizeof(float _Complex));
int Ps_slice_stride;
Ps_slice_stride = (m_max) * (n_max) ;
float GG;
float HH;
for ( n = 1 ; n <= 99 ; n++ ) {
for ( m = 1 ; m <= 99 ; m++ ) {
X[ ( ((m)-1) + ( ((n)-1)*m_max ))] = FF ;
Y[ ( ((m)-1) + ( ((n)-1)*m_max ))] = FF ;
Z[ ( ((m)-1) + ( ((n)-1)*m_max ))] = FF ;
}
}
int KK = atoi(argv[8]);
int LL = 3 * KK;
float * restrict MM;
MM = malloc(4 * LL * sizeof(float));
for ( n = 1 ; n <= n_max ; n++) {
for ( m = 1 ; m <= m_max ; m++) {
for ( s = 1 ; s <= num_s ; s++) {
Ps[ ( ((m)-1) + (((n)-1)*(m_max)) + (((s)-1)*(Ps_slice_stride)) ) ] = 0.0 + 0.0 * (__extension__ 1.0iF);
}
}
}
int liter ;
#pragma omp parallel for private(m,liter,s)
for ( n = 1 ; n <= n_max ; n++) {
for ( m = 1 ; m <= m_max ; m++) {
for ( liter = 1 ; liter <= KK ; liter++ ) {
for ( s = 1 ; s <= num_s ; s++) {
int LM_column;
float NN[4];
float OO[4];
float PP[4];
float QQ[4];
float k;
int s_index;
float RR;
s_index = s + (DD -1);
RR = f[ ( (s_index)-1) ];
k = 99.0;
NN[1 -1] = X[ ( ((m)-1) + ( ((n)-1)*m_max ))];
NN[2 -1] = Y[ ( ((m)-1) + ( ((n)-1)*m_max ))];
NN[3 -1] = Z[ ( ((m)-1) + ( ((n)-1)*m_max ))];
NN[4 -1] = 1.0;
LM_column = ((liter -1) * 3) + 1;
OO[1 -1] = MM[ ( ((1)-1) + ( ((LM_column)-1)*4 ))];
OO[2 -1] = MM[ ( ((2)-1) + ( ((LM_column)-1)*4 ))];
OO[3 -1] = MM[ ( ((3)-1) + ( ((LM_column)-1)*4 ))];
OO[4 -1] = MM[ ( ((4)-1) + ( ((LM_column)-1)*4 ))];
LM_column = ((liter -1) * 3) + 2;
PP[1 -1] = MM[ ( ((1)-1) + ( ((LM_column)-1)*4 ))];
PP[2 -1] = MM[ ( ((2)-1) + ( ((LM_column)-1)*4 ))];
PP[3 -1] = MM[ ( ((3)-1) + ( ((LM_column)-1)*4 ))];
PP[4 -1] = MM[ ( ((4)-1) + ( ((LM_column)-1)*4 ))];
LM_column = ((liter -1) * 3) + 3;
QQ[1 -1] = MM[ ( ((1)-1) + ( ((LM_column)-1)*4 ))];
QQ[2 -1] = MM[ ( ((2)-1) + ( ((LM_column)-1)*4 ))];
QQ[3 -1] = MM[ ( ((3)-1) + ( ((LM_column)-1)*4 ))];
QQ[4 -1] = MM[ ( ((4)-1) + ( ((LM_column)-1)*4 ))];
}
}
}
}
#pragma omp parallel for private(m)
for ( n = 1 ; n <= n_max ; n++) {
for ( m = 1 ; m <= m_max ; m++) {
int s;
float SSS;
float f1,f2,p1,p2,TT,h,FFF;
SSS = 0.0;
for ( s = 2 ; s <= num_s ; s++) {
f1 = f[ ( ((s-1) + (DD - 1))-1) ];
f2 = f[ ( ((s) + (DD - 1))-1) ];
p1 = cabsf(Ps[ ( ((m)-1) + (((n)-1)*(m_max)) + ((((s-1))-1)*(Ps_slice_stride)) ) ]) ;
p2 = cabsf(Ps[ ( ((m)-1) + (((n)-1)*(m_max)) + (((s)-1)*(Ps_slice_stride)) ) ]) ;
h = f2 - f1;
FFF = (f1 + f2) / 2.0;
TT = (1.0 / sqrtf(2.0)) * (((h * p1) + (0.5 * h * (p2 - p1))) * (1.0 / FFF));
SSS += TT;
}
P[ ( ((m)-1) + ( ((n)-1)*m_max ))] = SSS + ((__extension__ 1.0iF) * 0.0);
}
}
fp = fopen(argv[10],"wb");
fwrite(X,sizeof(float),m_max * n_max,fp);
fclose(fp);
fp = fopen(argv[11],"wb");
fwrite(Y,sizeof(float),m_max * n_max,fp);
fclose(fp);
fp = fopen(argv[12],"wb");
fwrite(Z,sizeof(float),m_max * n_max,fp);
fclose(fp);
fp = fopen(argv[13],"wb");
fwrite(P,sizeof(float _Complex),m_max * n_max,fp);
fclose(fp);
return(0);
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc) {
int i, j, k;
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
char A_PART = A[i * lda + k];
if (A_PART) {
for (j = 0; j < N; ++j) {
C[i * ldc + j] += B[k * ldb + j];
}
} else {
for (j = 0; j < N; ++j) {
C[i * ldc + j] -= B[k * ldb + j];
}
}
}
}
}
float *random_matrix(int rows, int cols) {
int i;
float *m = calloc(rows * cols, sizeof(float));
for (i = 0; i < rows * cols; ++i) {
m[i] = (float) rand() / RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n) {
float *a;
if (!TA) a = random_matrix(m, k);
else a = random_matrix(k, m);
int lda = (!TA) ? k : m;
float *b;
if (!TB) b = random_matrix(k, n);
else b = random_matrix(n, k);
int ldb = (!TB) ? n : k;
float *c = random_matrix(m, n);
int i;
clock_t start = clock(), end;
for (i = 0; i < 10; ++i) {
gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n", m, k, k, n, TA, TB,
(float) (end - start) / CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc) {
gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc) {
int i, j, k;
#pragma omp parallel for
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i * ldc + j] += A_PART * B[k * ldb + j];
}
}
}
}
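// Loop-order note (illustrative): the i-k-j ordering streams B and C
// row-wise with unit stride in the inner loop, which vectorizes well;
// the naive i-j-k order would stride through B by ldb instead.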
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc) {
int i, j, k;
#pragma omp parallel for
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
register float sum = 0;
for (k = 0; k < K; ++k) {
sum += ALPHA * A[i * lda + k] * B[j * ldb + k];
}
C[i * ldc + j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc) {
int i, j, k;
#pragma omp parallel for
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register float A_PART = ALPHA * A[k * lda + i];
for (j = 0; j < N; ++j) {
C[i * ldc + j] += A_PART * B[k * ldb + j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc) {
int i, j, k;
#pragma omp parallel for
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
register float sum = 0;
for (k = 0; k < K; ++k) {
sum += ALPHA * A[i + k * lda] * B[k + j * ldb];
}
C[i * ldc + j] += sum;
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc) {
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
C[i * ldc + j] *= BETA;
}
}
if (!TA && !TB)
gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
else if (TA && !TB)
gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
else if (!TA && TB)
gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
else
gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
}
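/* Usage sketch (illustrative, hypothetical values): C = A*B for
* row-major 2x3 and 3x2 operands; the leading dimensions are the row
* strides (= number of columns):
*
*   float A[6] = {1,2,3, 4,5,6}, B[6] = {7,8, 9,10, 11,12}, C[4] = {0};
*   gemm(0, 0, 2, 2, 3, 1.0f, A, 3, B, 2, 0.0f, C, 2);
*/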
#ifdef GPU
#include <math.h>
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
// cuBLAS is column-major: computing C^T = B^T * A^T (operands swapped)
// yields the row-major C expected by the caller
cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); // note: cublasSgemm actually returns cublasStatus_t
check_error(status);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void time_gpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
cudaThreadSynchronize(); // deprecated alias of cudaDeviceSynchronize()
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,192,729,1600);
time_gpu(0,0,384,196,1728);
time_gpu(0,0,256,196,3456);
time_gpu(0,0,256,196,2304);
time_gpu(0,0,128,4096,12544);
time_gpu(0,0,128,4096,4096);
*/
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,576,12544);
time_gpu(0,0,256,2304,784);
time_gpu(1,1,2304,256,784);
time_gpu(0,0,512,4608,196);
time_gpu(1,1,4608,512,196);
return 0;
}
#endif
|
conv3x3s1_winograd64_pack4_neon_dot.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s1_winograd64_pack4_neon_dot(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt,
int outch, int inch, int outh, int outw)
{
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
Mat bottom_blob_tm2 = bottom_blob;
Mat top_blob_tm = top_blob;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm/8 * w_tm/8;
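// Tile-count example (illustrative): each 6x6 output tile maps to an
// 8x8 transformed tile, so outw = outh = 12 gives w_tm = h_tm = 16 and
// tiles = (16/8) * (16/8) = 4.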
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p+1);
const Mat kernel01_tm = kernel_tm.channel(pp);
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i=0;
for (; i+11<tiles; i+=12)
{
const float* r0 = bb2.row(i/12);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
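// the input tiles were packed into rows of 12, then 8/4/2/1 for the tails,
// which is why each remainder loop below indexes bb2 with
// i/12 + (i%12)/8 + (i%12%8)/4 + ... arithmetic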
for (; i+7<tiles; i+=8)
{
const float* r0 = bb2.row(i/12 + (i%12)/8);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r4 r5 r6 r7
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<tiles; i+=4)
{
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i+1<tiles; i+=2)
{
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
}
for (; i<tiles; i++)
{
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
const float* k01 = kernel01_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n"// r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"// w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"
);
}
}
}
#endif // __ARM_NEON && __aarch64__
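// remaining output channels are produced one at a time; the aarch64 branch
// unrolls the tile loop 12/8/4/2/1 while the armv7 branch unrolls 8/4/2/1,
// matching the two packing layouts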
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p/2+p%2);
#else
const Mat kernel0_tm = kernel_tm.channel(p);
#endif
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i=0;
#if __aarch64__
for (; i+11<tiles; i+=12)
{
const float* r0 = bb2.row(i/12);
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#endif
for (; i+7<tiles; i+=8)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8);
#else
const float* r0 = bb2.row(i/8);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"// r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
for (; i+3<tiles; i+=4)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4);
#else
const float* r0 = bb2.row(i/8 + (i%8)/4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"
);
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif
}
for (; i+1<tiles; i+=2)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
#else
const float* r0 = bb2.row(i/8 + (i%8)/4 + (i%4)/2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"
);
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"
);
#endif
}
for (; i<tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
#else
const float* r0 = bb2.row(i/8 + (i%8)/4 + (i%4)/2 + i%2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"// r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"
);
#else
asm volatile(
"veor q8, q8 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif
}
}
}
}
}
}
|
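The remainder loops above are lane-broadcast FMA micro-kernels. A minimal AArch64 intrinsics sketch of the 4-tile variant follows; it assumes an AArch64 toolchain, the function and pointer names are illustrative rather than taken from the original source, and the real code additionally software-pipelines the loads with prfm prefetches:

#include <arm_neon.h>
// Sketch of the "i+3 < tiles" micro-kernel: 4 output tiles, 4 input
// channels per iteration, acc[t] += w[c] * r[t] lane c (fmla v.., v.., v..s[c]).
static void winograd_gemm_4tiles(const float* r0, const float* k0,
                                 float* out, int nn)
{
    float32x4_t acc0 = vdupq_n_f32(0.f), acc1 = vdupq_n_f32(0.f);
    float32x4_t acc2 = vdupq_n_f32(0.f), acc3 = vdupq_n_f32(0.f);
    for (int q = 0; q < nn; q++)
    {
        float32x4_t v0 = vld1q_f32(r0);      // tile 0, channels 0..3
        float32x4_t v1 = vld1q_f32(r0 + 4);  // tile 1
        float32x4_t v2 = vld1q_f32(r0 + 8);  // tile 2
        float32x4_t v3 = vld1q_f32(r0 + 12); // tile 3
        float32x4_t w0 = vld1q_f32(k0);      // weights for channel 0
        float32x4_t w1 = vld1q_f32(k0 + 4);  // channel 1
        float32x4_t w2 = vld1q_f32(k0 + 8);  // channel 2
        float32x4_t w3 = vld1q_f32(k0 + 12); // channel 3
        acc0 = vfmaq_laneq_f32(acc0, w0, v0, 0);
        acc0 = vfmaq_laneq_f32(acc0, w1, v0, 1);
        acc0 = vfmaq_laneq_f32(acc0, w2, v0, 2);
        acc0 = vfmaq_laneq_f32(acc0, w3, v0, 3);
        acc1 = vfmaq_laneq_f32(acc1, w0, v1, 0);
        acc1 = vfmaq_laneq_f32(acc1, w1, v1, 1);
        acc1 = vfmaq_laneq_f32(acc1, w2, v1, 2);
        acc1 = vfmaq_laneq_f32(acc1, w3, v1, 3);
        acc2 = vfmaq_laneq_f32(acc2, w0, v2, 0);
        acc2 = vfmaq_laneq_f32(acc2, w1, v2, 1);
        acc2 = vfmaq_laneq_f32(acc2, w2, v2, 2);
        acc2 = vfmaq_laneq_f32(acc2, w3, v2, 3);
        acc3 = vfmaq_laneq_f32(acc3, w0, v3, 0);
        acc3 = vfmaq_laneq_f32(acc3, w1, v3, 1);
        acc3 = vfmaq_laneq_f32(acc3, w2, v3, 2);
        acc3 = vfmaq_laneq_f32(acc3, w3, v3, 3);
        r0 += 16;
        k0 += 16;
    }
    vst1q_f32(out, acc0);
    vst1q_f32(out + 4, acc1);
    vst1q_f32(out + 8, acc2);
    vst1q_f32(out + 12, acc3);
}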
main.c | #include <unistd.h>
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int N_BUCKETS;
struct Bucket {
int* array;
size_t n_elem;
size_t max_elem;
};
struct Bucket* make(size_t max){
struct Bucket* block_arr = malloc(sizeof(struct Bucket));
block_arr->array = malloc(sizeof(int) * max);
block_arr->n_elem = 0;
block_arr->max_elem = max;
return block_arr;
}
void free_bucket(struct Bucket* b){
free(b->array);
free(b);
}
__inline__
void buckets_to_arr(size_t size, struct Bucket* arr, int* res) {
memcpy(res, arr->array, arr->n_elem * sizeof(int));
}
void insert(struct Bucket* arr, int elem){
if(arr->n_elem >= arr->max_elem){
// grow geometrically so appends stay amortized O(1); keep the old
// pointer until realloc is known to have succeeded
int* grown = realloc(arr->array, sizeof(int) * arr->max_elem * 2);
if(!grown){
fprintf(stderr, "insert: out of memory\n");
_exit(1);
}
arr->array = grown;
arr->max_elem *= 2;
}
arr->array[arr->n_elem] = elem;
arr->n_elem++;
}
int cmpfunc (const void * a, const void * b) {
return ( *(int*)a - *(int*)b );
}
void bucket_sort(size_t size, int* arr){
if (!size){
fprintf(stderr, "Can't calculate max of empty array");
_exit(1);
}
int max = arr[0];
int min = arr[0];
for(size_t i = 1; i < size; i++) { //vectorized
if(arr[i] > max) max = arr[i];
if(arr[i] < min) min = arr[i];
}
#ifndef NDEBUG
fprintf(stderr, "max: %d, min: %d\n", max, min);
#endif
struct Bucket* buckets[N_BUCKETS];
for (size_t i = 0; i < (size_t)N_BUCKETS; i++)
buckets[i] = make(size);
// value range; +1 so the maximum itself maps below N_BUCKETS
long long range = (long long)max - (long long)min + 1;
#pragma omp parallel
{
#pragma omp for
for (size_t i = 0; i < (size_t)N_BUCKETS; i++) {
for (size_t j = 0; j < size; j++){
// shift by min so the mapping works for negative and positive minima alike
size_t n_bucket = (size_t)(((long long)arr[j] - min) * N_BUCKETS / range);
n_bucket = n_bucket >= (size_t)N_BUCKETS ? N_BUCKETS - 1 : n_bucket;
if(n_bucket == i) {
#ifndef NDEBUG
fprintf(stderr, "%zu<- %d\n", i, arr[j]);
#endif
buckets[i]->array[buckets[i]->n_elem++] = arr[j];
}
}
}
size_t j;
#pragma omp for
for(j = 0; j < (size_t)N_BUCKETS; j++)
qsort(buckets[j]->array, buckets[j]->n_elem, sizeof(int), cmpfunc);
// gather serially: bucket j's write offset depends on the sizes of all
// earlier buckets, so a per-thread running offset would race and produce
// overlapping writes
#pragma omp single
{
size_t off = 0;
for(j = 0; j < (size_t)N_BUCKETS; j++) {
memcpy(arr + off, buckets[j]->array, buckets[j]->n_elem * sizeof(int));
off += buckets[j]->n_elem;
free_bucket(buckets[j]);
}
}
}
}
void print_arr(int* arr, size_t size){
for(size_t i = 0; i < size; i++)
printf("%d\n", arr[i]);
}
int main(int argc, char** argv){
if (argc < 2){
fprintf(stderr, "usage: %s <input file>\n", argv[0]);
return 1;
}
N_BUCKETS = 10;
struct Bucket* b = make(100);
FILE* file = fopen (argv[1], "r");
if (!file){
perror(argv[1]);
return 1;
}
int v;
// loop on fscanf's return value; a feof-driven loop processes the last
// value twice when the file lacks a trailing newline
while (fscanf (file, "%d", &v) == 1)
insert(b, v);
fclose(file);
size_t size = b->n_elem;
#ifndef NDEBUG
print_arr(b->array, size);
#endif
double time = omp_get_wtime();
bucket_sort(size, b->array);
fprintf(stderr, "Time=%f\n", omp_get_wtime()-time);
print_arr(b->array, size);
free_bucket(b);
}
|
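A quick sanity check of the bucket index mapping used in bucket_sort, as a standalone sketch (the helper name and the sample values are made up for illustration):

#include <assert.h>
// bucket index for value v given the array's min and range = max - min + 1
static size_t bucket_of(int v, int min, long long range, int n_buckets)
{
    return (size_t)(((long long)v - min) * n_buckets / range);
}
int check_mapping(void)
{
    assert(bucket_of(-5, -5, 15, 10) == 0); // the minimum lands in bucket 0
    assert(bucket_of( 9, -5, 15, 10) == 9); // the maximum lands in the last bucket
    assert(bucket_of( 2, -5, 15, 10) == 4); // (2 + 5) * 10 / 15 == 4
    return 0;
}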
GB_unop__log1p_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log1p_fp64_fp64)
// op(A') function: GB (_unop_tran__log1p_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = log1p (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log1p (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = log1p (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__log1p_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = log1p (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = log1p (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__log1p_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
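For context, a minimal usage sketch of what this generated kernel serves: GrB_apply with the FP64 log1p operator. The operator name GxB_LOG1P_FP64 follows the library's GxB naming convention; treat the snippet as illustrative, not canonical.

#include "GraphBLAS.h"
// C = log1p (A), elementwise: no mask, no accumulator, default descriptor
GrB_Info apply_log1p (GrB_Matrix C, GrB_Matrix A)
{
    return (GrB_Matrix_apply (C, NULL, NULL, GxB_LOG1P_FP64, A, NULL)) ;
}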
polynom.h | #pragma once
#include <utility>
#include <complex>
#include <cmath>
#include <boost/unordered_map.hpp>
#include "normalform/monom.h"
#include "normalform/monomcoeff.h"
#ifdef _OPENMP
#include <omp.h>
#endif
namespace normalform {
using std::complex;
template<class Tfloat>
inline bool isZero(const complex<Tfloat> x)
{
return (std::abs(x.real()) < (Tfloat)1e-8) && (std::abs(x.imag()) < (Tfloat)1e-8);
}
// friend functions
template<size_t,class Tfloat> class CPolynom;
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator +(const CPolynom<N,Tfloat>&, const CPolynom<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator +(const CPolynom<N,Tfloat>&, const CMonomCoeff<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator +(const CMonomCoeff<N,Tfloat>&, const CMonomCoeff<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator -(const CPolynom<N,Tfloat>&, const CPolynom<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator -(const CPolynom<N,Tfloat>&, const CMonomCoeff<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator -(const CMonomCoeff<N,Tfloat>&, const CMonomCoeff<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator *(const CPolynom<N,Tfloat>&, const CPolynom<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator *(const CPolynom<N,Tfloat>&, const complex<Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator *(const complex<Tfloat>&, const CPolynom<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator ^(const CPolynom<N,Tfloat>&, const CPolynom<N,Tfloat>&);
template<size_t N,class Tfloat> CPolynom<N,Tfloat> operator -(const CPolynom<N,Tfloat>&);
template<size_t N,class Tfloat=double>
class CPolynom
{
public:
typedef boost::unordered_map<CMonom<N>,complex<Tfloat> > CMonomMap;
CMonomMap list;
CPolynom<N,Tfloat>()
{};
CPolynom<N,Tfloat>(const CPolynom<N,Tfloat>& p)
{
list = p.list;
};
CPolynom<N,Tfloat>& operator =(const CPolynom<N,Tfloat>& p)
{
list = p.list;
return *this;
};
void Clear()
{
list.clear();
};
void Simplify();
friend CPolynom<N,Tfloat> operator +<>(const CPolynom<N,Tfloat>& p1, const CPolynom<N,Tfloat>& p2);
friend CPolynom<N,Tfloat> operator +<>(const CPolynom<N,Tfloat>& p, const CMonomCoeff<N,Tfloat>& m);
friend CPolynom<N,Tfloat> operator +<>(const CMonomCoeff<N,Tfloat>& m1, const CMonomCoeff<N,Tfloat>& m2);
friend CPolynom<N,Tfloat> operator -<>(const CPolynom<N,Tfloat>& p1, const CPolynom<N,Tfloat>& p2);
friend CPolynom<N,Tfloat> operator -<>(const CPolynom<N,Tfloat>& p, const CMonomCoeff<N,Tfloat>& m);
friend CPolynom<N,Tfloat> operator -<>(const CMonomCoeff<N,Tfloat>& m1, const CMonomCoeff<N,Tfloat>& m2);
friend CPolynom<N,Tfloat> operator *<>(const CPolynom<N,Tfloat>& p1, const CPolynom<N,Tfloat>& p2);
friend CPolynom<N,Tfloat> operator *<>(const CPolynom<N,Tfloat>& p, const complex<Tfloat>& r);
friend CPolynom<N,Tfloat> operator *<>(const complex<Tfloat>& r, const CPolynom<N,Tfloat>& p);
friend CPolynom<N,Tfloat> operator ^<>(const CPolynom<N,Tfloat>& p1, const CPolynom<N,Tfloat>& p2);
friend CPolynom<N,Tfloat> operator -<>(const CPolynom<N,Tfloat>& p);
CPolynom<N,Tfloat>& operator +=(const CPolynom<N,Tfloat>& p);
CPolynom<N,Tfloat>& operator -=(const CPolynom<N,Tfloat>& p);
CPolynom<N,Tfloat>& operator +=(const CMonomCoeff<N,Tfloat>& m);
CPolynom<N,Tfloat>& operator -=(const CMonomCoeff<N,Tfloat>& m);
CPolynom<N,Tfloat>& operator *=(const complex<Tfloat>& r);
};
template<size_t N,class Tfloat>
inline void CPolynom<N,Tfloat>::Simplify()
{
if(list.empty())
return;
for(typename CMonomMap::const_iterator it = list.begin(); it != list.end();)
{
if(isZero(it->second))
list.erase(it++);
else
++it;
}
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator -(const CPolynom<N,Tfloat>& p)
{
CPolynom<N,Tfloat> pm(p);
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = pm.list.begin(); it != pm.list.end(); ++it)
pm.list[it->first] = - pm.list[it->first];
return pm;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator +(const CPolynom<N,Tfloat>& p1, const CPolynom<N,Tfloat>& p2)
{
CPolynom<N,Tfloat> p(p1);
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = p2.list.begin(); it != p2.list.end(); ++it)
p.list[it->first] += it->second;
return p;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat>& CPolynom<N,Tfloat>::operator +=(const CPolynom<N,Tfloat>& p)
{
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = p.list.begin(); it != p.list.end(); ++it)
list[it->first] += it->second;
return *this;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator +(const CPolynom<N,Tfloat>& p, const CMonomCoeff<N,Tfloat>& mc)
{
CPolynom<N,Tfloat> p1(p);
p1.list[mc.monom] += mc.coeff;
return p1;
}
template<size_t N,class Tfloat>
CPolynom<N,Tfloat>& CPolynom<N,Tfloat>::operator +=(const CMonomCoeff<N,Tfloat>& mc)
{
list[mc.monom] += mc.coeff;
return *this;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator +(const CMonomCoeff<N,Tfloat>& m1, const CMonomCoeff<N,Tfloat>& m2)
{
CPolynom<N,Tfloat> p;
p.list[m1.monom] += m1.coeff;
p.list[m2.monom] += m2.coeff;
return p;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator -(const CPolynom<N,Tfloat>& p1, const CPolynom<N,Tfloat>& p2)
{
CPolynom<N,Tfloat> p(p1);
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = p2.list.begin(); it != p2.list.end(); ++it)
p.list[it->first] -= it->second;
return p;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat>& CPolynom<N,Tfloat>::operator -=(const CPolynom<N,Tfloat>& p)
{
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = p.list.begin(); it != p.list.end(); ++it)
list[it->first] -= it->second;
return *this;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator -(const CPolynom<N,Tfloat>& p, const CMonomCoeff<N,Tfloat>& mc)
{
CPolynom<N,Tfloat> p1(p);
p1.list[mc.monom] -= mc.coeff;
return p1;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat>& CPolynom<N,Tfloat>::operator -=(const CMonomCoeff<N,Tfloat>& mc)
{
list[mc.monom] -= mc.coeff;
return *this;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator -(const CMonomCoeff<N,Tfloat>& m1, const CMonomCoeff<N,Tfloat>& m2)
{
CPolynom<N,Tfloat> p;
p.list[m1.monom] -= m1.coeff;
p.list[m2.monom] -= m2.coeff;
return p;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator *(const CPolynom<N,Tfloat>& p1, const CPolynom<N,Tfloat>& p2)
{
CPolynom<N,Tfloat> p;
if(p1.list.empty() || p2.list.empty())
return p;
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it1 = p1.list.begin(); it1 != p1.list.end(); ++it1)
{
CMonomCoeff<N,Tfloat> mc1(it1);
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it2 = p2.list.begin(); it2 != p2.list.end(); ++it2)
{
CMonomCoeff<N,Tfloat> mc2(it2);
p += mc1 * mc2;
}
}
return p;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator *(const CPolynom<N,Tfloat>& p, const complex<Tfloat>& r)
{
if(isZero(r))
return CPolynom<N,Tfloat>();
CPolynom<N,Tfloat> p1(p);
if(isZero(r - (complex<Tfloat>)1))
return p1;
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = p.list.begin(); it != p.list.end(); ++it)
p1.list[it->first] = it->second * r;
return p1;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator *(const complex<Tfloat>& r, const CPolynom<N,Tfloat>& p)
{
if(isZero(r))
return CPolynom<N,Tfloat>();
CPolynom<N,Tfloat> p1(p);
if(isZero(r - (complex<Tfloat>)1))
return p1;
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = p.list.begin(); it != p.list.end(); ++it)
p1.list[it->first] = it->second * r;
return p1;
}
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat>& CPolynom<N,Tfloat>::operator *=(const complex<Tfloat>& r)
{
if(isZero(r))
{
Clear();
return *this;
}
if(isZero(r-(complex<Tfloat>)1))
return *this;
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = list.begin(); it != list.end(); ++it)
list[it->first] = it->second * r;
return *this;
}
/*
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> diff(const CPolynom<N,Tfloat>& p, const size_t j)
{
CPolynom<N,Tfloat> D;
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator it = p.list.begin(); it != p.list.end(); ++it)
if(it->first[j])
{
CMonomCoeff<N,Tfloat> d(it);
d.coeff *= (complex<Tfloat>)d.monom[j];
d.monom[j]--;
D += d;
}
return D;
}
*/
template<size_t N,class Tfloat>
inline CPolynom<N,Tfloat> operator ^(const CPolynom<N,Tfloat>& F, const CPolynom<N,Tfloat>& G)
{
// CPolynom<N,Tfloat> C;
// for(size_t j = 0; j < N; j++)
// C += diff(F, j) * diff(G, j + N) - diff(G, j) * diff(F, j + N);
// return C;
#ifdef _OPENMP
if(F.list.size() < G.list.size())
{
return -(G^F);
}
#endif
CPolynom<N,Tfloat> C;
#pragma omp parallel shared(C)
{
#ifdef _OPENMP
int thread_count = omp_get_num_threads();
int thread_num = omp_get_thread_num();
size_t chunk_size = F.list.size() / thread_count;
typename CPolynom<N,Tfloat>::CMonomMap::const_iterator begin = F.list.begin();
std::advance(begin, thread_num * chunk_size);
typename CPolynom<N,Tfloat>::CMonomMap::const_iterator end = begin;
if(thread_num == thread_count - 1)
end = F.list.end();
else
std::advance(end, chunk_size);
#else
typename CPolynom<N,Tfloat>::CMonomMap::const_iterator begin = F.list.begin();
typename CPolynom<N,Tfloat>::CMonomMap::const_iterator end = F.list.end();
#endif
#pragma omp barrier
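// each thread owns a contiguous chunk of F's terms, accumulates into a
// private Cchunk, and merges once into the shared C under the critical below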
CPolynom<N,Tfloat> Cchunk;
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator itF = begin; itF != end; ++itF)
{
CMonomCoeff<N,Tfloat> mcF(itF);
for(typename CPolynom<N,Tfloat>::CMonomMap::const_iterator itG = G.list.begin(); itG != G.list.end(); ++itG)
{
CMonomCoeff<N,Tfloat> mcG(itG);
CMonomCoeff<N,Tfloat> mcFG = mcF*mcG;
for(size_t j = 0; j < N; j++)
{
int diff = mcF.monom[j] * mcG.monom[j+N] - mcG.monom[j] * mcF.monom[j+N];
if(diff)
{
CMonomCoeff<N,Tfloat> mc(mcFG);
mc.coeff *= complex<Tfloat>(diff);
mc.monom[j]--;
mc.monom[j+N]--;
Cchunk += mc;
}
}
}
}
#pragma omp critical
C += Cchunk;
}
C.Simplify();
return C;
}
} // namespace normalform
|
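As a reading aid for the nested loop in operator^: with x the first N variables, y the second N, multi-index exponents, and e_j the j-th unit vector, the loop implements the monomial-level Poisson bracket identity (a sketch of the derivation, not part of the original header):

\{F,G\} = \sum_{j=0}^{N-1}\left(\frac{\partial F}{\partial x_j}\frac{\partial G}{\partial y_j}-\frac{\partial G}{\partial x_j}\frac{\partial F}{\partial y_j}\right),
\qquad
\{x^{a}y^{b},\;x^{c}y^{d}\} = \sum_{j=0}^{N-1}\left(a_j d_j - c_j b_j\right)x^{a+c-e_j}\,y^{b+d-e_j}.

The integer diff in the code is exactly a_j d_j - c_j b_j, and the two decrements of mc.monom implement the -e_j on each side.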
uts.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/**********************************************************************************************/
/*
* Copyright (c) 2007 The Unbalanced Tree Search (UTS) Project Team:
* -----------------------------------------------------------------
*
* This file is part of the unbalanced tree search benchmark. This
* project is licensed under the MIT Open Source license. See the LICENSE
* file for copyright and licensing information.
*
* UTS is a collaborative project between researchers at the University of
* Maryland, the University of North Carolina at Chapel Hill, and the Ohio
* State University.
*
* University of Maryland:
* Chau-Wen Tseng(1) <tseng at cs.umd.edu>
*
* University of North Carolina, Chapel Hill:
* Jun Huan <huan,
* Jinze Liu liu,
* Stephen Olivier olivier,
* Jan Prins* prins at cs.umd.edu>
*
* The Ohio State University:
* James Dinan <dinan,
* Gerald Sabin sabin,
* P. Sadayappan* saday at cse.ohio-state.edu>
*
* Supercomputing Research Center
* D. Pryor
*
* (1) - indicates project PI
*
* UTS Recursive Depth-First Search (DFS) version developed by James Dinan
*
* Adapted for OpenMP 3.0 Task-based version by Stephen Olivier
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
#include "app-desc.h"
#include "bots.h"
#include "uts.h"
/***********************************************************
* Global state *
***********************************************************/
unsigned long long nLeaves = 0;
int maxTreeDepth = 0;
/***********************************************************
* Tree generation strategy is controlled via various *
* parameters set from the command line. The parameters *
* and their default values are given below. *
* Trees are generated using a Galton-Watson process, in *
* which the branching factor of each node is a random *
* variable. *
* *
* The random variables follow a binomial distribution. *
***********************************************************/
double b_0 = 4.0; // default branching factor at the root
int rootId = 0; // default seed for RNG state at root
/***********************************************************
* The branching factor at the root is specified by b_0.
* The branching factor below the root follows an
* identical binomial distribution at all nodes.
* A node has m children with prob q, or no children with
* prob (1-q). The expected branching factor is q * m.
*
* Default parameter values
***********************************************************/
int nonLeafBF = 4; // m
double nonLeafProb = 15.0 / 64.0; // q
/***********************************************************
* compute granularity - number of rng evaluations per
* tree node
***********************************************************/
int computeGranularity = 1;
/***********************************************************
* expected results for execution
***********************************************************/
unsigned long long exp_tree_size = 0;
int exp_tree_depth = 0;
unsigned long long exp_num_leaves = 0;
/***********************************************************
* FUNCTIONS *
***********************************************************/
// Interpret 32 bit positive integer as value on [0,1)
double rng_toProb(int n)
{
if (n < 0) {
printf("*** toProb: rand n = %d out of range\n",n);
}
return ((n<0)? 0.0 : ((double) n)/2147483648.0);
}
void uts_initRoot(Node * root)
{
root->height = 0;
root->numChildren = -1; // means not yet determined
rng_init(root->state.state, rootId);
bots_message("Root node at %p\n", root);
}
int uts_numChildren_bin(Node * parent)
{
// distribution is identical everywhere below root
int v = rng_rand(parent->state.state);
double d = rng_toProb(v);
return (d < nonLeafProb) ? nonLeafBF : 0;
}
int uts_numChildren(Node *parent)
{
int numChildren = 0;
/* Determine the number of children */
if (parent->height == 0) numChildren = (int) floor(b_0);
else numChildren = uts_numChildren_bin(parent);
// limit number of children
// only a BIN root can have more than MAXNUMCHILDREN
if (parent->height == 0) {
int rootBF = (int) ceil(b_0);
if (numChildren > rootBF) {
bots_debug("*** Number of children of root truncated from %d to %d\n", numChildren, rootBF);
numChildren = rootBF;
}
}
else {
if (numChildren > MAXNUMCHILDREN) {
bots_debug("*** Number of children truncated from %d to %d\n", numChildren, MAXNUMCHILDREN);
numChildren = MAXNUMCHILDREN;
}
}
return numChildren;
}
/***********************************************************
* Recursive depth-first implementation *
***********************************************************/
unsigned long long parallel_uts ( Node *root )
{
unsigned long long num_nodes = 0 ;
root->numChildren = uts_numChildren(root);
bots_message("Computing Unbalance Tree Search algorithm ");
#pragma omp parallel
#pragma omp single nowait //consider removing nowait and see how it affects performance
#pragma omp task
num_nodes = parTreeSearch( 0, root, root->numChildren );
bots_message(" completed!");
return num_nodes;
}
unsigned long long parTreeSearch(int depth, Node *parent, int numChildren)
{
Node n[numChildren], *nodePtr;
int i, j;
unsigned long long subtreesize = 1, partialCount[numChildren];
//this is to prevent use of unnecessary taskwaits which break the profiler assertions
//if (numChildren == 0){
//printf("[uts bots parTreeSearch] numChildren == 0\n");
// return subtreesize;
//}
// Recurse on the children
for (i = 0; i < numChildren; i++) {
nodePtr = &n[i];
nodePtr->height = parent->height + 1;
// The following line is the work (one or more SHA-1 ops)
for (j = 0; j < computeGranularity; j++) {
rng_spawn(parent->state.state, nodePtr->state.state, i);
}
nodePtr->numChildren = uts_numChildren(nodePtr);
#pragma omp task firstprivate(i, nodePtr) shared(partialCount)
partialCount[i] = parTreeSearch(depth+1, nodePtr, nodePtr->numChildren);
}
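// wait for all child tasks: every partialCount[i] must be written before the sum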
#pragma omp taskwait
for (i = 0; i < numChildren; i++) {
subtreesize += partialCount[i];
}
return subtreesize;
}
void uts_read_file ( char *filename )
{
FILE *fin;
if ((fin = fopen(filename, "r")) == NULL) {
bots_message("Could not open input file (%s)\n", filename);
exit (-1);
}
fscanf(fin,"%lf %lf %d %d %d %llu %d %llu",
&b_0,
&nonLeafProb,
&nonLeafBF,
&rootId,
&computeGranularity,
&exp_tree_size,
&exp_tree_depth,
&exp_num_leaves
);
fclose(fin);
computeGranularity = max(1,computeGranularity);
// Printing input data
bots_message("\n");
bots_message("Root branching factor = %f\n", b_0);
bots_message("Root seed (0 <= 2^31) = %d\n", rootId);
bots_message("Probability of non-leaf node = %f\n", nonLeafProb);
bots_message("Number of children for non-leaf node = %d\n", nonLeafBF);
bots_message("E(n) = %f\n", (double) ( nonLeafProb * nonLeafBF ) );
bots_message("E(s) = %f\n", (double) ( 1.0 / (1.0 - nonLeafProb * nonLeafBF) ) );
bots_message("Compute granularity = %d\n", computeGranularity);
bots_message("Random number generator = "); rng_showtype();
}
void uts_show_stats( void )
{
int nPes = atoi(bots_resources);
int chunkSize = 0;
bots_message("\n");
bots_message("Tree size = %llu\n", (unsigned long long) bots_number_of_tasks );
bots_message("Maximum tree depth = %d\n", maxTreeDepth );
bots_message("Chunk size = %d\n", chunkSize );
bots_message("Number of leaves = %llu (%.2f%%)\n", nLeaves, nLeaves/(float)bots_number_of_tasks*100.0 );
bots_message("Number of PE's = %.4d threads\n", nPes );
bots_message("Wallclock time = %.3f sec\n", bots_time_program );
bots_message("Overall performance = %.0f nodes/sec\n", (bots_number_of_tasks / bots_time_program) );
bots_message("Performance per PE = %.0f nodes/sec\n", (bots_number_of_tasks / bots_time_program / nPes) );
}
int uts_check_result ( void )
{
int answer = BOTS_RESULT_SUCCESSFUL;
if ( bots_number_of_tasks != exp_tree_size ) {
answer = BOTS_RESULT_UNSUCCESSFUL;
bots_message("Incorrect tree size result (%llu instead of %llu).\n", bots_number_of_tasks, exp_tree_size);
}
return answer;
}
|
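A one-line derivation of the E(s) value printed by uts_read_file: each node has m children with probability q, so depth k holds (qm)^k nodes in expectation, and for qm < 1 the expected tree size is the geometric sum

E(s) = \sum_{k=0}^{\infty} (qm)^k = \frac{1}{1 - qm}.

With the defaults q = 15/64 and m = 4, qm = 15/16 and E(s) = 16.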
dualize.h | /* Copyright 2013 IST Austria
Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include "misc.h"
#include "../boundary_matrix.h"
#include "../persistence_pairs.h"
namespace phat {
template< typename Representation >
void dualize( boundary_matrix< Representation >& boundary_matrix ) {
std::vector< dimension > dual_dims;
std::vector< std::vector< index > > dual_matrix;
index nr_of_columns = boundary_matrix.get_num_cols();
dual_matrix.resize( nr_of_columns );
dual_dims.resize( nr_of_columns );
std::vector< index > dual_sizes( nr_of_columns, 0 );
column temp_col;
for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
boundary_matrix.get_col( cur_col, temp_col );
for( index idx = 0; idx < (index)temp_col.size(); idx++)
dual_sizes[ nr_of_columns - 1 - temp_col[ idx ] ]++;
}
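// dual_sizes[c] now holds the final length of dual column c, so each vector
// below can be reserved once and filled append-only without reallocation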
#pragma omp parallel for
for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ )
dual_matrix[cur_col].reserve(dual_sizes[cur_col]);
for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
boundary_matrix.get_col( cur_col, temp_col );
for( index idx = 0; idx < (index)temp_col.size(); idx++)
dual_matrix[ nr_of_columns - 1 - temp_col[ idx ] ].push_back( nr_of_columns - 1 - cur_col );
}
const dimension max_dim = boundary_matrix.get_max_dim();
#pragma omp parallel for
for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ )
dual_dims[ nr_of_columns - 1 - cur_col ] = max_dim - boundary_matrix.get_dim( cur_col );
#pragma omp parallel for
for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ )
std::reverse( dual_matrix[ cur_col ].begin(), dual_matrix[ cur_col ].end() );
boundary_matrix.load_vector_vector( dual_matrix, dual_dims );
}
inline void dualize_persistence_pairs( persistence_pairs& pairs, const index n ) {
for (index i = 0; i < pairs.get_num_pairs(); ++i) {
std::pair< index, index > pair = pairs.get_pair( i );
pairs.set_pair( i , n - 1 - pair.second, n - 1 - pair.first);
}
}
}
|
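A minimal usage sketch tying the two helpers above together; the reduction algorithm and the compute_persistence_pairs helper are assumptions based on PHAT's public headers, which are not shown in this file:

#include <phat/boundary_matrix.h>
#include <phat/representations/vector_vector.h>
#include <phat/compute_persistence_pairs.h>
#include <phat/algorithms/twist_reduction.h>

// reduce the dualized (cohomology) matrix, then translate the resulting
// pairs back into the primal column indexing
void persistence_via_dualization( phat::boundary_matrix< phat::vector_vector >& bm,
                                  phat::persistence_pairs& pairs )
{
    const phat::index n = bm.get_num_cols();
    phat::dualize( bm );
    phat::compute_persistence_pairs< phat::twist_reduction >( pairs, bm );
    phat::dualize_persistence_pairs( pairs, n );
}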